117 files changed, 8801 insertions, 3560 deletions
diff --git a/Documentation/s390/crypto/crypto-API.txt b/Documentation/s390/crypto/crypto-API.txt
deleted file mode 100644
index 71ae6ca9f2c..00000000000
--- a/Documentation/s390/crypto/crypto-API.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-crypto-API support for z990 Message Security Assist (MSA) instructions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-AUTHOR: Thomas Spatzier (tspat@de.ibm.com)
-
-
-1. Introduction crypto-API
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-See Documentation/crypto/api-intro.txt for an introduction/description of the
-kernel crypto API.
-According to api-intro.txt support for z990 crypto instructions has been added
-in the algorithm api layer of the crypto API. Several files containing z990
-optimized implementations of crypto algorithms are placed in the
-arch/s390/crypto directory.
-
-
-2. Probing for availability of MSA
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-It should be possible to use Kernels with the z990 crypto implementations both
-on machines with MSA available and on those without MSA (pre z990 or z990
-without MSA). Therefore a simple probing mechanism has been implemented:
-In the init function of each crypto module the availability of MSA and of the
-respective crypto algorithm in particular will be tested. If the algorithm is
-available the module will load and register its algorithm with the crypto API.
-
-If the respective crypto algorithm is not available, the init function will
-return -ENOSYS. In that case a fallback to the standard software implementation
-of the crypto algorithm must be taken ( -> the standard crypto modules are
-also built when compiling the kernel).
-
-
-3. Ensuring z990 crypto module preference
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If z990 crypto instructions are available the optimized modules should be
-preferred instead of standard modules.
-
-3.1. compiled-in modules
-~~~~~~~~~~~~~~~~~~~~~~~~
-For compiled-in modules it has to be ensured that the z990 modules are linked
-before the standard crypto modules. Then, on system startup the init functions
-of z990 crypto modules will be called first and query for availability of z990
-crypto instructions. If instruction is available, the z990 module will register
-its crypto algorithm implementation -> the load of the standard module will fail
-since the algorithm is already registered.
-If z990 crypto instruction is not available the load of the z990 module will
-fail -> the standard module will load and register its algorithm.
-
-3.2. dynamic modules
-~~~~~~~~~~~~~~~~~~~~
-A system administrator has to take care of giving preference to z990 crypto
-modules. If MSA is available appropriate lines have to be added to
-/etc/modprobe.conf.
-
-Example: z990 crypto instruction for SHA1 algorithm is available
-
-	add the following line to /etc/modprobe.conf (assuming the
-	z990 crypto modules for SHA1 is called sha1_z990):
-
-	alias sha1 sha1_z990
-
-	-> when the sha1 algorithm is requested through the crypto API
-	(which has a module autoloader) the z990 module will be loaded.
-
-TBD: a userspace module probing mechanism
-	something like 'probe sha1 sha1_z990 sha1' in modprobe.conf
-	-> try module sha1_z990, if it fails to load standard module sha1
-	the 'probe' statement is currently not supported in modprobe.conf
-
-
-4. Currently implemented z990 crypto algorithms
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The following crypto algorithms with z990 MSA support are currently implemented.
-The name of each algorithm under which it is registered in crypto API and the
-name of the respective module is given in square brackets.
-
-- SHA1 Digest Algorithm [sha1 -> sha1_z990]
-- DES Encrypt/Decrypt Algorithm (64bit key) [des -> des_z990]
-- Triple DES Encrypt/Decrypt Algorithm (128bit key) [des3_ede128 -> des_z990]
-- Triple DES Encrypt/Decrypt Algorithm (192bit key) [des3_ede -> des_z990]
-
-In order to load, for example, the sha1_z990 module when the sha1 algorithm is
-requested (see 3.2.) add 'alias sha1 sha1_z990' to /etc/modprobe.conf.
-
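As an aside on the probing scheme that section 2 of the removed document describes: each z990 module's init routine checks for the MSA facility and returns -ENOSYS when it is absent, so the module fails to load and the generic software implementation is used instead. A minimal sketch of that pattern follows; it is illustrative only and not the actual sha1_z990 source. The helper msa_sha1_available() is a made-up stand-in for the real facility query, and the crypto API registration is elided.

  #include <linux/errno.h>
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/types.h>

  /* Hypothetical probe; the real module issues the MSA query here. */
  static bool msa_sha1_available(void)
  {
  	return false;
  }

  static int __init sha1_z990_mod_init(void)
  {
  	/* No MSA: fail the load so the generic sha1 module gets used. */
  	if (!msa_sha1_available())
  		return -ENOSYS;

  	/* ... register the hardware-backed sha1 with the crypto API ... */
  	return 0;
  }

  static void __exit sha1_z990_mod_exit(void)
  {
  	/* ... unregister the algorithm ... */
  }

  module_init(sha1_z990_mod_init);
  module_exit(sha1_z990_mod_exit);
  MODULE_LICENSE("GPL");

Combined with the link-order rule from section 3.1 (or the modprobe alias from section 3.2), this failure path is what lets the optimized module win whenever the hardware supports it.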
diff --git a/Documentation/s390/zfcpdump.txt b/Documentation/s390/zfcpdump.txt
new file mode 100644
index 00000000000..cf45d27c460
--- /dev/null
+++ b/Documentation/s390/zfcpdump.txt
@@ -0,0 +1,87 @@
+s390 SCSI dump tool (zfcpdump)
+
+System z machines (z900 or higher) provide hardware support for creating system
+dumps on SCSI disks. The dump process is initiated by booting a dump tool, which
+has to create a dump of the current (probably crashed) Linux image. In order to
+not overwrite memory of the crashed Linux with data of the dump tool, the
+hardware saves some memory plus the register sets of the boot cpu before the
+dump tool is loaded. There exists an SCLP hardware interface to obtain the saved
+memory afterwards. Currently 32 MB are saved.
+
+This zfcpdump implementation consists of a Linux dump kernel together with
+a userspace dump tool, which are loaded together into the saved memory region
+below 32 MB. zfcpdump is installed on a SCSI disk using zipl (as contained in
+the s390-tools package) to make the device bootable. The operator of a Linux
+system can then trigger a SCSI dump by booting the SCSI disk, where zfcpdump
+resides.
+
+The kernel part of zfcpdump is implemented as a debugfs file under "zcore/mem",
+which exports memory and registers of the crashed Linux in an s390
+standalone dump format. It can be used in the same way as e.g. /dev/mem. The
+dump format defines a 4K header followed by plain uncompressed memory. The
+register sets are stored in the prefix pages of the respective cpus. To build a
+dump enabled kernel with the zcore driver, the kernel config option
+CONFIG_ZFCPDUMP has to be set. When reading from "zcore/mem", the part of
+memory, which has been saved by hardware is read by the driver via the SCLP
+hardware interface. The second part is just copied from the non overwritten real
+memory.
+
+The userspace application of zfcpdump can reside e.g. in an initramfs or an
+initrd. It reads from zcore/mem and writes the system dump to a file on a
+SCSI disk.
+
+To build a zfcpdump kernel use the following settings in your kernel
+configuration:
+ * CONFIG_ZFCPDUMP=y
+ * Enable ZFCP driver
+ * Enable SCSI driver
+ * Enable ext2 and ext3 filesystems
+ * Disable as many features as possible to keep the kernel small.
+   E.g. network support is not needed at all.
+
+To use the zfcpdump userspace application in an initramfs you have to do the
+following:
+
+ * Copy the zfcpdump executable somewhere into your Linux tree.
+   E.g. to "arch/s390/boot/zfcpdump". If you do not want to include
+   shared libraries, compile the tool with the "-static" gcc option.
+ * If you want to include e2fsck, add it to your source tree, too. The zfcpdump
+   application attempts to start /sbin/e2fsck from the ramdisk.
+ * Use an initramfs config file like the following:
+
+	dir /dev 755 0 0
+	nod /dev/console 644 0 0 c 5 1
+	nod /dev/null 644 0 0 c 1 3
+	nod /dev/sda1 644 0 0 b 8 1
+	nod /dev/sda2 644 0 0 b 8 2
+	nod /dev/sda3 644 0 0 b 8 3
+	nod /dev/sda4 644 0 0 b 8 4
+	nod /dev/sda5 644 0 0 b 8 5
+	nod /dev/sda6 644 0 0 b 8 6
+	nod /dev/sda7 644 0 0 b 8 7
+	nod /dev/sda8 644 0 0 b 8 8
+	nod /dev/sda9 644 0 0 b 8 9
+	nod /dev/sda10 644 0 0 b 8 10
+	nod /dev/sda11 644 0 0 b 8 11
+	nod /dev/sda12 644 0 0 b 8 12
+	nod /dev/sda13 644 0 0 b 8 13
+	nod /dev/sda14 644 0 0 b 8 14
+	nod /dev/sda15 644 0 0 b 8 15
+	file /init arch/s390/boot/zfcpdump 755 0 0
+	file /sbin/e2fsck arch/s390/boot/e2fsck 755 0 0
+	dir /proc 755 0 0
+	dir /sys 755 0 0
+	dir /mnt 755 0 0
+	dir /sbin 755 0 0
+
+ * Issue "make image" to build the zfcpdump image with initramfs.
+
+In a Linux distribution the zfcpdump enabled kernel image must be copied to
+/usr/share/zfcpdump/zfcpdump.image, where the s390 zipl tool is looking for the
+dump kernel when preparing a SCSI dump disk.
+
+If you use a ramdisk copy it to "/usr/share/zfcpdump/zfcpdump.rd".
+
+For more information on how to use zfcpdump refer to the s390 'Using the Dump
+Tools book', which is available from
+http://www.ibm.com/developerworks/linux/linux390.
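Conceptually, the dump step performed by the zfcpdump userspace tool described above is a copy of the zcore debugfs file to a file on the mounted SCSI disk. The sketch below shows only that copy loop; it is illustrative and not the real tool: the paths assume debugfs mounted at /sys/kernel/debug and a dump partition mounted at /mnt, and the real application additionally takes care of mounting, e2fsck and error reporting.

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
  	static char buf[1 << 20];	/* 1 MiB copy buffer */
  	ssize_t n;
  	int in, out;

  	in = open("/sys/kernel/debug/zcore/mem", O_RDONLY);
  	out = open("/mnt/dump.s390", O_WRONLY | O_CREAT | O_TRUNC, 0600);
  	if (in < 0 || out < 0) {
  		perror("open");
  		return 1;
  	}

  	/*
  	 * The zcore driver hides the split between the 32 MB saved via
  	 * SCLP and the untouched real memory behind a plain read().
  	 */
  	while ((n = read(in, buf, sizeof(buf))) > 0)
  		if (write(out, buf, n) != n) {
  			perror("write");
  			return 1;
  		}

  	close(out);
  	close(in);
  	return n < 0 ? 1 : 0;
  }

In the initramfs recipe above, the real statically linked tool is installed as /init; a toy copier like this one only illustrates the zcore read path.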
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index ce4013aee59..3ec76586877 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -57,9 +57,6 @@ config ARCH_HAS_ILOG2_U64
 	bool
 	default n
 
-config GENERIC_BUST_SPINLOCK
-	bool
-
 config GENERIC_HWEIGHT
 	bool
 	default y
@@ -68,6 +65,11 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
+config GENERIC_BUG
+	bool
+	default y
+	depends on BUG
+
 source "init/Kconfig"
 
 menu "System Type and features"
@@ -106,6 +108,9 @@ choice
 config BOARD_ATSTK1000
 	bool "ATSTK1000 evaluation board"
 	select BOARD_ATSTK1002 if CPU_AT32AP7000
+
+config BOARD_ATNGW100
+	bool "ATNGW100 Network Gateway"
 endchoice
 
 choice
@@ -116,6 +121,8 @@ config LOADER_U_BOOT
 	bool "U-Boot (or similar) bootloader"
 endchoice
 
+source "arch/avr32/mach-at32ap/Kconfig"
+
 config LOAD_ADDRESS
 	hex
 	default 0x10000000 if LOADER_U_BOOT=y && CPU_AT32AP7000=y
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index 7b842e98efe..6115fc1f0cf 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -27,6 +27,7 @@ head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o
 head-y += arch/avr32/kernel/head.o
 core-$(CONFIG_PLATFORM_AT32AP) += arch/avr32/mach-at32ap/
 core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/
+core-$(CONFIG_BOARD_ATNGW100) += arch/avr32/boards/atngw100/
 core-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/
 core-y += arch/avr32/kernel/
 core-y += arch/avr32/mm/
diff --git a/arch/avr32/boards/atngw100/Makefile b/arch/avr32/boards/atngw100/Makefile
new file mode 100644
index 00000000000..c740aa11675
--- /dev/null
+++ b/arch/avr32/boards/atngw100/Makefile
@@ -0,0 +1 @@
+obj-y += setup.o flash.o
diff --git a/arch/avr32/boards/atngw100/flash.c b/arch/avr32/boards/atngw100/flash.c
new file mode 100644
index 00000000000..f9b32a8eab9
--- /dev/null
+++ b/arch/avr32/boards/atngw100/flash.c
@@ -0,0 +1,95 @@
+/*
+ * ATNGW100 board-specific flash initialization
+ *
+ * Copyright (C) 2005-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/physmap.h> + +#include <asm/arch/smc.h> + +static struct smc_config flash_config __initdata = { + .ncs_read_setup = 0, + .nrd_setup = 40, + .ncs_write_setup = 0, + .nwe_setup = 10, + + .ncs_read_pulse = 80, + .nrd_pulse = 40, + .ncs_write_pulse = 65, + .nwe_pulse = 55, + + .read_cycle = 120, + .write_cycle = 120, + + .bus_width = 2, + .nrd_controlled = 1, + .nwe_controlled = 1, + .byte_write = 1, +}; + +static struct mtd_partition flash_parts[] = { + { + .name = "u-boot", + .offset = 0x00000000, + .size = 0x00020000, /* 128 KiB */ + .mask_flags = MTD_WRITEABLE, + }, + { + .name = "root", + .offset = 0x00020000, + .size = 0x007d0000, + }, + { + .name = "env", + .offset = 0x007f0000, + .size = 0x00010000, + .mask_flags = MTD_WRITEABLE, + }, +}; + +static struct physmap_flash_data flash_data = { + .width = 2, + .nr_parts = ARRAY_SIZE(flash_parts), + .parts = flash_parts, +}; + +static struct resource flash_resource = { + .start = 0x00000000, + .end = 0x007fffff, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device flash_device = { + .name = "physmap-flash", + .id = 0, + .resource = &flash_resource, + .num_resources = 1, + .dev = { + .platform_data = &flash_data, + }, +}; + +/* This needs to be called after the SMC has been initialized */ +static int __init atngw100_flash_init(void) +{ + int ret; + + ret = smc_set_configuration(0, &flash_config); + if (ret < 0) { + printk(KERN_ERR "atngw100: failed to set NOR flash timing\n"); + return ret; + } + + platform_device_register(&flash_device); + + return 0; +} +device_initcall(atngw100_flash_init); diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c new file mode 100644 index 00000000000..9bc37d4f668 --- /dev/null +++ b/arch/avr32/boards/atngw100/setup.c @@ -0,0 +1,124 @@ +/* + * Board-specific setup code for the ATNGW100 Network Gateway + * + * Copyright (C) 2005-2006 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/clk.h> +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/linkage.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <linux/spi/spi.h> + +#include <asm/io.h> +#include <asm/setup.h> + +#include <asm/arch/at32ap7000.h> +#include <asm/arch/board.h> +#include <asm/arch/init.h> + +/* Initialized by bootloader-specific startup code. */ +struct tag *bootloader_tags __initdata; + +struct eth_addr { + u8 addr[6]; +}; +static struct eth_addr __initdata hw_addr[2]; +static struct eth_platform_data __initdata eth_data[2]; + +static struct spi_board_info spi0_board_info[] __initdata = { + { + .modalias = "mtd_dataflash", + .max_speed_hz = 10000000, + .chip_select = 0, + }, +}; + +/* + * The next two functions should go away as the boot loader is + * supposed to initialize the macb address registers with a valid + * ethernet address. But we need to keep it around for a while until + * we can be reasonably sure the boot loader does this. + * + * The phy_id is ignored as the driver will probe for it. 
+ */ +static int __init parse_tag_ethernet(struct tag *tag) +{ + int i; + + i = tag->u.ethernet.mac_index; + if (i < ARRAY_SIZE(hw_addr)) + memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address, + sizeof(hw_addr[i].addr)); + + return 0; +} +__tagtable(ATAG_ETHERNET, parse_tag_ethernet); + +static void __init set_hw_addr(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + const u8 *addr; + void __iomem *regs; + struct clk *pclk; + + if (!res) + return; + if (pdev->id >= ARRAY_SIZE(hw_addr)) + return; + + addr = hw_addr[pdev->id].addr; + if (!is_valid_ether_addr(addr)) + return; + + /* + * Since this is board-specific code, we'll cheat and use the + * physical address directly as we happen to know that it's + * the same as the virtual address. + */ + regs = (void __iomem __force *)res->start; + pclk = clk_get(&pdev->dev, "pclk"); + if (!pclk) + return; + + clk_enable(pclk); + __raw_writel((addr[3] << 24) | (addr[2] << 16) + | (addr[1] << 8) | addr[0], regs + 0x98); + __raw_writel((addr[5] << 8) | addr[4], regs + 0x9c); + clk_disable(pclk); + clk_put(pclk); +} + +struct platform_device *at32_usart_map[1]; +unsigned int at32_nr_usarts = 1; + +void __init setup_board(void) +{ + at32_map_usart(1, 0); /* USART 1: /dev/ttyS0, DB9 */ + at32_setup_serial_console(0); +} + +static int __init atngw100_init(void) +{ + /* + * ATNGW100 uses 16-bit SDRAM interface, so we don't need to + * reserve any pins for it. + */ + + at32_add_system_devices(); + + at32_add_device_usart(0); + + set_hw_addr(at32_add_device_eth(0, ð_data[0])); + set_hw_addr(at32_add_device_eth(1, ð_data[1])); + + at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); + + return 0; +} +postcore_initcall(atngw100_init); diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c index 5974768a59e..abe6ca203fa 100644 --- a/arch/avr32/boards/atstk1000/atstk1002.c +++ b/arch/avr32/boards/atstk1000/atstk1002.c @@ -33,7 +33,7 @@ struct eth_addr { static struct eth_addr __initdata hw_addr[2]; static struct eth_platform_data __initdata eth_data[2]; -extern struct lcdc_platform_data atstk1000_fb0_data; +static struct lcdc_platform_data atstk1000_fb0_data; static struct spi_board_info spi0_board_info[] __initdata = { { @@ -148,6 +148,8 @@ static int __init atstk1002_init(void) set_hw_addr(at32_add_device_eth(0, ð_data[0])); at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); + atstk1000_fb0_data.fbmem_start = fbmem_start; + atstk1000_fb0_data.fbmem_size = fbmem_size; at32_add_device_lcdc(0, &atstk1000_fb0_data); return 0; diff --git a/arch/avr32/boards/atstk1000/setup.c b/arch/avr32/boards/atstk1000/setup.c index 272c011802a..2bc4b88d7ed 100644 --- a/arch/avr32/boards/atstk1000/setup.c +++ b/arch/avr32/boards/atstk1000/setup.c @@ -18,33 +18,3 @@ /* Initialized by bootloader-specific startup code. 
*/ struct tag *bootloader_tags __initdata; - -struct lcdc_platform_data __initdata atstk1000_fb0_data; - -void __init board_setup_fbmem(unsigned long fbmem_start, - unsigned long fbmem_size) -{ - if (!fbmem_size) - return; - - if (!fbmem_start) { - void *fbmem; - - fbmem = alloc_bootmem_low_pages(fbmem_size); - fbmem_start = __pa(fbmem); - } else { - pg_data_t *pgdat; - - for_each_online_pgdat(pgdat) { - if (fbmem_start >= pgdat->bdata->node_boot_start - && fbmem_start <= pgdat->bdata->node_low_pfn) - reserve_bootmem_node(pgdat, fbmem_start, - fbmem_size); - } - } - - printk("%luKiB framebuffer memory at address 0x%08lx\n", - fbmem_size >> 10, fbmem_start); - atstk1000_fb0_data.fbmem_start = fbmem_start; - atstk1000_fb0_data.fbmem_size = fbmem_size; -} diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig new file mode 100644 index 00000000000..c254ffcfa45 --- /dev/null +++ b/arch/avr32/configs/atngw100_defconfig @@ -0,0 +1,1085 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.21-rc6 +# Thu Apr 12 16:35:07 2007 +# +CONFIG_AVR32=y +CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_TIME=y +# CONFIG_ARCH_HAS_ILOG2_U32 is not set +# CONFIG_ARCH_HAS_ILOG2_U64 is not set +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_BUG=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 + +# +# General setup +# +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_IPC_NS is not set +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +# CONFIG_TASKSTATS is not set +# CONFIG_UTS_NS is not set +# CONFIG_AUDIT is not set +# CONFIG_IKCONFIG is not set +CONFIG_SYSFS_DEPRECATED=y +# CONFIG_RELAY is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_EMBEDDED=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +# CONFIG_BASE_FULL is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SHMEM=y +CONFIG_SLAB=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_RT_MUTEXES=y +# CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=1 +# CONFIG_SLOB is not set + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y + +# +# Block layer +# +CONFIG_BLOCK=y +# CONFIG_LBD is not set +# CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_LSF is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" + +# +# System Type and features +# +CONFIG_SUBARCH_AVR32B=y +CONFIG_MMU=y +CONFIG_PERFORMANCE_COUNTERS=y +CONFIG_PLATFORM_AT32AP=y +CONFIG_CPU_AT32AP7000=y +# CONFIG_BOARD_ATSTK1000 is not set +CONFIG_BOARD_ATNGW100=y +CONFIG_LOADER_U_BOOT=y + +# +# Atmel AVR32 AP options +# +# CONFIG_AP7000_32_BIT_SMC is not set +CONFIG_AP7000_16_BIT_SMC=y +# 
CONFIG_AP7000_8_BIT_SMC is not set +CONFIG_LOAD_ADDRESS=0x10000000 +CONFIG_ENTRY_ADDRESS=0x90000000 +CONFIG_PHYS_OFFSET=0x10000000 +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +# CONFIG_HAVE_ARCH_BOOTMEM_NODE is not set +# CONFIG_ARCH_HAVE_MEMORY_PRESENT is not set +# CONFIG_NEED_NODE_MEMMAP_SIZE is not set +CONFIG_ARCH_FLATMEM_ENABLE=y +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +# CONFIG_ARCH_SPARSEMEM_ENABLE is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_RESOURCES_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +# CONFIG_OWNERSHIP_TRACE is not set +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_CMDLINE="" + +# +# Bus options +# + +# +# PCCARD (PCMCIA/CardBus) support +# +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set + +# +# Networking +# +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_NETDEBUG is not set +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_SUB_POLICY is not set +# CONFIG_XFRM_MIGRATE is not set +CONFIG_NET_KEY=y +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_MULTIPLE_TABLES is not set +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +# CONFIG_IP_PIMSM_V2 is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_XFRM_TUNNEL=y +CONFIG_INET_TUNNEL=y +CONFIG_INET_XFRM_MODE_TRANSPORT=y +CONFIG_INET_XFRM_MODE_TUNNEL=y +CONFIG_INET_XFRM_MODE_BEET=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set + +# +# IP: Virtual Server Configuration +# +# CONFIG_IP_VS is not set +CONFIG_IPV6=y +# CONFIG_IPV6_PRIVACY is not set +# CONFIG_IPV6_ROUTER_PREF is not set +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +# CONFIG_IPV6_MIP6 is not set +CONFIG_INET6_XFRM_TUNNEL=y +CONFIG_INET6_TUNNEL=y +CONFIG_INET6_XFRM_MODE_TRANSPORT=y +CONFIG_INET6_XFRM_MODE_TUNNEL=y +CONFIG_INET6_XFRM_MODE_BEET=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set +CONFIG_IPV6_SIT=y +# CONFIG_IPV6_TUNNEL is not set +# CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_NETWORK_SECMARK is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set + +# +# Core Netfilter Configuration +# +# CONFIG_NETFILTER_NETLINK is not set +CONFIG_NF_CONNTRACK_ENABLED=m +CONFIG_NF_CONNTRACK_SUPPORT=y +# CONFIG_IP_NF_CONNTRACK_SUPPORT is not set +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CT_ACCT=y +CONFIG_NF_CONNTRACK_MARK=y +# CONFIG_NF_CONNTRACK_EVENTS is not set +CONFIG_NF_CT_PROTO_GRE=m +# CONFIG_NF_CT_PROTO_SCTP is not set +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m 
+CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_IP_NF_QUEUE is not set +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_IPRANGE=m +CONFIG_IP_NF_MATCH_TOS=m +CONFIG_IP_NF_MATCH_RECENT=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_MATCH_OWNER=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m +# CONFIG_IP_NF_TARGET_ULOG is not set +CONFIG_NF_NAT=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_SAME=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_NF_NAT_SIP=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_TOS=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration (EXPERIMENTAL) +# +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_QUEUE=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_OWNER=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_LOG=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_RAW=m + +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set + +# +# TIPC Configuration (EXPERIMENTAL) +# +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +CONFIG_VLAN_8021Q=m +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not 
set +CONFIG_NET_CLS_ROUTE=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_IEEE80211 is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set + +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CFI_AMDSTD=y +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# CONFIG_MTD_OBSOLETE_CHIPS is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_START=0x80000000 +CONFIG_MTD_PHYSMAP_LEN=0x0 +CONFIG_MTD_PHYSMAP_BANKWIDTH=2 +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +CONFIG_MTD_DATAFLASH=y +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +# CONFIG_MTD_NAND is not set + +# +# OneNAND Flash Device Drivers +# +# CONFIG_MTD_ONENAND is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# +# CONFIG_PNPACPI is not set + +# +# Block devices +# +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=m +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set + +# +# Misc devices +# + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_NETLINK is not set + +# +# Serial ATA (prod) and Parallel ATA (experimental) drivers +# +# CONFIG_ATA is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# + +# +# I2O device support +# + +# +# Network device support +# +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# 
CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +CONFIG_TUN=m + +# +# PHY device support +# +# CONFIG_PHYLIB is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_MACB=y + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +CONFIG_PPP=m +# CONFIG_PPP_MULTILINK is not set +CONFIG_PPP_FILTER=y +CONFIG_PPP_ASYNC=m +# CONFIG_PPP_SYNC_TTY is not set +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_MPPE=m +CONFIG_PPPOE=m +# CONFIG_SLIP is not set +CONFIG_SLHC=m +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +# CONFIG_INPUT is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_ATMEL=y +CONFIG_SERIAL_ATMEL_CONSOLE=y +# CONFIG_SERIAL_ATMEL_TTYAT is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set + +# +# TPM devices +# +# CONFIG_TCG_TPM is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# SPI support +# +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_ATMEL=y +# CONFIG_SPI_BITBANG is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_AT25 is not set + +# +# Dallas's 1-wire bus +# +# CONFIG_W1 is not set + +# +# Hardware Monitoring support +# +# CONFIG_HWMON is not set +# CONFIG_HWMON_VID is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_SM501 is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set +# CONFIG_FB is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB_ARCH_HAS_HCD is not set +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB_ARCH_HAS_EHCI is not set + +# +# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# MMC/SD Card support +# +# CONFIG_MMC is not set + +# +# LED devices +# +# CONFIG_NEW_LEDS is not set + +# +# LED drivers +# + +# +# LED Triggers +# + +# +# InfiniBand support +# + +# +# EDAC - error detection and reporting (RAS) (EXPERIMENTAL) +# + +# +# Real Time Clock +# +# CONFIG_RTC_CLASS is not set + +# +# DMA Engine support +# +# CONFIG_DMA_ENGINE is not set + +# +# DMA Clients +# + +# +# DMA Devices +# + +# +# Auxiliary Display support +# + +# +# Virtualization +# + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_FS_XATTR is not set +# CONFIG_EXT4DEV_FS is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is 
not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_INOTIFY is not set +# CONFIG_QUOTA is not set +# CONFIG_DNOTIFY is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +CONFIG_FUSE_FS=m + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=850 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +# CONFIG_PROC_KCORE is not set +CONFIG_PROC_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_TMPFS_POSIX_ACL is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y +CONFIG_CONFIGFS_FS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_SUMMARY is not set +# CONFIG_JFFS2_FS_XATTR is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SMB_FS=m +# CONFIG_SMB_NLS_DEFAULT is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS is not set +# CONFIG_CIFS_WEAK_PW_HASH is not set +# CONFIG_CIFS_XATTR is not set +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_EXPERIMENTAL is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +CONFIG_NLS_CODEPAGE_850=y +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# 
CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Distributed Lock Manager +# +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_MAGIC_SYSRQ=y +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SHIRQ is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_LIST is not set +CONFIG_FRAME_POINTER=y +# CONFIG_FORCED_INLINING is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_KPROBES is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +CONFIG_CRYPTO=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_SHA1=y +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_WP512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_GF128MUL is not set +CONFIG_CRYPTO_ECB=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_PCBC=m +# CONFIG_CRYPTO_LRW is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_TWOFISH is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_TEA is not set +CONFIG_CRYPTO_ARC4=m +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_ANUBIS is not set +CONFIG_CRYPTO_DEFLATE=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Hardware crypto devices +# + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_CRC_CCITT=m +# CONFIG_CRC16 is not set +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_PLIST=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y diff --git a/arch/avr32/kernel/cpu.c b/arch/avr32/kernel/cpu.c index 2e72fd2699d..2714cf6452b 100644 --- a/arch/avr32/kernel/cpu.c +++ b/arch/avr32/kernel/cpu.c @@ -209,16 +209,17 @@ static const char *mmu_types[] = { void __init setup_processor(void) { unsigned long config0, config1; + unsigned long features; unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type; unsigned tmp; - config0 = sysreg_read(CONFIG0); /* 0x0000013e; */ - config1 = sysreg_read(CONFIG1); /* 0x01f689a2; */ - cpu_id = config0 >> 24; - cpu_rev = (config0 >> 16) & 0xff; - arch_id 
= (config0 >> 13) & 0x07; - arch_rev = (config0 >> 10) & 0x07; - mmu_type = (config0 >> 7) & 0x03; + config0 = sysreg_read(CONFIG0); + config1 = sysreg_read(CONFIG1); + cpu_id = SYSREG_BFEXT(PROCESSORID, config0); + cpu_rev = SYSREG_BFEXT(PROCESSORREVISION, config0); + arch_id = SYSREG_BFEXT(AT, config0); + arch_rev = SYSREG_BFEXT(AR, config0); + mmu_type = SYSREG_BFEXT(MMUT, config0); boot_cpu_data.arch_type = arch_id; boot_cpu_data.cpu_type = cpu_id; @@ -226,16 +227,16 @@ void __init setup_processor(void) boot_cpu_data.cpu_revision = cpu_rev; boot_cpu_data.tlb_config = mmu_type; - tmp = (config1 >> 13) & 0x07; + tmp = SYSREG_BFEXT(ILSZ, config1); if (tmp) { - boot_cpu_data.icache.ways = 1 << ((config1 >> 10) & 0x07); - boot_cpu_data.icache.sets = 1 << ((config1 >> 16) & 0x0f); + boot_cpu_data.icache.ways = 1 << SYSREG_BFEXT(IASS, config1); + boot_cpu_data.icache.sets = 1 << SYSREG_BFEXT(ISET, config1); boot_cpu_data.icache.linesz = 1 << (tmp + 1); } - tmp = (config1 >> 3) & 0x07; + tmp = SYSREG_BFEXT(DLSZ, config1); if (tmp) { - boot_cpu_data.dcache.ways = 1 << (config1 & 0x07); - boot_cpu_data.dcache.sets = 1 << ((config1 >> 6) & 0x0f); + boot_cpu_data.dcache.ways = 1 << SYSREG_BFEXT(DASS, config1); + boot_cpu_data.dcache.sets = 1 << SYSREG_BFEXT(DSET, config1); boot_cpu_data.dcache.linesz = 1 << (tmp + 1); } @@ -250,16 +251,39 @@ void __init setup_processor(void) cpu_names[cpu_id], cpu_id, cpu_rev, arch_names[arch_id], arch_rev); printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]); + printk ("CPU: features:"); - if (config0 & (1 << 6)) - printk(" fpu"); - if (config0 & (1 << 5)) - printk(" java"); - if (config0 & (1 << 4)) - printk(" perfctr"); - if (config0 & (1 << 3)) + features = 0; + if (config0 & SYSREG_BIT(CONFIG0_R)) { + features |= AVR32_FEATURE_RMW; + printk(" rmw"); + } + if (config0 & SYSREG_BIT(CONFIG0_D)) { + features |= AVR32_FEATURE_DSP; + printk(" dsp"); + } + if (config0 & SYSREG_BIT(CONFIG0_S)) { + features |= AVR32_FEATURE_SIMD; + printk(" simd"); + } + if (config0 & SYSREG_BIT(CONFIG0_O)) { + features |= AVR32_FEATURE_OCD; printk(" ocd"); + } + if (config0 & SYSREG_BIT(CONFIG0_P)) { + features |= AVR32_FEATURE_PCTR; + printk(" perfctr"); + } + if (config0 & SYSREG_BIT(CONFIG0_J)) { + features |= AVR32_FEATURE_JAVA; + printk(" java"); + } + if (config0 & SYSREG_BIT(CONFIG0_F)) { + features |= AVR32_FEATURE_FPU; + printk(" fpu"); + } printk("\n"); + boot_cpu_data.features = features; } #ifdef CONFIG_PROC_FS diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S index eeb66792bc3..42657f1703b 100644 --- a/arch/avr32/kernel/entry-avr32b.S +++ b/arch/avr32/kernel/entry-avr32b.S @@ -100,55 +100,49 @@ dtlb_miss_write: .global tlb_miss_common tlb_miss_common: - mfsr r0, SYSREG_PTBR - mfsr r1, SYSREG_TLBEAR + mfsr r0, SYSREG_TLBEAR + mfsr r1, SYSREG_PTBR /* Is it the vmalloc space? */ - bld r1, 31 + bld r0, 31 brcs handle_vmalloc_miss /* First level lookup */ pgtbl_lookup: - lsr r2, r1, PGDIR_SHIFT - ld.w r0, r0[r2 << 2] - bld r0, _PAGE_BIT_PRESENT + lsr r2, r0, PGDIR_SHIFT + ld.w r3, r1[r2 << 2] + bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT + bld r3, _PAGE_BIT_PRESENT brcc page_table_not_present - /* TODO: Check access rights on page table if necessary */ - /* Translate to virtual address in P1. 
*/ - andl r0, 0xf000 - sbr r0, 31 + andl r3, 0xf000 + sbr r3, 31 /* Second level lookup */ - lsl r1, (32 - PGDIR_SHIFT) - lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT - add r2, r0, r1 << 2 - ld.w r1, r2[0] - bld r1, _PAGE_BIT_PRESENT + ld.w r2, r3[r1 << 2] + mfsr r0, SYSREG_TLBARLO + bld r2, _PAGE_BIT_PRESENT brcc page_not_present /* Mark the page as accessed */ - sbr r1, _PAGE_BIT_ACCESSED - st.w r2[0], r1 + sbr r2, _PAGE_BIT_ACCESSED + st.w r3[r1 << 2], r2 /* Drop software flags */ - andl r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff - mtsr SYSREG_TLBELO, r1 + andl r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff + mtsr SYSREG_TLBELO, r2 /* Figure out which entry we want to replace */ - mfsr r0, SYSREG_TLBARLO + mfsr r1, SYSREG_MMUCR clz r2, r0 brcc 1f - mov r1, -1 /* All entries have been accessed, */ - mtsr SYSREG_TLBARLO, r1 /* so reset TLBAR */ - mov r2, 0 /* and start at 0 */ -1: mfsr r1, SYSREG_MMUCR - lsl r2, 14 - andl r1, 0x3fff, COH - or r1, r2 - mtsr SYSREG_MMUCR, r1 + mov r3, -1 /* All entries have been accessed, */ + mov r2, 0 /* so start at 0 */ + mtsr SYSREG_TLBARLO, r3 /* and reset TLBAR */ +1: bfins r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE + mtsr SYSREG_MMUCR, r1 tlbw tlbmiss_restore @@ -156,8 +150,8 @@ pgtbl_lookup: handle_vmalloc_miss: /* Simply do the lookup in init's page table */ - mov r0, lo(swapper_pg_dir) - orh r0, hi(swapper_pg_dir) + mov r1, lo(swapper_pg_dir) + orh r1, hi(swapper_pg_dir) rjmp pgtbl_lookup @@ -340,12 +334,34 @@ do_bus_error_read: do_nmi_ll: sub sp, 4 stmts --sp, r0-lr - /* FIXME: Make sure RAR_NMI and RSR_NMI are pushed instead of *_EX */ - rcall save_full_context_ex + mfsr r9, SYSREG_RSR_NMI + mfsr r8, SYSREG_RAR_NMI + bfextu r0, r9, MODE_SHIFT, 3 + brne 2f + +1: pushm r8, r9 /* PC and SR */ mfsr r12, SYSREG_ECR mov r11, sp rcall do_nmi - rjmp bad_return + popm r8-r9 + mtsr SYSREG_RAR_NMI, r8 + tst r0, r0 + mtsr SYSREG_RSR_NMI, r9 + brne 3f + + ldmts sp++, r0-lr + sub sp, -4 /* skip r12_orig */ + rete + +2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR) + stdsp sp[4], r10 /* replace saved SP */ + rjmp 1b + +3: popm lr + sub sp, -4 /* skip sp */ + popm r0-r12 + sub sp, -4 /* skip r12_orig */ + rete handle_address_fault: sub sp, 4 @@ -630,9 +646,12 @@ irq_level\level: rcall do_IRQ lddsp r4, sp[REG_SR] - andh r4, (MODE_MASK >> 16), COH + bfextu r4, r4, SYSREG_M0_OFFSET, 3 + cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET + breq 2f + cp.w r4, MODE_USER >> SYSREG_M0_OFFSET #ifdef CONFIG_PREEMPT - brne 2f + brne 3f #else brne 1f #endif @@ -649,9 +668,18 @@ irq_level\level: sub sp, -4 /* ignore r12_orig */ rete +2: get_thread_info r0 + ld.w r1, r0[TI_flags] + bld r1, TIF_CPU_GOING_TO_SLEEP #ifdef CONFIG_PREEMPT -2: - get_thread_info r0 + brcc 3f +#else + brcc 1b +#endif + sub r1, pc, . 
- cpu_idle_skip_sleep + stdsp sp[REG_PC], r1 +#ifdef CONFIG_PREEMPT +3: get_thread_info r0 ld.w r2, r0[TI_preempt_count] cp.w r2, 0 brne 1b @@ -662,12 +690,32 @@ irq_level\level: bld r4, SYSREG_GM_OFFSET brcs 1b rcall preempt_schedule_irq - rjmp 1b #endif + rjmp 1b .endm .section .irq.text,"ax",@progbits +.global cpu_idle_sleep +cpu_idle_sleep: + mask_interrupts + get_thread_info r8 + ld.w r9, r8[TI_flags] + bld r9, TIF_NEED_RESCHED + brcs cpu_idle_enable_int_and_exit + sbr r9, TIF_CPU_GOING_TO_SLEEP + st.w r8[TI_flags], r9 + unmask_interrupts + sleep 0 +cpu_idle_skip_sleep: + mask_interrupts + ld.w r9, r8[TI_flags] + cbr r9, TIF_CPU_GOING_TO_SLEEP + st.w r8[TI_flags], r9 +cpu_idle_enable_int_and_exit: + unmask_interrupts + retal r12 + .global irq_level0 .global irq_level1 .global irq_level2 diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c index b599eae6457..1167fe9cf6c 100644 --- a/arch/avr32/kernel/module.c +++ b/arch/avr32/kernel/module.c @@ -12,10 +12,11 @@ * published by the Free Software Foundation. */ -#include <linux/moduleloader.h> -#include <linux/module.h> -#include <linux/kernel.h> +#include <linux/bug.h> #include <linux/elf.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleloader.h> #include <linux/vmalloc.h> void *module_alloc(unsigned long size) @@ -315,10 +316,10 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, vfree(module->arch.syminfo); module->arch.syminfo = NULL; - return 0; + return module_bug_finalize(hdr, sechdrs, module); } void module_arch_cleanup(struct module *module) { - + module_bug_cleanup(module); } diff --git a/arch/avr32/kernel/process.c b/arch/avr32/kernel/process.c index 0b4325946a4..4e4181ed1c6 100644 --- a/arch/avr32/kernel/process.c +++ b/arch/avr32/kernel/process.c @@ -11,6 +11,7 @@ #include <linux/fs.h> #include <linux/ptrace.h> #include <linux/reboot.h> +#include <linux/uaccess.h> #include <linux/unistd.h> #include <asm/sysreg.h> @@ -19,6 +20,8 @@ void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); +extern void cpu_idle_sleep(void); + /* * This file handles the architecture-dependent parts of process handling.. 
*/ @@ -27,9 +30,8 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { - /* TODO: Enter sleep mode */ while (!need_resched()) - cpu_relax(); + cpu_idle_sleep(); preempt_enable_no_resched(); schedule(); preempt_disable(); @@ -114,39 +116,178 @@ void release_thread(struct task_struct *dead_task) /* do nothing */ } +static void dump_mem(const char *str, const char *log_lvl, + unsigned long bottom, unsigned long top) +{ + unsigned long p; + int i; + + printk("%s%s(0x%08lx to 0x%08lx)\n", log_lvl, str, bottom, top); + + for (p = bottom & ~31; p < top; ) { + printk("%s%04lx: ", log_lvl, p & 0xffff); + + for (i = 0; i < 8; i++, p += 4) { + unsigned int val; + + if (p < bottom || p >= top) + printk(" "); + else { + if (__get_user(val, (unsigned int __user *)p)) { + printk("\n"); + goto out; + } + printk("%08x ", val); + } + } + printk("\n"); + } + +out: + return; +} + +static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p) +{ + return (p > (unsigned long)tinfo) + && (p < (unsigned long)tinfo + THREAD_SIZE - 3); +} + +#ifdef CONFIG_FRAME_POINTER +static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs, const char *log_lvl) +{ + unsigned long lr, fp; + struct thread_info *tinfo; + + if (regs) + fp = regs->r7; + else if (tsk == current) + asm("mov %0, r7" : "=r"(fp)); + else + fp = tsk->thread.cpu_context.r7; + + /* + * Walk the stack as long as the frame pointer (a) is within + * the kernel stack of the task, and (b) it doesn't move + * downwards. + */ + tinfo = task_thread_info(tsk); + printk("%sCall trace:\n", log_lvl); + while (valid_stack_ptr(tinfo, fp)) { + unsigned long new_fp; + + lr = *(unsigned long *)fp; +#ifdef CONFIG_KALLSYMS + printk("%s [<%08lx>] ", log_lvl, lr); +#else + printk(" [<%08lx>] ", lr); +#endif + print_symbol("%s\n", lr); + + new_fp = *(unsigned long *)(fp + 4); + if (new_fp <= fp) + break; + fp = new_fp; + } + printk("\n"); +} +#else +static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs, const char *log_lvl) +{ + unsigned long addr; + + printk("%sCall trace:\n", log_lvl); + + while (!kstack_end(sp)) { + addr = *sp++; + if (kernel_text_address(addr)) { +#ifdef CONFIG_KALLSYMS + printk("%s [<%08lx>] ", log_lvl, addr); +#else + printk(" [<%08lx>] ", addr); +#endif + print_symbol("%s\n", addr); + } + } + printk("\n"); +} +#endif + +void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp, + struct pt_regs *regs, const char *log_lvl) +{ + struct thread_info *tinfo; + + if (sp == 0) { + if (tsk) + sp = tsk->thread.cpu_context.ksp; + else + sp = (unsigned long)&tinfo; + } + if (!tsk) + tsk = current; + + tinfo = task_thread_info(tsk); + + if (valid_stack_ptr(tinfo, sp)) { + dump_mem("Stack: ", log_lvl, sp, + THREAD_SIZE + (unsigned long)tinfo); + show_trace_log_lvl(tsk, (unsigned long *)sp, regs, log_lvl); + } +} + +void show_stack(struct task_struct *tsk, unsigned long *stack) +{ + show_stack_log_lvl(tsk, (unsigned long)stack, NULL, ""); +} + +void dump_stack(void) +{ + unsigned long stack; + + show_trace_log_lvl(current, &stack, NULL, ""); +} +EXPORT_SYMBOL(dump_stack); + static const char *cpu_modes[] = { "Application", "Supervisor", "Interrupt level 0", "Interrupt level 1", "Interrupt level 2", "Interrupt level 3", "Exception", "NMI" }; -void show_regs(struct pt_regs *regs) +void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl) { unsigned long sp = regs->sp; unsigned long lr = regs->lr; unsigned long mode = 
(regs->sr & MODE_MASK) >> MODE_SHIFT; - if (!user_mode(regs)) + if (!user_mode(regs)) { sp = (unsigned long)regs + FRAME_SIZE_FULL; - print_symbol("PC is at %s\n", instruction_pointer(regs)); - print_symbol("LR is at %s\n", lr); - printk("pc : [<%08lx>] lr : [<%08lx>] %s\n" - "sp : %08lx r12: %08lx r11: %08lx\n", - instruction_pointer(regs), - lr, print_tainted(), sp, regs->r12, regs->r11); - printk("r10: %08lx r9 : %08lx r8 : %08lx\n", - regs->r10, regs->r9, regs->r8); - printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", - regs->r7, regs->r6, regs->r5, regs->r4); - printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", - regs->r3, regs->r2, regs->r1, regs->r0); - printk("Flags: %c%c%c%c%c\n", + printk("%s", log_lvl); + print_symbol("PC is at %s\n", instruction_pointer(regs)); + printk("%s", log_lvl); + print_symbol("LR is at %s\n", lr); + } + + printk("%spc : [<%08lx>] lr : [<%08lx>] %s\n" + "%ssp : %08lx r12: %08lx r11: %08lx\n", + log_lvl, instruction_pointer(regs), lr, print_tainted(), + log_lvl, sp, regs->r12, regs->r11); + printk("%sr10: %08lx r9 : %08lx r8 : %08lx\n", + log_lvl, regs->r10, regs->r9, regs->r8); + printk("%sr7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", + log_lvl, regs->r7, regs->r6, regs->r5, regs->r4); + printk("%sr3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", + log_lvl, regs->r3, regs->r2, regs->r1, regs->r0); + printk("%sFlags: %c%c%c%c%c\n", log_lvl, regs->sr & SR_Q ? 'Q' : 'q', regs->sr & SR_V ? 'V' : 'v', regs->sr & SR_N ? 'N' : 'n', regs->sr & SR_Z ? 'Z' : 'z', regs->sr & SR_C ? 'C' : 'c'); - printk("Mode bits: %c%c%c%c%c%c%c%c%c\n", + printk("%sMode bits: %c%c%c%c%c%c%c%c%c\n", log_lvl, regs->sr & SR_H ? 'H' : 'h', regs->sr & SR_R ? 'R' : 'r', regs->sr & SR_J ? 'J' : 'j', @@ -156,9 +297,21 @@ void show_regs(struct pt_regs *regs) regs->sr & SR_I1M ? '1' : '.', regs->sr & SR_I0M ? '0' : '.', regs->sr & SR_GM ? 'G' : 'g'); - printk("CPU Mode: %s\n", cpu_modes[mode]); + printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]); + printk("%sProcess: %s [%d] (task: %p thread: %p)\n", + log_lvl, current->comm, current->pid, current, + task_thread_info(current)); +} + +void show_regs(struct pt_regs *regs) +{ + unsigned long sp = regs->sp; + + if (!user_mode(regs)) + sp = (unsigned long)regs + FRAME_SIZE_FULL; - show_trace(NULL, (unsigned long *)sp, regs); + show_regs_log_lvl(regs, ""); + show_trace_log_lvl(current, (unsigned long *)sp, regs, ""); } EXPORT_SYMBOL(show_regs); diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c index a1a7c3c3f52..b279d66acf5 100644 --- a/arch/avr32/kernel/setup.c +++ b/arch/avr32/kernel/setup.c @@ -8,12 +8,14 @@ #include <linux/clk.h> #include <linux/init.h> +#include <linux/initrd.h> #include <linux/sched.h> #include <linux/console.h> #include <linux/ioport.h> #include <linux/bootmem.h> #include <linux/fs.h> #include <linux/module.h> +#include <linux/pfn.h> #include <linux/root_dev.h> #include <linux/cpu.h> #include <linux/kernel.h> @@ -30,13 +32,6 @@ extern int root_mountflags; /* - * Bootloader-provided information about physical memory - */ -struct tag_mem_range *mem_phys; -struct tag_mem_range *mem_reserved; -struct tag_mem_range *mem_ramdisk; - -/* * Initialize loops_per_jiffy as 5000000 (500MIPS). * Better make it too large than too small... */ @@ -48,48 +43,193 @@ EXPORT_SYMBOL(boot_cpu_data); static char __initdata command_line[COMMAND_LINE_SIZE]; /* - * Should be more than enough, but if you have a _really_ complex - * setup, you might need to increase the size of this... 
+ * Standard memory resources */ -static struct tag_mem_range __initdata mem_range_cache[32]; -static unsigned mem_range_next_free; +static struct resource __initdata kernel_data = { + .name = "Kernel data", + .start = 0, + .end = 0, + .flags = IORESOURCE_MEM, +}; +static struct resource __initdata kernel_code = { + .name = "Kernel code", + .start = 0, + .end = 0, + .flags = IORESOURCE_MEM, + .sibling = &kernel_data, +}; /* - * Standard memory resources + * Available system RAM and reserved regions as singly linked + * lists. These lists are traversed using the sibling pointer in + * struct resource and are kept sorted at all times. */ -static struct resource mem_res[] = { - { - .name = "Kernel code", - .start = 0, - .end = 0, - .flags = IORESOURCE_MEM - }, - { - .name = "Kernel data", - .start = 0, - .end = 0, - .flags = IORESOURCE_MEM, - }, -}; +static struct resource *__initdata system_ram; +static struct resource *__initdata reserved = &kernel_code; + +/* + * We need to allocate these before the bootmem allocator is up and + * running, so we need this "cache". 32 entries are probably enough + * for all but the most insanely complex systems. + */ +static struct resource __initdata res_cache[32]; +static unsigned int __initdata res_cache_next_free; + +static void __init resource_init(void) +{ + struct resource *mem, *res; + struct resource *new; + + kernel_code.start = __pa(init_mm.start_code); + + for (mem = system_ram; mem; mem = mem->sibling) { + new = alloc_bootmem_low(sizeof(struct resource)); + memcpy(new, mem, sizeof(struct resource)); + + new->sibling = NULL; + if (request_resource(&iomem_resource, new)) + printk(KERN_WARNING "Bad RAM resource %08x-%08x\n", + mem->start, mem->end); + } + + for (res = reserved; res; res = res->sibling) { + new = alloc_bootmem_low(sizeof(struct resource)); + memcpy(new, res, sizeof(struct resource)); + + new->sibling = NULL; + if (insert_resource(&iomem_resource, new)) + printk(KERN_WARNING + "Bad reserved resource %s (%08x-%08x)\n", + res->name, res->start, res->end); + } +} + +static void __init +add_physical_memory(resource_size_t start, resource_size_t end) +{ + struct resource *new, *next, **pprev; + + for (pprev = &system_ram, next = system_ram; next; + pprev = &next->sibling, next = next->sibling) { + if (end < next->start) + break; + if (start <= next->end) { + printk(KERN_WARNING + "Warning: Physical memory map is broken\n"); + printk(KERN_WARNING + "Warning: %08x-%08x overlaps %08x-%08x\n", + start, end, next->start, next->end); + return; + } + } + + if (res_cache_next_free >= ARRAY_SIZE(res_cache)) { + printk(KERN_WARNING + "Warning: Failed to add physical memory %08x-%08x\n", + start, end); + return; + } + + new = &res_cache[res_cache_next_free++]; + new->start = start; + new->end = end; + new->name = "System RAM"; + new->flags = IORESOURCE_MEM; + + *pprev = new; +} + +static int __init +add_reserved_region(resource_size_t start, resource_size_t end, + const char *name) +{ + struct resource *new, *next, **pprev; + + if (end < start) + return -EINVAL; + + if (res_cache_next_free >= ARRAY_SIZE(res_cache)) + return -ENOMEM; + + for (pprev = &reserved, next = reserved; next; + pprev = &next->sibling, next = next->sibling) { + if (end < next->start) + break; + if (start <= next->end) + return -EBUSY; + } + + new = &res_cache[res_cache_next_free++]; + new->start = start; + new->end = end; + new->name = name; + new->flags = IORESOURCE_MEM; + + *pprev = new; + + return 0; +} + +static unsigned long __init +find_free_region(const struct resource 
*mem, resource_size_t size, + resource_size_t align) +{ + struct resource *res; + unsigned long target; + + target = ALIGN(mem->start, align); + for (res = reserved; res; res = res->sibling) { + if ((target + size) <= res->start) + break; + if (target <= res->end) + target = ALIGN(res->end + 1, align); + } + + if ((target + size) > (mem->end + 1)) + return mem->end + 1; + + return target; +} + +static int __init +alloc_reserved_region(resource_size_t *start, resource_size_t size, + resource_size_t align, const char *name) +{ + struct resource *mem; + resource_size_t target; + int ret; + + for (mem = system_ram; mem; mem = mem->sibling) { + target = find_free_region(mem, size, align); + if (target <= mem->end) { + ret = add_reserved_region(target, target + size - 1, + name); + if (!ret) + *start = target; + return ret; + } + } -#define kernel_code mem_res[0] -#define kernel_data mem_res[1] + return -ENOMEM; +} /* * Early framebuffer allocation. Works as follows: * - If fbmem_size is zero, nothing will be allocated or reserved. * - If fbmem_start is zero when setup_bootmem() is called, - * fbmem_size bytes will be allocated from the bootmem allocator. + * a block of fbmem_size bytes will be reserved before bootmem + * initialization. It will be aligned to the largest page size + * that fbmem_size is a multiple of. * - If fbmem_start is nonzero, an area of size fbmem_size will be - * reserved at the physical address fbmem_start if necessary. If - * the area isn't in a memory region known to the kernel, it will - * be left alone. + * reserved at the physical address fbmem_start if possible. If + * it collides with other reserved memory, a different block of + * same size will be allocated, just as if fbmem_start was zero. * * Board-specific code may use these variables to set up platform data * for the framebuffer driver if fbmem_size is nonzero. 
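
As an illustration of how the "Framebuffer" reservation described above is actually placed by alloc_reserved_region()/find_free_region() earlier in this file (all addresses below are made up for the example, not taken from real hardware):

	/* RAM bank:        0x10000000 - 0x10ffffff
	 * Reserved so far: 0x10000000 - 0x1001ffff  Kernel code
	 *                  0x10020000 - 0x100fffff  Kernel data
	 *
	 * alloc_reserved_region(&fbmem_start, 0x80000, 0x100000, "Framebuffer")
	 * walks the sorted reserved list:
	 *   target = ALIGN(0x10000000, 1 MiB)      = 0x10000000  -> overlaps Kernel code
	 *   target = ALIGN(0x1001ffff + 1, 1 MiB)  = 0x10100000  -> clears Kernel data too
	 * so "Framebuffer" ends up reserved at 0x10100000 - 0x1017ffff.
	 */
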
*/ -static unsigned long __initdata fbmem_start; -static unsigned long __initdata fbmem_size; +resource_size_t __initdata fbmem_start; +resource_size_t __initdata fbmem_size; /* * "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for @@ -103,48 +243,42 @@ static unsigned long __initdata fbmem_size; */ static int __init early_parse_fbmem(char *p) { + int ret; + unsigned long align; + fbmem_size = memparse(p, &p); - if (*p == '@') + if (*p == '@') { fbmem_start = memparse(p, &p); - return 0; -} -early_param("fbmem", early_parse_fbmem); - -static inline void __init resource_init(void) -{ - struct tag_mem_range *region; - - kernel_code.start = __pa(init_mm.start_code); - kernel_code.end = __pa(init_mm.end_code - 1); - kernel_data.start = __pa(init_mm.end_code); - kernel_data.end = __pa(init_mm.brk - 1); - - for (region = mem_phys; region; region = region->next) { - struct resource *res; - unsigned long phys_start, phys_end; - - if (region->size == 0) - continue; - - phys_start = region->addr; - phys_end = phys_start + region->size - 1; - - res = alloc_bootmem_low(sizeof(*res)); - res->name = "System RAM"; - res->start = phys_start; - res->end = phys_end; - res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; - - request_resource (&iomem_resource, res); + ret = add_reserved_region(fbmem_start, + fbmem_start + fbmem_size - 1, + "Framebuffer"); + if (ret) { + printk(KERN_WARNING + "Failed to reserve framebuffer memory\n"); + fbmem_start = 0; + } + } - if (kernel_code.start >= res->start && - kernel_code.end <= res->end) - request_resource (res, &kernel_code); - if (kernel_data.start >= res->start && - kernel_data.end <= res->end) - request_resource (res, &kernel_data); + if (!fbmem_start) { + if ((fbmem_size & 0x000fffffUL) == 0) + align = 0x100000; /* 1 MiB */ + else if ((fbmem_size & 0x0000ffffUL) == 0) + align = 0x10000; /* 64 KiB */ + else + align = 0x1000; /* 4 KiB */ + + ret = alloc_reserved_region(&fbmem_start, fbmem_size, + align, "Framebuffer"); + if (ret) { + printk(KERN_WARNING + "Failed to allocate framebuffer memory\n"); + fbmem_size = 0; + } } + + return 0; } +early_param("fbmem", early_parse_fbmem); static int __init parse_tag_core(struct tag *tag) { @@ -157,11 +291,9 @@ static int __init parse_tag_core(struct tag *tag) } __tagtable(ATAG_CORE, parse_tag_core); -static int __init parse_tag_mem_range(struct tag *tag, - struct tag_mem_range **root) +static int __init parse_tag_mem(struct tag *tag) { - struct tag_mem_range *cur, **pprev; - struct tag_mem_range *new; + unsigned long start, end; /* * Ignore zero-sized entries. If we're running standalone, the @@ -171,34 +303,53 @@ static int __init parse_tag_mem_range(struct tag *tag, if (tag->u.mem_range.size == 0) return 0; - /* - * Copy the data so the bootmem init code doesn't need to care - * about it. 
- */ - if (mem_range_next_free >= ARRAY_SIZE(mem_range_cache)) - panic("Physical memory map too complex!\n"); + start = tag->u.mem_range.addr; + end = tag->u.mem_range.addr + tag->u.mem_range.size - 1; + + add_physical_memory(start, end); + return 0; +} +__tagtable(ATAG_MEM, parse_tag_mem); + +static int __init parse_tag_rdimg(struct tag *tag) +{ +#ifdef CONFIG_INITRD + struct tag_mem_range *mem = &tag->u.mem_range; + int ret; - new = &mem_range_cache[mem_range_next_free++]; - *new = tag->u.mem_range; + if (initrd_start) { + printk(KERN_WARNING + "Warning: Only the first initrd image will be used\n"); + return 0; + } - pprev = root; - cur = *root; - while (cur) { - pprev = &cur->next; - cur = cur->next; + ret = add_reserved_region(mem->start, mem->start + mem->size - 1, + "initrd"); + if (ret) { + printk(KERN_WARNING + "Warning: Failed to reserve initrd memory\n"); + return ret; } - *pprev = new; - new->next = NULL; + initrd_start = (unsigned long)__va(mem->addr); + initrd_end = initrd_start + mem->size; +#else + printk(KERN_WARNING "RAM disk image present, but " + "no initrd support in kernel, ignoring\n"); +#endif return 0; } +__tagtable(ATAG_RDIMG, parse_tag_rdimg); -static int __init parse_tag_mem(struct tag *tag) +static int __init parse_tag_rsvd_mem(struct tag *tag) { - return parse_tag_mem_range(tag, &mem_phys); + struct tag_mem_range *mem = &tag->u.mem_range; + + return add_reserved_region(mem->addr, mem->addr + mem->size - 1, + "Reserved"); } -__tagtable(ATAG_MEM, parse_tag_mem); +__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem); static int __init parse_tag_cmdline(struct tag *tag) { @@ -207,12 +358,6 @@ static int __init parse_tag_cmdline(struct tag *tag) } __tagtable(ATAG_CMDLINE, parse_tag_cmdline); -static int __init parse_tag_rdimg(struct tag *tag) -{ - return parse_tag_mem_range(tag, &mem_ramdisk); -} -__tagtable(ATAG_RDIMG, parse_tag_rdimg); - static int __init parse_tag_clock(struct tag *tag) { /* @@ -223,12 +368,6 @@ static int __init parse_tag_clock(struct tag *tag) } __tagtable(ATAG_CLOCK, parse_tag_clock); -static int __init parse_tag_rsvd_mem(struct tag *tag) -{ - return parse_tag_mem_range(tag, &mem_reserved); -} -__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem); - /* * Scan the tag table for this tag, and call its parse function. The * tag table is built by the linker from all the __tagtable @@ -260,10 +399,137 @@ static void __init parse_tags(struct tag *t) t->hdr.tag); } +/* + * Find a free memory region large enough for storing the + * bootmem bitmap. + */ +static unsigned long __init +find_bootmap_pfn(const struct resource *mem) +{ + unsigned long bootmap_pages, bootmap_len; + unsigned long node_pages = PFN_UP(mem->end - mem->start + 1); + unsigned long bootmap_start; + + bootmap_pages = bootmem_bootmap_pages(node_pages); + bootmap_len = bootmap_pages << PAGE_SHIFT; + + /* + * Find a large enough region without reserved pages for + * storing the bootmem bitmap. We can take advantage of the + * fact that all lists have been sorted. + * + * We have to check that we don't collide with any reserved + * regions, which includes the kernel image and any RAMDISK + * images. 
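
To put a number on the bitmap being searched for here: bootmem keeps one bit per page, so even a sizeable bank needs only a page or two for it. A rough calculation mirroring what bootmem_bootmap_pages() works out, assuming 4 KiB pages and a 32 MiB bank (example figures only):

	unsigned long node_pages    = (32 << 20) >> 12;             /* 8192 pages */
	unsigned long bitmap_bytes  = (node_pages + 7) / 8;         /* 1024 bytes */
	unsigned long bootmap_pages = (bitmap_bytes + 4095) / 4096; /* 1 page     */
	unsigned long bootmap_len   = bootmap_pages << 12;          /* 4096 bytes */
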
+ */ + bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE); + + return bootmap_start >> PAGE_SHIFT; +} + +#define MAX_LOWMEM HIGHMEM_START +#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM) + +static void __init setup_bootmem(void) +{ + unsigned bootmap_size; + unsigned long first_pfn, bootmap_pfn, pages; + unsigned long max_pfn, max_low_pfn; + unsigned node = 0; + struct resource *res; + + printk(KERN_INFO "Physical memory:\n"); + for (res = system_ram; res; res = res->sibling) + printk(" %08x-%08x\n", res->start, res->end); + printk(KERN_INFO "Reserved memory:\n"); + for (res = reserved; res; res = res->sibling) + printk(" %08x-%08x: %s\n", + res->start, res->end, res->name); + + nodes_clear(node_online_map); + + if (system_ram->sibling) + printk(KERN_WARNING "Only using first memory bank\n"); + + for (res = system_ram; res; res = NULL) { + first_pfn = PFN_UP(res->start); + max_low_pfn = max_pfn = PFN_DOWN(res->end + 1); + bootmap_pfn = find_bootmap_pfn(res); + if (bootmap_pfn > max_pfn) + panic("No space for bootmem bitmap!\n"); + + if (max_low_pfn > MAX_LOWMEM_PFN) { + max_low_pfn = MAX_LOWMEM_PFN; +#ifndef CONFIG_HIGHMEM + /* + * Lowmem is memory that can be addressed + * directly through P1/P2 + */ + printk(KERN_WARNING + "Node %u: Only %ld MiB of memory will be used.\n", + node, MAX_LOWMEM >> 20); + printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); +#else +#error HIGHMEM is not supported by AVR32 yet +#endif + } + + /* Initialize the boot-time allocator with low memory only. */ + bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn, + first_pfn, max_low_pfn); + + /* + * Register fully available RAM pages with the bootmem + * allocator. + */ + pages = max_low_pfn - first_pfn; + free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn), + PFN_PHYS(pages)); + + /* Reserve space for the bootmem bitmap... */ + reserve_bootmem_node(NODE_DATA(node), + PFN_PHYS(bootmap_pfn), + bootmap_size); + + /* ...and any other reserved regions. */ + for (res = reserved; res; res = res->sibling) { + if (res->start > PFN_PHYS(max_pfn)) + break; + + /* + * resource_init will complain about partial + * overlaps, so we'll just ignore such + * resources for now. + */ + if (res->start >= PFN_PHYS(first_pfn) + && res->end < PFN_PHYS(max_pfn)) + reserve_bootmem_node( + NODE_DATA(node), res->start, + res->end - res->start + 1); + } + + node_set_online(node); + } +} + void __init setup_arch (char **cmdline_p) { struct clk *cpu_clk; + init_mm.start_code = (unsigned long)_text; + init_mm.end_code = (unsigned long)_etext; + init_mm.end_data = (unsigned long)_edata; + init_mm.brk = (unsigned long)_end; + + /* + * Include .init section to make allocations easier. It will + * be removed before the resource is actually requested. 
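
For readers not fluent in the PFN_* helpers (from linux/pfn.h) that setup_bootmem() above leans on: they simply convert between byte addresses and page frame numbers. A quick worked example with 4 KiB pages; the bank address is invented:

	/* PFN_UP(x)   = (x + PAGE_SIZE - 1) >> PAGE_SHIFT   (round up)
	 * PFN_DOWN(x) = x >> PAGE_SHIFT                     (round down)
	 * PFN_PHYS(x) = x << PAGE_SHIFT
	 *
	 * For a bank spanning 0x10000000 - 0x10ffffff (16 MiB):
	 *   first_pfn = PFN_UP(0x10000000)       = 0x10000
	 *   max_pfn   = PFN_DOWN(0x10ffffff + 1) = 0x11000
	 *   pages     = max_pfn - first_pfn      = 0x1000  (4096 pages)
	 */
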
+ */ + kernel_code.start = __pa(__init_begin); + kernel_code.end = __pa(init_mm.end_code - 1); + kernel_data.start = __pa(init_mm.end_code); + kernel_data.end = __pa(init_mm.brk - 1); + parse_tags(bootloader_tags); setup_processor(); @@ -289,24 +555,16 @@ void __init setup_arch (char **cmdline_p) ((cpu_hz + 500) / 1000) % 1000); } - init_mm.start_code = (unsigned long) &_text; - init_mm.end_code = (unsigned long) &_etext; - init_mm.end_data = (unsigned long) &_edata; - init_mm.brk = (unsigned long) &_end; - strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = command_line; parse_early_param(); setup_bootmem(); - board_setup_fbmem(fbmem_start, fbmem_size); - #ifdef CONFIG_VT conswitchp = &dummy_con; #endif paging_init(); - resource_init(); } diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c index c10833f2ee0..7014a3571ec 100644 --- a/arch/avr32/kernel/time.c +++ b/arch/avr32/kernel/time.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2004-2006 Atmel Corporation + * Copyright (C) 2004-2007 Atmel Corporation * * Based on MIPS implementation arch/mips/kernel/time.c * Copyright 2001 MontaVista Software Inc. @@ -20,18 +20,25 @@ #include <linux/init.h> #include <linux/profile.h> #include <linux/sysdev.h> +#include <linux/err.h> #include <asm/div64.h> #include <asm/sysreg.h> #include <asm/io.h> #include <asm/sections.h> -static cycle_t read_cycle_count(void) +/* how many counter cycles in a jiffy? */ +static u32 cycles_per_jiffy; + +/* the count value for the next timer interrupt */ +static u32 expirelo; + +cycle_t __weak read_cycle_count(void) { return (cycle_t)sysreg_read(COUNT); } -static struct clocksource clocksource_avr32 = { +struct clocksource __weak clocksource_avr32 = { .name = "avr32", .rating = 350, .read = read_cycle_count, @@ -40,12 +47,20 @@ static struct clocksource clocksource_avr32 = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +irqreturn_t __weak timer_interrupt(int irq, void *dev_id); + +struct irqaction timer_irqaction = { + .handler = timer_interrupt, + .flags = IRQF_DISABLED, + .name = "timer", +}; + /* * By default we provide the null RTC ops */ static unsigned long null_rtc_get_time(void) { - return mktime(2004, 1, 1, 0, 0, 0); + return mktime(2007, 1, 1, 0, 0, 0); } static int null_rtc_set_time(unsigned long sec) @@ -56,23 +71,14 @@ static int null_rtc_set_time(unsigned long sec) static unsigned long (*rtc_get_time)(void) = null_rtc_get_time; static int (*rtc_set_time)(unsigned long) = null_rtc_set_time; -/* how many counter cycles in a jiffy? 
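
The __weak annotations introduced above are the mechanism that lets a sub-architecture bring its own timer: these generic definitions act as link-time defaults, and any non-weak definition of the same symbol (such as the ones added in mach-at32ap/time-tc.c later in this patch) silently replaces them. A minimal standalone illustration of that linker behaviour, unrelated to the kernel sources:

	/* default.c: generic fallback, used only if nothing else defines it */
	int __attribute__((weak)) hpt_init(void)
	{
		return -1;
	}

	/* board.c: a strong definition; the linker picks this one instead */
	int hpt_init(void)
	{
		return 0;
	}
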
*/ -static unsigned long cycles_per_jiffy; - -/* cycle counter value at the previous timer interrupt */ -static unsigned int timerhi, timerlo; - -/* the count value for the next timer interrupt */ -static unsigned int expirelo; - static void avr32_timer_ack(void) { - unsigned int count; + u32 count; /* Ack this timer interrupt and set the next one */ expirelo += cycles_per_jiffy; + /* setting COMPARE to 0 stops the COUNT-COMPARE */ if (expirelo == 0) { - printk(KERN_DEBUG "expirelo == 0\n"); sysreg_write(COMPARE, expirelo + 1); } else { sysreg_write(COMPARE, expirelo); @@ -86,27 +92,56 @@ static void avr32_timer_ack(void) } } -static unsigned int avr32_hpt_read(void) +int __weak avr32_hpt_init(void) { - return sysreg_read(COUNT); + int ret; + unsigned long mult, shift, count_hz; + + count_hz = clk_get_rate(boot_cpu_data.clk); + shift = clocksource_avr32.shift; + mult = clocksource_hz2mult(count_hz, shift); + clocksource_avr32.mult = mult; + + { + u64 tmp; + + tmp = TICK_NSEC; + tmp <<= shift; + tmp += mult / 2; + do_div(tmp, mult); + + cycles_per_jiffy = tmp; + } + + ret = setup_irq(0, &timer_irqaction); + if (ret) { + pr_debug("timer: could not request IRQ 0: %d\n", ret); + return -ENODEV; + } + + printk(KERN_INFO "timer: AT32AP COUNT-COMPARE at irq 0, " + "%lu.%03lu MHz\n", + ((count_hz + 500) / 1000) / 1000, + ((count_hz + 500) / 1000) % 1000); + + return 0; } /* * Taken from MIPS c0_hpt_timer_init(). * - * Why is it so complicated, and what is "count"? My assumption is - * that `count' specifies the "reference cycle", i.e. the cycle since - * reset that should mean "zero". The reason COUNT is written twice is - * probably to make sure we don't get any timer interrupts while we - * are messing with the counter. + * The reason COUNT is written twice is probably to make sure we don't get any + * timer interrupts while we are messing with the counter. */ -static void avr32_hpt_init(unsigned int count) +int __weak avr32_hpt_start(void) { - count = sysreg_read(COUNT) - count; + u32 count = sysreg_read(COUNT); expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy; sysreg_write(COUNT, expirelo - cycles_per_jiffy); sysreg_write(COMPARE, expirelo); sysreg_write(COUNT, count); + + return 0; } /* @@ -115,26 +150,18 @@ static void avr32_hpt_init(unsigned int count) * * In UP mode, it is invoked from the (global) timer_interrupt. */ -static void local_timer_interrupt(int irq, void *dev_id) +void local_timer_interrupt(int irq, void *dev_id) { if (current->pid) profile_tick(CPU_PROFILING); update_process_times(user_mode(get_irq_regs())); } -static irqreturn_t -timer_interrupt(int irq, void *dev_id) +irqreturn_t __weak timer_interrupt(int irq, void *dev_id) { - unsigned int count; - /* ack timer interrupt and try to set next interrupt */ - count = avr32_hpt_read(); avr32_timer_ack(); - /* Update timerhi/timerlo for intra-jiffy calibration */ - timerhi += count < timerlo; /* Wrap around */ - timerlo = count; - /* * Call the generic timer interrupt handler */ @@ -153,60 +180,37 @@ timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static struct irqaction timer_irqaction = { - .handler = timer_interrupt, - .flags = IRQF_DISABLED, - .name = "timer", -}; - void __init time_init(void) { - unsigned long mult, shift, count_hz; int ret; + /* + * Make sure we don't get any COMPARE interrupts before we can + * handle them. 
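
The mult/shift block in avr32_hpt_init() above just inverts the clocksource conversion: the clocksource turns cycles into nanoseconds as ns = (cycles * mult) >> shift, so the number of counter cycles in one jiffy comes out as (TICK_NSEC << shift) / mult, with mult/2 added for rounding. A standalone sanity check of that arithmetic; the 200 MHz COUNT rate and HZ=100 are example figures only:

	unsigned long long count_hz = 200000000ULL;              /* example rate */
	unsigned int shift = 16;
	/* roughly what clocksource_hz2mult() returns */
	unsigned long long mult = (1000000000ULL << shift) / count_hz;  /* 327680 */

	/* TICK_NSEC with HZ=100 is 10,000,000 ns per jiffy */
	unsigned long long tmp = (10000000ULL << shift) + mult / 2;
	unsigned long cycles_per_jiffy = tmp / mult;             /* 2,000,000 */
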
+ */ + sysreg_write(COMPARE, 0); + xtime.tv_sec = rtc_get_time(); xtime.tv_nsec = 0; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); - printk("Before time_init: count=%08lx, compare=%08lx\n", - (unsigned long)sysreg_read(COUNT), - (unsigned long)sysreg_read(COMPARE)); - - count_hz = clk_get_rate(boot_cpu_data.clk); - shift = clocksource_avr32.shift; - mult = clocksource_hz2mult(count_hz, shift); - clocksource_avr32.mult = mult; - - printk("Cycle counter: mult=%lu, shift=%lu\n", mult, shift); - - { - u64 tmp; - - tmp = TICK_NSEC; - tmp <<= shift; - tmp += mult / 2; - do_div(tmp, mult); - - cycles_per_jiffy = tmp; + ret = avr32_hpt_init(); + if (ret) { + pr_debug("timer: failed setup: %d\n", ret); + return; } - /* This sets up the high precision timer for the first interrupt. */ - avr32_hpt_init(avr32_hpt_read()); - - printk("After time_init: count=%08lx, compare=%08lx\n", - (unsigned long)sysreg_read(COUNT), - (unsigned long)sysreg_read(COMPARE)); - ret = clocksource_register(&clocksource_avr32); if (ret) - printk(KERN_ERR - "timer: could not register clocksource: %d\n", ret); + pr_debug("timer: could not register clocksource: %d\n", ret); - ret = setup_irq(0, &timer_irqaction); - if (ret) - printk("timer: could not request IRQ 0: %d\n", ret); + ret = avr32_hpt_start(); + if (ret) { + pr_debug("timer: failed starting: %d\n", ret); + return; + } } static struct sysdev_class timer_class = { diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index adc01a12d15..4f0382d8483 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c @@ -5,158 +5,25 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#undef DEBUG -#include <linux/sched.h> + +#include <linux/bug.h> #include <linux/init.h> -#include <linux/module.h> #include <linux/kallsyms.h> +#include <linux/module.h> #include <linux/notifier.h> +#include <linux/sched.h> +#include <linux/uaccess.h> -#include <asm/traps.h> -#include <asm/sysreg.h> #include <asm/addrspace.h> -#include <asm/ocd.h> #include <asm/mmu_context.h> -#include <asm/uaccess.h> - -static void dump_mem(const char *str, unsigned long bottom, unsigned long top) -{ - unsigned long p; - int i; - - printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); - - for (p = bottom & ~31; p < top; ) { - printk("%04lx: ", p & 0xffff); - - for (i = 0; i < 8; i++, p += 4) { - unsigned int val; - - if (p < bottom || p >= top) - printk(" "); - else { - if (__get_user(val, (unsigned int __user *)p)) { - printk("\n"); - goto out; - } - printk("%08x ", val); - } - } - printk("\n"); - } - -out: - return; -} - -static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p) -{ - return (p > (unsigned long)tinfo) - && (p < (unsigned long)tinfo + THREAD_SIZE - 3); -} - -#ifdef CONFIG_FRAME_POINTER -static inline void __show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs) -{ - unsigned long lr, fp; - struct thread_info *tinfo; - - tinfo = (struct thread_info *) - ((unsigned long)sp & ~(THREAD_SIZE - 1)); - - if (regs) - fp = regs->r7; - else if (tsk == current) - asm("mov %0, r7" : "=r"(fp)); - else - fp = tsk->thread.cpu_context.r7; - - /* - * Walk the stack as long as the frame pointer (a) is within - * the kernel stack of the task, and (b) it doesn't move - * downwards. 
- */ - while (valid_stack_ptr(tinfo, fp)) { - unsigned long new_fp; - - lr = *(unsigned long *)fp; - printk(" [<%08lx>] ", lr); - print_symbol("%s\n", lr); - - new_fp = *(unsigned long *)(fp + 4); - if (new_fp <= fp) - break; - fp = new_fp; - } - printk("\n"); -} -#else -static inline void __show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs) -{ - unsigned long addr; - - while (!kstack_end(sp)) { - addr = *sp++; - if (kernel_text_address(addr)) { - printk(" [<%08lx>] ", addr); - print_symbol("%s\n", addr); - } - } -} -#endif - -void show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs) -{ - if (regs && - (((regs->sr & MODE_MASK) == MODE_EXCEPTION) || - ((regs->sr & MODE_MASK) == MODE_USER))) - return; - - printk ("Call trace:"); -#ifdef CONFIG_KALLSYMS - printk("\n"); -#endif - - __show_trace(tsk, sp, regs); - printk("\n"); -} - -void show_stack(struct task_struct *tsk, unsigned long *sp) -{ - unsigned long stack; - - if (!tsk) - tsk = current; - if (sp == 0) { - if (tsk == current) { - register unsigned long *real_sp __asm__("sp"); - sp = real_sp; - } else { - sp = (unsigned long *)tsk->thread.cpu_context.ksp; - } - } - - stack = (unsigned long)sp; - dump_mem("Stack: ", stack, - THREAD_SIZE + (unsigned long)tsk->thread_info); - show_trace(tsk, sp, NULL); -} - -void dump_stack(void) -{ - show_stack(NULL, NULL); -} -EXPORT_SYMBOL(dump_stack); +#include <asm/ocd.h> +#include <asm/sysreg.h> +#include <asm/traps.h> ATOMIC_NOTIFIER_HEAD(avr32_die_chain); int register_die_notifier(struct notifier_block *nb) { - pr_debug("register_die_notifier: %p\n", nb); - return atomic_notifier_chain_register(&avr32_die_chain, nb); } EXPORT_SYMBOL(register_die_notifier); @@ -169,93 +36,103 @@ EXPORT_SYMBOL(unregister_die_notifier); static DEFINE_SPINLOCK(die_lock); -void __die(const char *str, struct pt_regs *regs, unsigned long err, - const char *file, const char *func, unsigned long line) +void NORET_TYPE die(const char *str, struct pt_regs *regs, long err) { - struct task_struct *tsk = current; static int die_counter; console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); - printk(KERN_ALERT "%s", str); - if (file && func) - printk(" in %s:%s, line %ld", file, func, line); - printk("[#%d]:\n", ++die_counter); - print_modules(); - show_regs(regs); - printk("Process %s (pid: %d, stack limit = 0x%p)\n", - tsk->comm, tsk->pid, tsk->thread_info + 1); - - if (!user_mode(regs) || in_interrupt()) { - dump_mem("Stack: ", regs->sp, - THREAD_SIZE + (unsigned long)tsk->thread_info); + printk(KERN_ALERT "Oops: %s, sig: %ld [#%d]\n" KERN_EMERG, + str, err, ++die_counter); +#ifdef CONFIG_PREEMPT + printk("PREEMPT "); +#endif +#ifdef CONFIG_FRAME_POINTER + printk("FRAME_POINTER "); +#endif + if (current_cpu_data.features & AVR32_FEATURE_OCD) { + unsigned long did = __mfdr(DBGREG_DID); + printk("chip: 0x%03lx:0x%04lx rev %lu\n", + (did >> 1) & 0x7ff, + (did >> 12) & 0x7fff, + (did >> 28) & 0xf); + } else { + printk("cpu: arch %u r%u / core %u r%u\n", + current_cpu_data.arch_type, + current_cpu_data.arch_revision, + current_cpu_data.cpu_type, + current_cpu_data.cpu_revision); } + print_modules(); + show_regs_log_lvl(regs, KERN_EMERG); + show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG); bust_spinlocks(0); spin_unlock_irq(&die_lock); - do_exit(SIGSEGV); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + + if (panic_on_oops) + panic("Fatal exception"); + + do_exit(err); } -void __die_if_kernel(const char *str, struct pt_regs *regs, unsigned long 
err, - const char *file, const char *func, unsigned long line) +void _exception(long signr, struct pt_regs *regs, int code, + unsigned long addr) { + siginfo_t info; + if (!user_mode(regs)) - __die(str, regs, err, file, func, line); -} + die("Unhandled exception in kernel mode", regs, signr); + + memset(&info, 0, sizeof(info)); + info.si_signo = signr; + info.si_code = code; + info.si_addr = (void __user *)addr; + force_sig_info(signr, &info, current); -asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) -{ -#ifdef CONFIG_SUBARCH_AVR32B /* - * The exception entry always saves RSR_EX. For NMI, this is - * wrong; it should be RSR_NMI + * Init gets no signals that it doesn't have a handler for. + * That's all very well, but if it has caused a synchronous + * exception and we ignore the resulting signal, it will just + * generate the same exception over and over again and we get + * nowhere. Better to kill it and let the kernel panic. */ - regs->sr = sysreg_read(RSR_NMI); -#endif + if (is_init(current)) { + __sighandler_t handler; + + spin_lock_irq(¤t->sighand->siglock); + handler = current->sighand->action[signr-1].sa.sa_handler; + spin_unlock_irq(¤t->sighand->siglock); + if (handler == SIG_DFL) { + /* init has generated a synchronous exception + and it doesn't have a handler for the signal */ + printk(KERN_CRIT "init has generated signal %ld " + "but has no handler for it\n", signr); + do_exit(signr); + } + } +} - printk("NMI taken!!!!\n"); - die("NMI", regs, ecr); - BUG(); +asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) +{ + printk(KERN_ALERT "Got Non-Maskable Interrupt, dumping regs\n"); + show_regs_log_lvl(regs, KERN_ALERT); + show_stack_log_lvl(current, regs->sp, regs, KERN_ALERT); } asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs) { - printk("Unable to handle critical exception %lu at pc = %08lx!\n", - ecr, regs->pc); - die("Oops", regs, ecr); - BUG(); + die("Critical exception", regs, SIGKILL); } asmlinkage void do_address_exception(unsigned long ecr, struct pt_regs *regs) { - siginfo_t info; - - die_if_kernel("Oops: Address exception in kernel mode", regs, ecr); - -#ifdef DEBUG - if (ecr == ECR_ADDR_ALIGN_X) - pr_debug("Instruction Address Exception at pc = %08lx\n", - regs->pc); - else if (ecr == ECR_ADDR_ALIGN_R) - pr_debug("Data Address Exception (Read) at pc = %08lx\n", - regs->pc); - else if (ecr == ECR_ADDR_ALIGN_W) - pr_debug("Data Address Exception (Write) at pc = %08lx\n", - regs->pc); - else - BUG(); - - show_regs(regs); -#endif - - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *)regs->pc; - - force_sig_info(SIGBUS, &info, current); + _exception(SIGBUS, regs, BUS_ADRALN, regs->pc); } /* This way of handling undefined instructions is stolen from ARM */ @@ -280,7 +157,8 @@ static int do_cop_absent(u32 insn) { int cop_nr; u32 cpucr; - if ( (insn & 0xfdf00000) == 0xf1900000 ) + + if ((insn & 0xfdf00000) == 0xf1900000) /* LDC0 */ cop_nr = 0; else @@ -292,136 +170,91 @@ static int do_cop_absent(u32 insn) sysreg_write(CPUCR, cpucr); cpucr = sysreg_read(CPUCR); - if ( !(cpucr & (1 << (24 + cop_nr))) ){ - printk("Coprocessor #%i not found!\n", cop_nr); - return -1; - } + if (!(cpucr & (1 << (24 + cop_nr)))) + return -ENODEV; return 0; } -#ifdef CONFIG_BUG -#ifdef CONFIG_DEBUG_BUGVERBOSE -static inline void do_bug_verbose(struct pt_regs *regs, u32 insn) -{ - char *file; - u16 line; - char c; - - if (__get_user(line, (u16 __user *)(regs->pc + 2))) - return; - if 
(__get_user(file, (char * __user *)(regs->pc + 4)) - || (unsigned long)file < PAGE_OFFSET - || __get_user(c, file)) - file = "<bad filename>"; - - printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line); -} -#else -static inline void do_bug_verbose(struct pt_regs *regs, u32 insn) +int is_valid_bugaddr(unsigned long pc) { + unsigned short opcode; + + if (pc < PAGE_OFFSET) + return 0; + if (probe_kernel_address((u16 *)pc, opcode)) + return 0; + return opcode == AVR32_BUG_OPCODE; } -#endif -#endif asmlinkage void do_illegal_opcode(unsigned long ecr, struct pt_regs *regs) { u32 insn; struct undef_hook *hook; - siginfo_t info; void __user *pc; + long code; - if (!user_mode(regs)) - goto kernel_trap; + if (!user_mode(regs) && (ecr == ECR_ILLEGAL_OPCODE)) { + enum bug_trap_type type; + + type = report_bug(regs->pc); + switch (type) { + case BUG_TRAP_TYPE_NONE: + break; + case BUG_TRAP_TYPE_WARN: + regs->pc += 2; + return; + case BUG_TRAP_TYPE_BUG: + die("Kernel BUG", regs, SIGKILL); + } + } local_irq_enable(); - pc = (void __user *)instruction_pointer(regs); - if (__get_user(insn, (u32 __user *)pc)) - goto invalid_area; + if (user_mode(regs)) { + pc = (void __user *)instruction_pointer(regs); + if (get_user(insn, (u32 __user *)pc)) + goto invalid_area; - if (ecr == ECR_COPROC_ABSENT) { - if (do_cop_absent(insn) == 0) + if (ecr == ECR_COPROC_ABSENT && !do_cop_absent(insn)) return; - } - spin_lock_irq(&undef_lock); - list_for_each_entry(hook, &undef_hook, node) { - if ((insn & hook->insn_mask) == hook->insn_val) { - if (hook->fn(regs, insn) == 0) { - spin_unlock_irq(&undef_lock); - return; + spin_lock_irq(&undef_lock); + list_for_each_entry(hook, &undef_hook, node) { + if ((insn & hook->insn_mask) == hook->insn_val) { + if (hook->fn(regs, insn) == 0) { + spin_unlock_irq(&undef_lock); + return; + } } } + spin_unlock_irq(&undef_lock); } - spin_unlock_irq(&undef_lock); - -invalid_area: -#ifdef DEBUG - printk("Illegal instruction at pc = %08lx\n", regs->pc); - if (regs->pc < TASK_SIZE) { - unsigned long ptbr, pgd, pte, *p; - - ptbr = sysreg_read(PTBR); - p = (unsigned long *)ptbr; - pgd = p[regs->pc >> 22]; - p = (unsigned long *)((pgd & 0x1ffff000) | 0x80000000); - pte = p[(regs->pc >> 12) & 0x3ff]; - printk("page table: 0x%08lx -> 0x%08lx -> 0x%08lx\n", ptbr, pgd, pte); - } -#endif - - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_addr = (void __user *)regs->pc; switch (ecr) { - case ECR_ILLEGAL_OPCODE: - case ECR_UNIMPL_INSTRUCTION: - info.si_code = ILL_ILLOPC; - break; case ECR_PRIVILEGE_VIOLATION: - info.si_code = ILL_PRVOPC; + code = ILL_PRVOPC; break; case ECR_COPROC_ABSENT: - info.si_code = ILL_COPROC; + code = ILL_COPROC; break; default: - BUG(); + code = ILL_ILLOPC; + break; } - force_sig_info(SIGILL, &info, current); + _exception(SIGILL, regs, code, regs->pc); return; -kernel_trap: -#ifdef CONFIG_BUG - if (__kernel_text_address(instruction_pointer(regs))) { - insn = *(u16 *)instruction_pointer(regs); - if (insn == AVR32_BUG_OPCODE) { - do_bug_verbose(regs, insn); - die("Kernel BUG", regs, 0); - return; - } - } -#endif - - die("Oops: Illegal instruction in kernel code", regs, ecr); +invalid_area: + _exception(SIGSEGV, regs, SEGV_MAPERR, regs->pc); } asmlinkage void do_fpe(unsigned long ecr, struct pt_regs *regs) { - siginfo_t info; - - printk("Floating-point exception at pc = %08lx\n", regs->pc); - - /* We have no FPU... 
*/ - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_addr = (void __user *)regs->pc; - info.si_code = ILL_COPROC; - - force_sig_info(SIGILL, &info, current); + /* We have no FPU yet */ + _exception(SIGILL, regs, ILL_COPROC, regs->pc); } diff --git a/arch/avr32/kernel/vmlinux.lds.c b/arch/avr32/kernel/vmlinux.lds.c index ef13b7c7893..7ad20cfb48a 100644 --- a/arch/avr32/kernel/vmlinux.lds.c +++ b/arch/avr32/kernel/vmlinux.lds.c @@ -26,6 +26,12 @@ SECTIONS _sinittext = .; *(.text.reset) *(.init.text) + /* + * .exit.text is discarded at runtime, not + * link time, to deal with references from + * __bug_table + */ + *(.exit.text) _einittext = .; . = ALIGN(4); __tagtable_begin = .; @@ -86,6 +92,8 @@ SECTIONS __stop___ex_table = .; } + BUG_TABLE + RODATA . = ALIGN(8192); @@ -126,7 +134,6 @@ SECTIONS * thrown away, as cleanup code is never called unless it's a module. */ /DISCARD/ : { - *(.exit.text) *(.exit.data) *(.exitcall.exit) } diff --git a/arch/avr32/mach-at32ap/Kconfig b/arch/avr32/mach-at32ap/Kconfig new file mode 100644 index 00000000000..eb307838457 --- /dev/null +++ b/arch/avr32/mach-at32ap/Kconfig @@ -0,0 +1,31 @@ +if PLATFORM_AT32AP + +menu "Atmel AVR32 AP options" + +choice + prompt "AT32AP7000 static memory bus width" + depends on CPU_AT32AP7000 + default AP7000_16_BIT_SMC + help + Define the width of the AP7000 external static memory interface. + This is used to determine how to mangle the address and/or data + when doing little-endian port access. + + The current code can only support a single external memory bus + width for all chip selects, excluding the flash (which is using + raw access and is thus not affected by any of this.) + +config AP7000_32_BIT_SMC + bool "32 bit" + +config AP7000_16_BIT_SMC + bool "16 bit" + +config AP7000_8_BIT_SMC + bool "8 bit" + +endchoice + +endmenu + +endif # PLATFORM_AT32AP diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile index b21bea9af8b..f1d395724ac 100644 --- a/arch/avr32/mach-at32ap/Makefile +++ b/arch/avr32/mach-at32ap/Makefile @@ -1,2 +1,3 @@ obj-y += at32ap.o clock.o intc.o extint.o pio.o hsmc.o obj-$(CONFIG_CPU_AT32AP7000) += at32ap7000.o +obj-$(CONFIG_CPU_AT32AP7000) += time-tc.o diff --git a/arch/avr32/mach-at32ap/at32ap7000.c b/arch/avr32/mach-at32ap/at32ap7000.c index 472703f90c2..56db45b99a0 100644 --- a/arch/avr32/mach-at32ap/at32ap7000.c +++ b/arch/avr32/mach-at32ap/at32ap7000.c @@ -18,6 +18,7 @@ #include <asm/arch/sm.h> #include "clock.h" +#include "hmatrix.h" #include "pio.h" #include "sm.h" @@ -416,7 +417,15 @@ struct platform_device at32_sm_device = { .resource = sm_resource, .num_resources = ARRAY_SIZE(sm_resource), }; -DEV_CLK(pclk, at32_sm, pbb, 0); +static struct clk at32_sm_pclk = { + .name = "pclk", + .dev = &at32_sm_device.dev, + .parent = &pbb_clk, + .mode = pbb_clk_mode, + .get_rate = pbb_clk_get_rate, + .users = 1, + .index = 0, +}; static struct resource intc0_resource[] = { PBMEM(0xfff00400), @@ -442,6 +451,7 @@ static struct clk hramc_clk = { .mode = hsb_clk_mode, .get_rate = hsb_clk_get_rate, .users = 1, + .index = 3, }; static struct resource smc0_resource[] = { @@ -467,6 +477,57 @@ static struct clk pico_clk = { }; /* -------------------------------------------------------------------- + * HMATRIX + * -------------------------------------------------------------------- */ + +static struct clk hmatrix_clk = { + .name = "hmatrix_clk", + .parent = &pbb_clk, + .mode = pbb_clk_mode, + .get_rate = pbb_clk_get_rate, + .index = 2, + .users = 1, +}; +#define HMATRIX_BASE ((void 
__iomem *)0xfff00800) + +#define hmatrix_readl(reg) \ + __raw_readl((HMATRIX_BASE) + HMATRIX_##reg) +#define hmatrix_writel(reg,value) \ + __raw_writel((value), (HMATRIX_BASE) + HMATRIX_##reg) + +/* + * Set bits in the HMATRIX Special Function Register (SFR) used by the + * External Bus Interface (EBI). This can be used to enable special + * features like CompactFlash support, NAND Flash support, etc. on + * certain chipselects. + */ +static inline void set_ebi_sfr_bits(u32 mask) +{ + u32 sfr; + + clk_enable(&hmatrix_clk); + sfr = hmatrix_readl(SFR4); + sfr |= mask; + hmatrix_writel(SFR4, sfr); + clk_disable(&hmatrix_clk); +} + +/* -------------------------------------------------------------------- + * System Timer/Counter (TC) + * -------------------------------------------------------------------- */ +static struct resource at32_systc0_resource[] = { + PBMEM(0xfff00c00), + IRQ(22), +}; +struct platform_device at32_systc0_device = { + .name = "systc", + .id = 0, + .resource = at32_systc0_resource, + .num_resources = ARRAY_SIZE(at32_systc0_resource), +}; +DEV_CLK(pclk, at32_systc0, pbb, 3); + +/* -------------------------------------------------------------------- * PIO * -------------------------------------------------------------------- */ @@ -514,6 +575,8 @@ void __init at32_add_system_devices(void) platform_device_register(&smc0_device); platform_device_register(&pdc_device); + platform_device_register(&at32_systc0_device); + platform_device_register(&pio0_device); platform_device_register(&pio1_device); platform_device_register(&pio2_device); @@ -950,6 +1013,7 @@ struct clk *at32_clock_list[] = { &pbb_clk, &at32_sm_pclk, &at32_intc0_pclk, + &hmatrix_clk, &ebi_clk, &hramc_clk, &smc0_pclk, @@ -962,6 +1026,7 @@ struct clk *at32_clock_list[] = { &pio2_mck, &pio3_mck, &pio4_mck, + &at32_systc0_pclk, &atmel_usart0_usart, &atmel_usart1_usart, &atmel_usart2_usart, @@ -1024,6 +1089,9 @@ void __init at32_clock_init(void) for (i = 0; i < ARRAY_SIZE(at32_clock_list); i++) { struct clk *clk = at32_clock_list[i]; + if (clk->users == 0) + continue; + if (clk->mode == &cpu_clk_mode) cpu_mask |= 1 << clk->index; else if (clk->mode == &hsb_clk_mode) diff --git a/arch/avr32/mach-at32ap/hmatrix.h b/arch/avr32/mach-at32ap/hmatrix.h new file mode 100644 index 00000000000..d10bfb60d68 --- /dev/null +++ b/arch/avr32/mach-at32ap/hmatrix.h @@ -0,0 +1,182 @@ +/* + * Register definitions for High-Speed Bus Matrix + */ +#ifndef __HMATRIX_H +#define __HMATRIX_H + +/* HMATRIX register offsets */ +#define HMATRIX_MCFG0 0x0000 +#define HMATRIX_MCFG1 0x0004 +#define HMATRIX_MCFG2 0x0008 +#define HMATRIX_MCFG3 0x000c +#define HMATRIX_MCFG4 0x0010 +#define HMATRIX_MCFG5 0x0014 +#define HMATRIX_MCFG6 0x0018 +#define HMATRIX_MCFG7 0x001c +#define HMATRIX_MCFG8 0x0020 +#define HMATRIX_MCFG9 0x0024 +#define HMATRIX_MCFG10 0x0028 +#define HMATRIX_MCFG11 0x002c +#define HMATRIX_MCFG12 0x0030 +#define HMATRIX_MCFG13 0x0034 +#define HMATRIX_MCFG14 0x0038 +#define HMATRIX_MCFG15 0x003c +#define HMATRIX_SCFG0 0x0040 +#define HMATRIX_SCFG1 0x0044 +#define HMATRIX_SCFG2 0x0048 +#define HMATRIX_SCFG3 0x004c +#define HMATRIX_SCFG4 0x0050 +#define HMATRIX_SCFG5 0x0054 +#define HMATRIX_SCFG6 0x0058 +#define HMATRIX_SCFG7 0x005c +#define HMATRIX_SCFG8 0x0060 +#define HMATRIX_SCFG9 0x0064 +#define HMATRIX_SCFG10 0x0068 +#define HMATRIX_SCFG11 0x006c +#define HMATRIX_SCFG12 0x0070 +#define HMATRIX_SCFG13 0x0074 +#define HMATRIX_SCFG14 0x0078 +#define HMATRIX_SCFG15 0x007c +#define HMATRIX_PRAS0 0x0080 +#define HMATRIX_PRBS0 0x0084 
+#define HMATRIX_PRAS1 0x0088 +#define HMATRIX_PRBS1 0x008c +#define HMATRIX_PRAS2 0x0090 +#define HMATRIX_PRBS2 0x0094 +#define HMATRIX_PRAS3 0x0098 +#define HMATRIX_PRBS3 0x009c +#define HMATRIX_PRAS4 0x00a0 +#define HMATRIX_PRBS4 0x00a4 +#define HMATRIX_PRAS5 0x00a8 +#define HMATRIX_PRBS5 0x00ac +#define HMATRIX_PRAS6 0x00b0 +#define HMATRIX_PRBS6 0x00b4 +#define HMATRIX_PRAS7 0x00b8 +#define HMATRIX_PRBS7 0x00bc +#define HMATRIX_PRAS8 0x00c0 +#define HMATRIX_PRBS8 0x00c4 +#define HMATRIX_PRAS9 0x00c8 +#define HMATRIX_PRBS9 0x00cc +#define HMATRIX_PRAS10 0x00d0 +#define HMATRIX_PRBS10 0x00d4 +#define HMATRIX_PRAS11 0x00d8 +#define HMATRIX_PRBS11 0x00dc +#define HMATRIX_PRAS12 0x00e0 +#define HMATRIX_PRBS12 0x00e4 +#define HMATRIX_PRAS13 0x00e8 +#define HMATRIX_PRBS13 0x00ec +#define HMATRIX_PRAS14 0x00f0 +#define HMATRIX_PRBS14 0x00f4 +#define HMATRIX_PRAS15 0x00f8 +#define HMATRIX_PRBS15 0x00fc +#define HMATRIX_MRCR 0x0100 +#define HMATRIX_SFR0 0x0110 +#define HMATRIX_SFR1 0x0114 +#define HMATRIX_SFR2 0x0118 +#define HMATRIX_SFR3 0x011c +#define HMATRIX_SFR4 0x0120 +#define HMATRIX_SFR5 0x0124 +#define HMATRIX_SFR6 0x0128 +#define HMATRIX_SFR7 0x012c +#define HMATRIX_SFR8 0x0130 +#define HMATRIX_SFR9 0x0134 +#define HMATRIX_SFR10 0x0138 +#define HMATRIX_SFR11 0x013c +#define HMATRIX_SFR12 0x0140 +#define HMATRIX_SFR13 0x0144 +#define HMATRIX_SFR14 0x0148 +#define HMATRIX_SFR15 0x014c + +/* Bitfields in MCFGx */ +#define HMATRIX_ULBT_OFFSET 0 +#define HMATRIX_ULBT_SIZE 3 + +/* Bitfields in SCFGx */ +#define HMATRIX_SLOT_CYCLE_OFFSET 0 +#define HMATRIX_SLOT_CYCLE_SIZE 8 +#define HMATRIX_DEFMSTR_TYPE_OFFSET 16 +#define HMATRIX_DEFMSTR_TYPE_SIZE 2 +#define HMATRIX_FIXED_DEFMSTR_OFFSET 18 +#define HMATRIX_FIXED_DEFMSTR_SIZE 4 +#define HMATRIX_ARBT_OFFSET 24 +#define HMATRIX_ARBT_SIZE 2 + +/* Bitfields in PRASx */ +#define HMATRIX_M0PR_OFFSET 0 +#define HMATRIX_M0PR_SIZE 4 +#define HMATRIX_M1PR_OFFSET 4 +#define HMATRIX_M1PR_SIZE 4 +#define HMATRIX_M2PR_OFFSET 8 +#define HMATRIX_M2PR_SIZE 4 +#define HMATRIX_M3PR_OFFSET 12 +#define HMATRIX_M3PR_SIZE 4 +#define HMATRIX_M4PR_OFFSET 16 +#define HMATRIX_M4PR_SIZE 4 +#define HMATRIX_M5PR_OFFSET 20 +#define HMATRIX_M5PR_SIZE 4 +#define HMATRIX_M6PR_OFFSET 24 +#define HMATRIX_M6PR_SIZE 4 +#define HMATRIX_M7PR_OFFSET 28 +#define HMATRIX_M7PR_SIZE 4 + +/* Bitfields in PRBSx */ +#define HMATRIX_M8PR_OFFSET 0 +#define HMATRIX_M8PR_SIZE 4 +#define HMATRIX_M9PR_OFFSET 4 +#define HMATRIX_M9PR_SIZE 4 +#define HMATRIX_M10PR_OFFSET 8 +#define HMATRIX_M10PR_SIZE 4 +#define HMATRIX_M11PR_OFFSET 12 +#define HMATRIX_M11PR_SIZE 4 +#define HMATRIX_M12PR_OFFSET 16 +#define HMATRIX_M12PR_SIZE 4 +#define HMATRIX_M13PR_OFFSET 20 +#define HMATRIX_M13PR_SIZE 4 +#define HMATRIX_M14PR_OFFSET 24 +#define HMATRIX_M14PR_SIZE 4 +#define HMATRIX_M15PR_OFFSET 28 +#define HMATRIX_M15PR_SIZE 4 + +/* Bitfields in SFR4 */ +#define HMATRIX_CS1A_OFFSET 1 +#define HMATRIX_CS1A_SIZE 1 +#define HMATRIX_CS3A_OFFSET 3 +#define HMATRIX_CS3A_SIZE 1 +#define HMATRIX_CS4A_OFFSET 4 +#define HMATRIX_CS4A_SIZE 1 +#define HMATRIX_CS5A_OFFSET 5 +#define HMATRIX_CS5A_SIZE 1 +#define HMATRIX_DBPUC_OFFSET 8 +#define HMATRIX_DBPUC_SIZE 1 + +/* Constants for ULBT */ +#define HMATRIX_ULBT_INFINITE 0 +#define HMATRIX_ULBT_SINGLE 1 +#define HMATRIX_ULBT_FOUR_BEAT 2 +#define HMATRIX_ULBT_EIGHT_BEAT 3 +#define HMATRIX_ULBT_SIXTEEN_BEAT 4 + +/* Constants for DEFMSTR_TYPE */ +#define HMATRIX_DEFMSTR_TYPE_NO_DEFAULT 0 +#define HMATRIX_DEFMSTR_TYPE_LAST_DEFAULT 1 +#define HMATRIX_DEFMSTR_TYPE_FIXED_DEFAULT 2 + 
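
The SFR4 bits above are the ones set_ebi_sfr_bits() in at32ap7000.c (earlier in this patch) is meant to poke, and the HMATRIX_BIT()/HMATRIX_BF() helpers defined a few lines further down make that readable. A hypothetical usage sketch; which chip select corresponds to which EBI feature is left to the chip documentation and is not asserted here:

	/* Route chip select 4 to its alternate EBI function (e.g. a
	 * CompactFlash slot).  HMATRIX_BIT(CS4A) expands to (1 << 4). */
	set_ebi_sfr_bits(HMATRIX_BIT(CS4A));

	/* HMATRIX_BF() builds a masked field value, e.g. for an SCFGx register:
	 * HMATRIX_BF(DEFMSTR_TYPE, HMATRIX_DEFMSTR_TYPE_FIXED_DEFAULT) == (2 << 16) */
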
+/* Constants for ARBT */ +#define HMATRIX_ARBT_ROUND_ROBIN 0 +#define HMATRIX_ARBT_FIXED_PRIORITY 1 + +/* Bit manipulation macros */ +#define HMATRIX_BIT(name) \ + (1 << HMATRIX_##name##_OFFSET) +#define HMATRIX_BF(name,value) \ + (((value) & ((1 << HMATRIX_##name##_SIZE) - 1)) \ + << HMATRIX_##name##_OFFSET) +#define HMATRIX_BFEXT(name,value) \ + (((value) >> HMATRIX_##name##_OFFSET) \ + & ((1 << HMATRIX_##name##_SIZE) - 1)) +#define HMATRIX_BFINS(name,value,old) \ + (((old) & ~(((1 << HMATRIX_##name##_SIZE) - 1) \ + << HMATRIX_##name##_OFFSET)) \ + | HMATRIX_BF(name,value)) + +#endif /* __HMATRIX_H */ diff --git a/arch/avr32/mach-at32ap/hsmc.c b/arch/avr32/mach-at32ap/hsmc.c index 7691721928a..5e22a750632 100644 --- a/arch/avr32/mach-at32ap/hsmc.c +++ b/arch/avr32/mach-at32ap/hsmc.c @@ -75,12 +75,35 @@ int smc_set_configuration(int cs, const struct smc_config *config) return -EINVAL; } + switch (config->nwait_mode) { + case 0: + mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_DISABLED); + break; + case 1: + mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_RESERVED); + break; + case 2: + mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_FROZEN); + break; + case 3: + mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_READY); + break; + default: + return -EINVAL; + } + + if (config->tdf_cycles) { + mode |= HSMC_BF(TDF_CYCLES, config->tdf_cycles); + } + if (config->nrd_controlled) mode |= HSMC_BIT(READ_MODE); if (config->nwe_controlled) mode |= HSMC_BIT(WRITE_MODE); if (config->byte_write) mode |= HSMC_BIT(BAT); + if (config->tdf_mode) + mode |= HSMC_BIT(TDF_MODE); pr_debug("smc cs%d: setup/%08x pulse/%08x cycle/%08x mode/%08x\n", cs, setup, pulse, cycle, mode); diff --git a/arch/avr32/mach-at32ap/time-tc.c b/arch/avr32/mach-at32ap/time-tc.c new file mode 100644 index 00000000000..e3070bdd4bb --- /dev/null +++ b/arch/avr32/mach-at32ap/time-tc.c @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2004-2007 Atmel Corporation + * + * Based on MIPS implementation arch/mips/kernel/time.c + * Copyright 2001 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/clocksource.h> +#include <linux/time.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel_stat.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/profile.h> +#include <linux/sysdev.h> +#include <linux/err.h> + +#include <asm/div64.h> +#include <asm/sysreg.h> +#include <asm/io.h> +#include <asm/sections.h> + +#include <asm/arch/time.h> + +/* how many counter cycles in a jiffy? 
*/ +static u32 cycles_per_jiffy; + +/* the count value for the next timer interrupt */ +static u32 expirelo; + +/* the I/O registers of the TC module */ +static void __iomem *ioregs; + +cycle_t read_cycle_count(void) +{ + return (cycle_t)timer_read(ioregs, 0, CV); +} + +struct clocksource clocksource_avr32 = { + .name = "avr32", + .rating = 342, + .read = read_cycle_count, + .mask = CLOCKSOURCE_MASK(16), + .shift = 16, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static void avr32_timer_ack(void) +{ + u16 count = expirelo; + + /* Ack this timer interrupt and set the next one, use a u16 + * variable so it will wrap around correctly */ + count += cycles_per_jiffy; + expirelo = count; + timer_write(ioregs, 0, RC, expirelo); + + /* Check to see if we have missed any timer interrupts */ + count = timer_read(ioregs, 0, CV); + if ((count - expirelo) < 0x7fff) { + expirelo = count + cycles_per_jiffy; + timer_write(ioregs, 0, RC, expirelo); + } +} + +u32 avr32_hpt_read(void) +{ + return timer_read(ioregs, 0, CV); +} + +static int avr32_timer_calc_div_and_set_jiffies(struct clk *pclk) +{ + unsigned int cycles_max = (clocksource_avr32.mask + 1) / 2; + unsigned int divs[] = { 4, 8, 16, 32 }; + int divs_size = sizeof(divs) / sizeof(*divs); + int i = 0; + unsigned long count_hz; + unsigned long shift; + unsigned long mult; + int clock_div = -1; + u64 tmp; + + shift = clocksource_avr32.shift; + + do { + count_hz = clk_get_rate(pclk) / divs[i]; + mult = clocksource_hz2mult(count_hz, shift); + clocksource_avr32.mult = mult; + + tmp = TICK_NSEC; + tmp <<= shift; + tmp += mult / 2; + do_div(tmp, mult); + + cycles_per_jiffy = tmp; + } while (cycles_per_jiffy > cycles_max && ++i < divs_size); + + clock_div = i + 1; + + if (clock_div > divs_size) { + pr_debug("timer: could not calculate clock divider\n"); + return -EFAULT; + } + + /* Set the clock divider */ + timer_write(ioregs, 0, CMR, TIMER_BF(CMR_TCCLKS, clock_div)); + + return 0; +} + +int avr32_hpt_init(unsigned int count) +{ + struct resource *regs; + struct clk *pclk; + int irq = -1; + int ret = 0; + + ret = -ENXIO; + + irq = platform_get_irq(&at32_systc0_device, 0); + if (irq < 0) { + pr_debug("timer: could not get irq\n"); + goto out_error; + } + + pclk = clk_get(&at32_systc0_device.dev, "pclk"); + if (IS_ERR(pclk)) { + pr_debug("timer: could not get clk: %ld\n", PTR_ERR(pclk)); + goto out_error; + } + clk_enable(pclk); + + regs = platform_get_resource(&at32_systc0_device, IORESOURCE_MEM, 0); + if (!regs) { + pr_debug("timer: could not get resource\n"); + goto out_error_clk; + } + + ioregs = ioremap(regs->start, regs->end - regs->start + 1); + if (!ioregs) { + pr_debug("timer: could not get ioregs\n"); + goto out_error_clk; + } + + ret = avr32_timer_calc_div_and_set_jiffies(pclk); + if (ret) + goto out_error_io; + + ret = setup_irq(irq, &timer_irqaction); + if (ret) { + pr_debug("timer: could not request irq %d: %d\n", + irq, ret); + goto out_error_io; + } + + expirelo = (timer_read(ioregs, 0, CV) / cycles_per_jiffy + 1) + * cycles_per_jiffy; + + /* Enable clock and interrupts on RC compare */ + timer_write(ioregs, 0, CCR, TIMER_BIT(CCR_CLKEN)); + timer_write(ioregs, 0, IER, TIMER_BIT(IER_CPCS)); + /* Set cycles to first interrupt */ + timer_write(ioregs, 0, RC, expirelo); + + printk(KERN_INFO "timer: AT32AP system timer/counter at 0x%p irq %d\n", + ioregs, irq); + + return 0; + +out_error_io: + iounmap(ioregs); +out_error_clk: + clk_put(pclk); +out_error: + return ret; +} + +int avr32_hpt_start(void) +{ + timer_write(ioregs, 0, CCR, 
TIMER_BIT(CCR_SWTRG)); + return 0; +} + +irqreturn_t timer_interrupt(int irq, void *dev_id) +{ + unsigned int sr = timer_read(ioregs, 0, SR); + + if (sr & TIMER_BIT(SR_CPCS)) { + /* ack timer interrupt and try to set next interrupt */ + avr32_timer_ack(); + + /* + * Call the generic timer interrupt handler + */ + write_seqlock(&xtime_lock); + do_timer(1); + write_sequnlock(&xtime_lock); + + /* + * In UP mode, we call local_timer_interrupt() to do profiling + * and process accounting. + * + * SMP is not supported yet. + */ + local_timer_interrupt(irq, dev_id); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 678557260a3..146ebdbdc30 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -16,26 +16,8 @@ #include <asm/kdebug.h> #include <asm/mmu_context.h> #include <asm/sysreg.h> -#include <asm/uaccess.h> #include <asm/tlb.h> - -#ifdef DEBUG -static void dump_code(unsigned long pc) -{ - char *p = (char *)pc; - char val; - int i; - - - printk(KERN_DEBUG "Code:"); - for (i = 0; i < 16; i++) { - if (__get_user(val, p + i)) - break; - printk(" %02x", val); - } - printk("\n"); -} -#endif +#include <asm/uaccess.h> #ifdef CONFIG_KPROBES ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); @@ -68,17 +50,19 @@ static inline int notify_page_fault(enum die_val val, struct pt_regs *regs, } #endif +int exception_trace = 1; + /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. * * ecr is the Exception Cause Register. Possible values are: - * 5: Page not found (instruction access) * 6: Protection fault (instruction access) - * 12: Page not found (read access) - * 13: Page not found (write access) - * 14: Protection fault (read access) - * 15: Protection fault (write access) + * 15: Protection fault (read access) + * 16: Protection fault (write access) + * 20: Page not found (instruction access) + * 24: Page not found (read access) + * 28: Page not found (write access) */ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) { @@ -88,7 +72,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) const struct exception_table_entry *fixup; unsigned long address; unsigned long page; - int writeaccess = 0; + int writeaccess; + long signr; + int code; if (notify_page_fault(DIE_PAGE_FAULT, regs, ecr, SIGSEGV) == NOTIFY_STOP) @@ -99,6 +85,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) tsk = current; mm = tsk->mm; + signr = SIGSEGV; + code = SEGV_MAPERR; + /* * If we're in an interrupt or have no user context, we must * not take the fault... @@ -125,7 +114,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs) * can handle it... */ good_area: - //pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags); + code = SEGV_ACCERR; + writeaccess = 0; + switch (ecr) { case ECR_PROTECTION_X: case ECR_TLB_MISS_X: @@ -176,46 +167,24 @@ survive: * map. Fix it, but check if it's kernel or user first... */ bad_area: - pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n", - tsk->comm, tsk->pid, address, ecr); - up_read(&mm->mmap_sem); if (user_mode(regs)) { - /* Hmm...we have to pass address and ecr somehow... 
*/ - /* tsk->thread.address = address; - tsk->thread.error_code = ecr; */ -#ifdef DEBUG - show_regs(regs); - dump_code(regs->pc); - - page = sysreg_read(PTBR); - printk("ptbr = %08lx", page); - if (page) { - page = ((unsigned long *)page)[address >> 22]; - printk(" pgd = %08lx", page); - if (page & _PAGE_PRESENT) { - page &= PAGE_MASK; - address &= 0x003ff000; - page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT]; - printk(" pte = %08lx\n", page); - } - } -#endif - pr_debug("Sending SIGSEGV to PID %d...\n", - tsk->pid); - force_sig(SIGSEGV, tsk); + if (exception_trace) + printk("%s%s[%d]: segfault at %08lx pc %08lx " + "sp %08lx ecr %lu\n", + is_init(tsk) ? KERN_EMERG : KERN_INFO, + tsk->comm, tsk->pid, address, regs->pc, + regs->sp, ecr); + _exception(SIGSEGV, regs, code, address); return; } no_context: - pr_debug("No context\n"); - /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->pc); if (fixup) { regs->pc = fixup->fixup; - pr_debug("Found fixup at %08lx\n", fixup->fixup); return; } @@ -230,7 +199,6 @@ no_context: printk(KERN_ALERT "Unable to handle kernel paging request"); printk(" at virtual address %08lx\n", address); - printk(KERN_ALERT "pc = %08lx\n", regs->pc); page = sysreg_read(PTBR); printk(KERN_ALERT "ptbr = %08lx", page); @@ -241,20 +209,20 @@ no_context: page &= PAGE_MASK; address &= 0x003ff000; page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT]; - printk(" pte = %08lx\n", page); + printk(" pte = %08lx", page); } } - die("\nOops", regs, ecr); - do_exit(SIGKILL); + printk("\n"); + die("Kernel access of bad area", regs, signr); + return; /* * We ran out of memory, or some other thing happened to us * that made us unable to handle the page fault gracefully. */ out_of_memory: - printk("Out of memory\n"); up_read(&mm->mmap_sem); - if (current->pid == 1) { + if (is_init(current)) { yield(); down_read(&mm->mmap_sem); goto survive; @@ -267,21 +235,20 @@ out_of_memory: do_sigbus: up_read(&mm->mmap_sem); - /* - * Send a sigbus, regardless of whether we were in kernel or - * user mode. - */ - /* address, error_code, trap_no, ... */ -#ifdef DEBUG - show_regs(regs); - dump_code(regs->pc); -#endif - pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid); - force_sig(SIGBUS, tsk); - /* Kernel mode? Handle exceptions or die */ + signr = SIGBUS; + code = BUS_ADRERR; if (!user_mode(regs)) goto no_context; + + if (exception_trace) + printk("%s%s[%d]: bus error at %08lx pc %08lx " + "sp %08lx ecr %lu\n", + is_init(tsk) ? KERN_EMERG : KERN_INFO, + tsk->comm, tsk->pid, address, regs->pc, + regs->sp, ecr); + + _exception(SIGBUS, regs, BUS_ADRERR, address); } asmlinkage void do_bus_error(unsigned long addr, int write_access, @@ -292,8 +259,7 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access, addr, write_access ? 
"write" : "read"); printk(KERN_INFO "DTLB dump:\n"); dump_dtlb(); - die("Bus Error", regs, write_access); - do_exit(SIGKILL); + die("Bus Error", regs, SIGKILL); } /* diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c index 70da6894acc..82cf70854b9 100644 --- a/arch/avr32/mm/init.c +++ b/arch/avr32/mm/init.c @@ -10,11 +10,9 @@ #include <linux/mm.h> #include <linux/swap.h> #include <linux/init.h> -#include <linux/initrd.h> #include <linux/mmzone.h> #include <linux/bootmem.h> #include <linux/pagemap.h> -#include <linux/pfn.h> #include <linux/nodemask.h> #include <asm/page.h> @@ -78,242 +76,6 @@ void show_mem(void) printk ("%d pages swap cached\n", cached); } -static void __init print_memory_map(const char *what, - struct tag_mem_range *mem) -{ - printk ("%s:\n", what); - for (; mem; mem = mem->next) { - printk (" %08lx - %08lx\n", - (unsigned long)mem->addr, - (unsigned long)(mem->addr + mem->size)); - } -} - -#define MAX_LOWMEM HIGHMEM_START -#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM) - -/* - * Sort a list of memory regions in-place by ascending address. - * - * We're using bubble sort because we only have singly linked lists - * with few elements. - */ -static void __init sort_mem_list(struct tag_mem_range **pmem) -{ - int done; - struct tag_mem_range **a, **b; - - if (!*pmem) - return; - - do { - done = 1; - a = pmem, b = &(*pmem)->next; - while (*b) { - if ((*a)->addr > (*b)->addr) { - struct tag_mem_range *tmp; - tmp = (*b)->next; - (*b)->next = *a; - *a = *b; - *b = tmp; - done = 0; - } - a = &(*a)->next; - b = &(*a)->next; - } - } while (!done); -} - -/* - * Find a free memory region large enough for storing the - * bootmem bitmap. - */ -static unsigned long __init -find_bootmap_pfn(const struct tag_mem_range *mem) -{ - unsigned long bootmap_pages, bootmap_len; - unsigned long node_pages = PFN_UP(mem->size); - unsigned long bootmap_addr = mem->addr; - struct tag_mem_range *reserved = mem_reserved; - struct tag_mem_range *ramdisk = mem_ramdisk; - unsigned long kern_start = virt_to_phys(_stext); - unsigned long kern_end = virt_to_phys(_end); - - bootmap_pages = bootmem_bootmap_pages(node_pages); - bootmap_len = bootmap_pages << PAGE_SHIFT; - - /* - * Find a large enough region without reserved pages for - * storing the bootmem bitmap. We can take advantage of the - * fact that all lists have been sorted. - * - * We have to check explicitly reserved regions as well as the - * kernel image and any RAMDISK images... - * - * Oh, and we have to make sure we don't overwrite the taglist - * since we're going to use it until the bootmem allocator is - * fully up and running. 
- */ - while (1) { - if ((bootmap_addr < kern_end) && - ((bootmap_addr + bootmap_len) > kern_start)) - bootmap_addr = kern_end; - - while (reserved && - (bootmap_addr >= (reserved->addr + reserved->size))) - reserved = reserved->next; - - if (reserved && - ((bootmap_addr + bootmap_len) >= reserved->addr)) { - bootmap_addr = reserved->addr + reserved->size; - continue; - } - - while (ramdisk && - (bootmap_addr >= (ramdisk->addr + ramdisk->size))) - ramdisk = ramdisk->next; - - if (!ramdisk || - ((bootmap_addr + bootmap_len) < ramdisk->addr)) - break; - - bootmap_addr = ramdisk->addr + ramdisk->size; - } - - if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size)) - return ~0UL; - - return PFN_UP(bootmap_addr); -} - -void __init setup_bootmem(void) -{ - unsigned bootmap_size; - unsigned long first_pfn, bootmap_pfn, pages; - unsigned long max_pfn, max_low_pfn; - unsigned long kern_start = virt_to_phys(_stext); - unsigned long kern_end = virt_to_phys(_end); - unsigned node = 0; - struct tag_mem_range *bank, *res; - - sort_mem_list(&mem_phys); - sort_mem_list(&mem_reserved); - - print_memory_map("Physical memory", mem_phys); - print_memory_map("Reserved memory", mem_reserved); - - nodes_clear(node_online_map); - - if (mem_ramdisk) { -#ifdef CONFIG_BLK_DEV_INITRD - initrd_start = (unsigned long)__va(mem_ramdisk->addr); - initrd_end = initrd_start + mem_ramdisk->size; - - print_memory_map("RAMDISK images", mem_ramdisk); - if (mem_ramdisk->next) - printk(KERN_WARNING - "Warning: Only the first RAMDISK image " - "will be used\n"); - sort_mem_list(&mem_ramdisk); -#else - printk(KERN_WARNING "RAM disk image present, but " - "no initrd support in kernel!\n"); -#endif - } - - if (mem_phys->next) - printk(KERN_WARNING "Only using first memory bank\n"); - - for (bank = mem_phys; bank; bank = NULL) { - first_pfn = PFN_UP(bank->addr); - max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size); - bootmap_pfn = find_bootmap_pfn(bank); - if (bootmap_pfn > max_pfn) - panic("No space for bootmem bitmap!\n"); - - if (max_low_pfn > MAX_LOWMEM_PFN) { - max_low_pfn = MAX_LOWMEM_PFN; -#ifndef CONFIG_HIGHMEM - /* - * Lowmem is memory that can be addressed - * directly through P1/P2 - */ - printk(KERN_WARNING - "Node %u: Only %ld MiB of memory will be used.\n", - node, MAX_LOWMEM >> 20); - printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); -#else -#error HIGHMEM is not supported by AVR32 yet -#endif - } - - /* Initialize the boot-time allocator with low memory only. */ - bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn, - first_pfn, max_low_pfn); - - printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n", - node, NODE_DATA(node)->bdata, - NODE_DATA(node)->bdata->node_bootmem_map); - - /* - * Register fully available RAM pages with the bootmem - * allocator. - */ - pages = max_low_pfn - first_pfn; - free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn), - PFN_PHYS(pages)); - - /* - * Reserve space for the kernel image (if present in - * this node)... - */ - if ((kern_start >= PFN_PHYS(first_pfn)) && - (kern_start < PFN_PHYS(max_pfn))) { - printk("Node %u: Kernel image %08lx - %08lx\n", - node, kern_start, kern_end); - reserve_bootmem_node(NODE_DATA(node), kern_start, - kern_end - kern_start); - } - - /* ...the bootmem bitmap... */ - reserve_bootmem_node(NODE_DATA(node), - PFN_PHYS(bootmap_pfn), - bootmap_size); - - /* ...any RAMDISK images... 
*/ - for (res = mem_ramdisk; res; res = res->next) { - if (res->addr > PFN_PHYS(max_pfn)) - break; - - if (res->addr >= PFN_PHYS(first_pfn)) { - printk("Node %u: RAMDISK %08lx - %08lx\n", - node, - (unsigned long)res->addr, - (unsigned long)(res->addr + res->size)); - reserve_bootmem_node(NODE_DATA(node), - res->addr, res->size); - } - } - - /* ...and any other reserved regions. */ - for (res = mem_reserved; res; res = res->next) { - if (res->addr > PFN_PHYS(max_pfn)) - break; - - if (res->addr >= PFN_PHYS(first_pfn)) { - printk("Node %u: Reserved %08lx - %08lx\n", - node, - (unsigned long)res->addr, - (unsigned long)(res->addr + res->size)); - reserve_bootmem_node(NODE_DATA(node), - res->addr, res->size); - } - } - - node_set_online(node); - } -} - /* * paging_init() sets up the page tables * diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 0f293aa7b0f..e6ec418093e 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -41,6 +41,11 @@ config GENERIC_HWEIGHT config GENERIC_TIME def_bool y +config GENERIC_BUG + bool + depends on BUG + default y + config NO_IOMEM def_bool y @@ -514,6 +519,14 @@ config KEXEC current kernel, and to start another kernel. It is like a reboot but is independent of hardware/microcode support. +config ZFCPDUMP + tristate "zfcpdump support" + select SMP + default n + help + Select this option if you want to build an zfcpdump enabled kernel. + Refer to "Documentation/s390/zfcpdump.txt" for more details on this. + endmenu source "net/Kconfig" diff --git a/arch/s390/Makefile b/arch/s390/Makefile index b1e55849646..68441e0e74b 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -67,8 +67,10 @@ endif ifeq ($(call cc-option-yn,-mstack-size=8192 -mstack-guard=128),y) cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE) +ifneq ($(call cc-option-yn,-mstack-size=8192),y) cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD) endif +endif ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack @@ -103,6 +105,9 @@ install: vmlinux image: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +zfcpdump: + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 0c3cf4b16ae..ee89b33145d 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -668,45 +668,7 @@ EXPORT_SYMBOL_GPL(appldata_register_ops); EXPORT_SYMBOL_GPL(appldata_unregister_ops); EXPORT_SYMBOL_GPL(appldata_diag); -#ifdef MODULE -/* - * Kernel symbols needed by appldata_mem and appldata_os modules. - * However, if this file is compiled as a module (for testing only), these - * symbols are not exported. In this case, we define them locally and export - * those. 
- */ -void si_swapinfo(struct sysinfo *val) -{ - val->freeswap = -1ul; - val->totalswap = -1ul; -} - -unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200, - -1 - FIXED_1/200}; -int nr_threads = -1; - -void get_full_page_state(struct page_state *ps) -{ - memset(ps, -1, sizeof(struct page_state)); -} - -unsigned long nr_running(void) -{ - return -1; -} - -unsigned long nr_iowait(void) -{ - return -1; -} - -/*unsigned long nr_context_switches(void) -{ - return -1; -}*/ -#endif /* MODULE */ EXPORT_SYMBOL_GPL(si_swapinfo); EXPORT_SYMBOL_GPL(nr_threads); EXPORT_SYMBOL_GPL(nr_running); EXPORT_SYMBOL_GPL(nr_iowait); -//EXPORT_SYMBOL_GPL(nr_context_switches); diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 969639f3197..af4460ec381 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -25,99 +25,100 @@ */ #include <linux/init.h> #include <linux/module.h> -#include <linux/mm.h> #include <linux/crypto.h> -#include <asm/scatterlist.h> -#include <asm/byteorder.h> + #include "crypt_s390.h" #define SHA1_DIGEST_SIZE 20 #define SHA1_BLOCK_SIZE 64 -struct crypt_s390_sha1_ctx { - u64 count; +struct s390_sha1_ctx { + u64 count; /* message length */ u32 state[5]; - u32 buf_len; - u8 buffer[2 * SHA1_BLOCK_SIZE]; + u8 buf[2 * SHA1_BLOCK_SIZE]; }; static void sha1_init(struct crypto_tfm *tfm) { - struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->state[0] = 0x67452301; - ctx->state[1] = 0xEFCDAB89; - ctx->state[2] = 0x98BADCFE; - ctx->state[3] = 0x10325476; - ctx->state[4] = 0xC3D2E1F0; - - ctx->count = 0; - ctx->buf_len = 0; + struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + + sctx->state[0] = 0x67452301; + sctx->state[1] = 0xEFCDAB89; + sctx->state[2] = 0x98BADCFE; + sctx->state[3] = 0x10325476; + sctx->state[4] = 0xC3D2E1F0; + sctx->count = 0; } static void sha1_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) { - struct crypt_s390_sha1_ctx *sctx; - long imd_len; - - sctx = crypto_tfm_ctx(tfm); - sctx->count += len * 8; /* message bit length */ - - /* anything in buffer yet? -> must be completed */ - if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { - /* complete full block and hash */ - memcpy(sctx->buffer + sctx->buf_len, data, - SHA1_BLOCK_SIZE - sctx->buf_len); - crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, - SHA1_BLOCK_SIZE); - data += SHA1_BLOCK_SIZE - sctx->buf_len; - len -= SHA1_BLOCK_SIZE - sctx->buf_len; - sctx->buf_len = 0; + struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + unsigned int index; + int ret; + + /* how much is already in the buffer? */ + index = sctx->count & 0x3f; + + sctx->count += len; + + if (index + len < SHA1_BLOCK_SIZE) + goto store; + + /* process one stored block */ + if (index) { + memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index); + ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, + SHA1_BLOCK_SIZE); + BUG_ON(ret != SHA1_BLOCK_SIZE); + data += SHA1_BLOCK_SIZE - index; + len -= SHA1_BLOCK_SIZE - index; } - /* rest of data contains full blocks? */ - imd_len = len & ~0x3ful; - if (imd_len) { - crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); - data += imd_len; - len -= imd_len; + /* process as many blocks as possible */ + if (len >= SHA1_BLOCK_SIZE) { + ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, + len & ~(SHA1_BLOCK_SIZE - 1)); + BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1))); + data += ret; + len -= ret; } - /* anything left? 
store in buffer */ - if (len) { - memcpy(sctx->buffer + sctx->buf_len , data, len); - sctx->buf_len += len; - } -} +store: + /* anything left? */ + if (len) + memcpy(sctx->buf + index , data, len); +} -static void pad_message(struct crypt_s390_sha1_ctx* sctx) +/* Add padding and return the message digest. */ +static void sha1_final(struct crypto_tfm *tfm, u8 *out) { - int index; + struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + u64 bits; + unsigned int index, end; + int ret; + + /* must perform manual padding */ + index = sctx->count & 0x3f; + end = (index < 56) ? SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE); - index = sctx->buf_len; - sctx->buf_len = (sctx->buf_len < 56) ? - SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; /* start pad with 1 */ - sctx->buffer[index] = 0x80; + sctx->buf[index] = 0x80; + /* pad with zeros */ index++; - memset(sctx->buffer + index, 0x00, sctx->buf_len - index); - /* append length */ - memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, - sizeof sctx->count); -} + memset(sctx->buf + index, 0x00, end - index - 8); -/* Add padding and return the message digest. */ -static void sha1_final(struct crypto_tfm *tfm, u8 *out) -{ - struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); + /* append message length */ + bits = sctx->count * 8; + memcpy(sctx->buf + end - 8, &bits, sizeof(bits)); + + ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end); + BUG_ON(ret != end); - /* must perform manual padding */ - pad_message(sctx); - crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); /* copy digest to out */ memcpy(out, sctx->state, SHA1_DIGEST_SIZE); + /* wipe context */ memset(sctx, 0, sizeof *sctx); } @@ -128,7 +129,7 @@ static struct crypto_alg alg = { .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), + .cra_ctxsize = sizeof(struct s390_sha1_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .digest = { diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 78436c696d3..2ced3330bce 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -26,7 +26,7 @@ #define SHA256_BLOCK_SIZE 64 struct s390_sha256_ctx { - u64 count; + u64 count; /* message length */ u32 state[8]; u8 buf[2 * SHA256_BLOCK_SIZE]; }; @@ -54,10 +54,9 @@ static void sha256_update(struct crypto_tfm *tfm, const u8 *data, int ret; /* how much is already in the buffer? */ - index = sctx->count / 8 & 0x3f; + index = sctx->count & 0x3f; - /* update message bit length */ - sctx->count += len * 8; + sctx->count += len; if ((index + len) < SHA256_BLOCK_SIZE) goto store; @@ -87,12 +86,17 @@ store: memcpy(sctx->buf + index , data, len); } -static void pad_message(struct s390_sha256_ctx* sctx) +/* Add padding and return the message digest */ +static void sha256_final(struct crypto_tfm *tfm, u8 *out) { - int index, end; + struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); + u64 bits; + unsigned int index, end; + int ret; - index = sctx->count / 8 & 0x3f; - end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE; + /* must perform manual padding */ + index = sctx->count & 0x3f; + end = (index < 56) ? 
SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE); /* start pad with 1 */ sctx->buf[index] = 0x80; @@ -102,21 +106,11 @@ static void pad_message(struct s390_sha256_ctx* sctx) memset(sctx->buf + index, 0x00, end - index - 8); /* append message length */ - memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count); - - sctx->count = end * 8; -} - -/* Add padding and return the message digest */ -static void sha256_final(struct crypto_tfm *tfm, u8 *out) -{ - struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); - - /* must perform manual padding */ - pad_message(sctx); + bits = sctx->count * 8; + memcpy(sctx->buf + end - 8, &bits, sizeof(bits)); - crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, - sctx->count / 8); + ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end); + BUG_ON(ret != end); /* copy digest to out */ memcpy(out, sctx->state, SHA256_DIGEST_SIZE); diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 741d2bbb2b3..0e4da8a7d82 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -12,6 +12,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y # CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_BUG=y CONFIG_NO_IOMEM=y CONFIG_S390=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" @@ -166,6 +167,7 @@ CONFIG_NO_IDLE_HZ=y CONFIG_NO_IDLE_HZ_INIT=y CONFIG_S390_HYPFS_FS=y CONFIG_KEXEC=y +# CONFIG_ZFCPDUMP is not set # # Networking @@ -705,6 +707,7 @@ CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_SPINLOCK_SLEEP=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_LIST is not set diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 5492d25d7d6..3195d375bd5 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional obj-y := bitmap.o traps.o time.o process.o base.o early.o \ setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ - semaphore.o s390_ext.o debug.o irq.o ipl.o + semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 664c669b185..5236fdb17fc 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -495,29 +495,34 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) * sys32_execve() executes a new program after the asm stub has set * things up for us. This should basically do what I want it to. 
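The sha1_s390/sha256_s390 rework earlier in this patch switches sctx->count from a bit count to a byte count: update() first completes a partially filled block (index = count & 0x3f), then feeds as many whole blocks as possible to the KIMD operation, and final() appends the 0x80 marker, zero padding and the 64-bit message length in bits before hashing the last one or two blocks. A minimal userspace sketch of just that buffering and padding arithmetic; process_block() is a stand-in for crypt_s390_kimd(), and the length is stored big endian by hand, which the s390 code gets for free on a big-endian machine:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define BLOCK_SIZE 64

  struct ctx {
          uint64_t count;                 /* message length in bytes */
          uint8_t buf[2 * BLOCK_SIZE];
  };

  /* Stand-in for crypt_s390_kimd(); it only reports what would be hashed. */
  static void process_block(const uint8_t *data, size_t len)
  {
          printf("hashing %zu byte(s)\n", len);
  }

  static void hash_update(struct ctx *ctx, const uint8_t *data, size_t len)
  {
          size_t index = ctx->count & 0x3f;       /* bytes already buffered */

          ctx->count += len;
          if (index + len < BLOCK_SIZE) {         /* not a full block yet */
                  memcpy(ctx->buf + index, data, len);
                  return;
          }
          if (index) {                            /* complete the stored block */
                  memcpy(ctx->buf + index, data, BLOCK_SIZE - index);
                  process_block(ctx->buf, BLOCK_SIZE);
                  data += BLOCK_SIZE - index;
                  len -= BLOCK_SIZE - index;
          }
          if (len >= BLOCK_SIZE) {                /* as many full blocks as possible */
                  process_block(data, len & ~(BLOCK_SIZE - 1));
                  data += len & ~(BLOCK_SIZE - 1);
                  len &= BLOCK_SIZE - 1;
          }
          memcpy(ctx->buf, data, len);            /* keep the tail for later */
  }

  static void hash_final(struct ctx *ctx)
  {
          size_t index = ctx->count & 0x3f;
          size_t end = index < 56 ? BLOCK_SIZE : 2 * BLOCK_SIZE;
          uint64_t bits = ctx->count * 8;
          int i;

          ctx->buf[index++] = 0x80;               /* the single 1 bit */
          memset(ctx->buf + index, 0, end - index - 8);
          for (i = 0; i < 8; i++)                 /* length in bits, big endian */
                  ctx->buf[end - 8 + i] = bits >> (56 - 8 * i);
          process_block(ctx->buf, end);
  }

  int main(void)
  {
          struct ctx ctx = { 0 };
          uint8_t msg[100] = { 0 };

          hash_update(&ctx, msg, sizeof(msg));
          hash_final(&ctx);
          return 0;
  }

For a 100-byte message this hashes one full block from update() and one 64-byte padded block from final(), which is exactly the index/end arithmetic the new sha1_final() and sha256_final() perform before their last KIMD call.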
*/ -asmlinkage long -sys32_execve(struct pt_regs regs) +asmlinkage long sys32_execve(void) { - int error; - char * filename; + struct pt_regs *regs = task_pt_regs(current); + char *filename; + unsigned long result; + int rc; - filename = getname(compat_ptr(regs.orig_gpr2)); - error = PTR_ERR(filename); - if (IS_ERR(filename)) + filename = getname(compat_ptr(regs->orig_gpr2)); + if (IS_ERR(filename)) { + result = PTR_ERR(filename); goto out; - error = compat_do_execve(filename, compat_ptr(regs.gprs[3]), - compat_ptr(regs.gprs[4]), ®s); - if (error == 0) - { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - current->thread.fp_regs.fpc=0; - asm volatile("sfpc %0,0" : : "d" (0)); } + rc = compat_do_execve(filename, compat_ptr(regs->gprs[3]), + compat_ptr(regs->gprs[4]), regs); + if (rc) { + result = rc; + goto out_putname; + } + task_lock(current); + current->ptrace &= ~PT_DTRACE; + task_unlock(current); + current->thread.fp_regs.fpc=0; + asm volatile("sfpc %0,0" : : "d" (0)); + result = regs->gprs[2]; +out_putname: putname(filename); out: - return error; + return result; } @@ -918,19 +923,20 @@ asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count) return sys_write(fd, buf, count); } -asmlinkage long sys32_clone(struct pt_regs regs) +asmlinkage long sys32_clone(void) { - unsigned long clone_flags; - unsigned long newsp; + struct pt_regs *regs = task_pt_regs(current); + unsigned long clone_flags; + unsigned long newsp; int __user *parent_tidptr, *child_tidptr; - clone_flags = regs.gprs[3] & 0xffffffffUL; - newsp = regs.orig_gpr2 & 0x7fffffffUL; - parent_tidptr = compat_ptr(regs.gprs[4]); - child_tidptr = compat_ptr(regs.gprs[5]); - if (!newsp) - newsp = regs.gprs[15]; - return do_fork(clone_flags, newsp, ®s, 0, + clone_flags = regs->gprs[3] & 0xffffffffUL; + newsp = regs->orig_gpr2 & 0x7fffffffUL; + parent_tidptr = compat_ptr(regs->gprs[4]); + child_tidptr = compat_ptr(regs->gprs[5]); + if (!newsp) + newsp = regs->gprs[15]; + return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); } diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 887a9881d0d..80a54a0149a 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -255,9 +255,9 @@ sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, } asmlinkage long -sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss, - struct pt_regs *regs) +sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss) { + struct pt_regs *regs = task_pt_regs(current); stack_t kss, koss; unsigned long ss_sp; int ret, err = 0; @@ -344,8 +344,9 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) return 0; } -asmlinkage long sys32_sigreturn(struct pt_regs *regs) +asmlinkage long sys32_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; sigset_t set; @@ -370,8 +371,9 @@ badframe: return 0; } -asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) +asmlinkage long sys32_rt_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; sigset_t set; stack_t st; @@ -407,8 +409,8 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) return regs->gprs[2]; badframe: - force_sig(SIGSEGV, current); - return 0; + force_sig(SIGSEGV, current); + return 0; } /* diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c new file 
mode 100644 index 00000000000..dabaf98943d --- /dev/null +++ b/arch/s390/kernel/dis.c @@ -0,0 +1,1278 @@ +/* + * arch/s390/kernel/dis.c + * + * Disassemble s390 instructions. + * + * Copyright IBM Corp. 2007 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + */ + +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <linux/timer.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <linux/reboot.h> +#include <linux/kprobes.h> + +#include <asm/system.h> +#include <asm/uaccess.h> +#include <asm/io.h> +#include <asm/atomic.h> +#include <asm/mathemu.h> +#include <asm/cpcmd.h> +#include <asm/s390_ext.h> +#include <asm/lowcore.h> +#include <asm/debug.h> +#include <asm/kdebug.h> + +#ifndef CONFIG_64BIT +#define ONELONG "%08lx: " +#else /* CONFIG_64BIT */ +#define ONELONG "%016lx: " +#endif /* CONFIG_64BIT */ + +#define OPERAND_GPR 0x1 /* Operand printed as %rx */ +#define OPERAND_FPR 0x2 /* Operand printed as %fx */ +#define OPERAND_AR 0x4 /* Operand printed as %ax */ +#define OPERAND_CR 0x8 /* Operand printed as %cx */ +#define OPERAND_DISP 0x10 /* Operand printed as displacement */ +#define OPERAND_BASE 0x20 /* Operand printed as base register */ +#define OPERAND_INDEX 0x40 /* Operand printed as index register */ +#define OPERAND_PCREL 0x80 /* Operand printed as pc-relative symbol */ +#define OPERAND_SIGNED 0x100 /* Operand printed as signed value */ +#define OPERAND_LENGTH 0x200 /* Operand printed as length (+1) */ + +enum { + UNUSED, /* Indicates the end of the operand list */ + R_8, /* GPR starting at position 8 */ + R_12, /* GPR starting at position 12 */ + R_16, /* GPR starting at position 16 */ + R_20, /* GPR starting at position 20 */ + R_24, /* GPR starting at position 24 */ + R_28, /* GPR starting at position 28 */ + R_32, /* GPR starting at position 32 */ + F_8, /* FPR starting at position 8 */ + F_12, /* FPR starting at position 12 */ + F_16, /* FPR starting at position 16 */ + F_20, /* FPR starting at position 16 */ + F_24, /* FPR starting at position 24 */ + F_28, /* FPR starting at position 28 */ + F_32, /* FPR starting at position 32 */ + A_8, /* Access reg. starting at position 8 */ + A_12, /* Access reg. starting at position 12 */ + A_24, /* Access reg. starting at position 24 */ + A_28, /* Access reg. starting at position 28 */ + C_8, /* Control reg. starting at position 8 */ + C_12, /* Control reg. 
starting at position 12 */ + B_16, /* Base register starting at position 16 */ + B_32, /* Base register starting at position 32 */ + X_12, /* Index register starting at position 12 */ + D_20, /* Displacement starting at position 20 */ + D_36, /* Displacement starting at position 36 */ + D20_20, /* 20 bit displacement starting at 20 */ + L4_8, /* 4 bit length starting at position 8 */ + L4_12, /* 4 bit length starting at position 12 */ + L8_8, /* 8 bit length starting at position 8 */ + U4_8, /* 4 bit unsigned value starting at 8 */ + U4_12, /* 4 bit unsigned value starting at 12 */ + U4_16, /* 4 bit unsigned value starting at 16 */ + U4_20, /* 4 bit unsigned value starting at 20 */ + U8_8, /* 8 bit unsigned value starting at 8 */ + U8_16, /* 8 bit unsigned value starting at 16 */ + I16_16, /* 16 bit signed value starting at 16 */ + U16_16, /* 16 bit unsigned value starting at 16 */ + J16_16, /* PC relative jump offset at 16 */ + J32_16, /* PC relative long offset at 16 */ + I32_16, /* 32 bit signed value starting at 16 */ + U32_16, /* 32 bit unsigned value starting at 16 */ + M_16, /* 4 bit optional mask starting at 16 */ + RO_28, /* optional GPR starting at position 28 */ +}; + +/* + * Enumeration of the different instruction formats. + * For details consult the principles of operation. + */ +enum { + INSTR_INVALID, + INSTR_E, INSTR_RIE_RRP, INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, + INSTR_RIL_UP, INSTR_RI_RI, INSTR_RI_RP, INSTR_RI_RU, INSTR_RI_UP, + INSTR_RRE_00, INSTR_RRE_0R, INSTR_RRE_AA, INSTR_RRE_AR, INSTR_RRE_F0, + INSTR_RRE_FF, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, INSTR_RRE_RR, + INSTR_RRE_RR_OPT, INSTR_RRF_F0FF, INSTR_RRF_FUFF, INSTR_RRF_M0RR, + INSTR_RRF_R0RR, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, + INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, + INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, INSTR_RSI_RRP, + INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, + INSTR_RSY_RURD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, + INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, + INSTR_RXF_FRRDF, INSTR_RXY_FRRD, INSTR_RXY_RRRD, INSTR_RX_FRRD, + INSTR_RX_RRRD, INSTR_RX_URRD, INSTR_SIY_URD, INSTR_SI_URD, + INSTR_SSE_RDRD, INSTR_SSF_RRDRD, INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, + INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, INSTR_SS_RRRDRD2, INSTR_SS_RRRDRD3, + INSTR_S_00, INSTR_S_RD, +}; + +struct operand { + int bits; /* The number of bits in the operand. */ + int shift; /* The number of bits to shift. */ + int flags; /* One bit syntax flags. 
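struct operand above reduces every instruction field to a bit width, a bit offset from the start of the instruction and a set of formatting flags, so the decoder needs no per-format parsing code. A toy version for a single RR-format instruction ("lr", opcode 0x18, two 4-bit GPR fields at bit offsets 8 and 12) shows how such a descriptor is applied. This sketch only handles fields that fit inside one byte; the kernel's extract_operand() further down additionally copes with byte-crossing fields, sign extension and the split 20-bit displacement:

  #include <stdio.h>

  struct operand_desc {
          int bits;       /* width of the field */
          int shift;      /* offset from the instruction start, in bits */
  };

  /* RR format: two 4-bit register numbers in the second instruction byte. */
  static const struct operand_desc rr_ops[] = {
          { 4, 8 },       /* R1 */
          { 4, 12 },      /* R2 */
  };

  /* Extract one single-byte field described by "op" from "code". */
  static unsigned int extract(const unsigned char *code,
                              const struct operand_desc *op)
  {
          unsigned int byte = code[op->shift / 8];
          int rest = 8 - op->shift % 8 - op->bits;

          return (byte >> rest) & ((1U << op->bits) - 1);
  }

  int main(void)
  {
          unsigned char insn[] = { 0x18, 0x12 };  /* lr %r1,%r2 */

          printf("lr\t%%r%u,%%r%u\n",
                 extract(insn, &rr_ops[0]), extract(insn, &rr_ops[1]));
          return 0;
  }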
*/ +}; + +struct insn { + const char name[5]; + unsigned char opfrag; + unsigned char format; +}; + +static const struct operand operands[] = +{ + [UNUSED] = { 0, 0, 0 }, + [R_8] = { 4, 8, OPERAND_GPR }, + [R_12] = { 4, 12, OPERAND_GPR }, + [R_16] = { 4, 16, OPERAND_GPR }, + [R_20] = { 4, 20, OPERAND_GPR }, + [R_24] = { 4, 24, OPERAND_GPR }, + [R_28] = { 4, 28, OPERAND_GPR }, + [R_32] = { 4, 32, OPERAND_GPR }, + [F_8] = { 4, 8, OPERAND_FPR }, + [F_12] = { 4, 12, OPERAND_FPR }, + [F_16] = { 4, 16, OPERAND_FPR }, + [F_20] = { 4, 16, OPERAND_FPR }, + [F_24] = { 4, 24, OPERAND_FPR }, + [F_28] = { 4, 28, OPERAND_FPR }, + [F_32] = { 4, 32, OPERAND_FPR }, + [A_8] = { 4, 8, OPERAND_AR }, + [A_12] = { 4, 12, OPERAND_AR }, + [A_24] = { 4, 24, OPERAND_AR }, + [A_28] = { 4, 28, OPERAND_AR }, + [C_8] = { 4, 8, OPERAND_CR }, + [C_12] = { 4, 12, OPERAND_CR }, + [B_16] = { 4, 16, OPERAND_BASE | OPERAND_GPR }, + [B_32] = { 4, 32, OPERAND_BASE | OPERAND_GPR }, + [X_12] = { 4, 12, OPERAND_INDEX | OPERAND_GPR }, + [D_20] = { 12, 20, OPERAND_DISP }, + [D_36] = { 12, 36, OPERAND_DISP }, + [D20_20] = { 20, 20, OPERAND_DISP | OPERAND_SIGNED }, + [L4_8] = { 4, 8, OPERAND_LENGTH }, + [L4_12] = { 4, 12, OPERAND_LENGTH }, + [L8_8] = { 8, 8, OPERAND_LENGTH }, + [U4_8] = { 4, 8, 0 }, + [U4_12] = { 4, 12, 0 }, + [U4_16] = { 4, 16, 0 }, + [U4_20] = { 4, 20, 0 }, + [U8_8] = { 8, 8, 0 }, + [U8_16] = { 8, 16, 0 }, + [I16_16] = { 16, 16, OPERAND_SIGNED }, + [U16_16] = { 16, 16, 0 }, + [J16_16] = { 16, 16, OPERAND_PCREL }, + [J32_16] = { 32, 16, OPERAND_PCREL }, + [I32_16] = { 32, 16, OPERAND_SIGNED }, + [U32_16] = { 32, 16, 0 }, + [M_16] = { 4, 16, 0 }, + [RO_28] = { 4, 28, OPERAND_GPR } +}; + +static const unsigned char formats[][7] = { + [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, /* e.g. pr */ + [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxhg */ + [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, /* e.g. brasl */ + [INSTR_RIL_UP] = { 0x0f, U4_8,J32_16,0,0,0,0 }, /* e.g. brcl */ + [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, /* e.g. afi */ + [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, /* e.g. alfi */ + [INSTR_RI_RI] = { 0x0f, R_8,I16_16,0,0,0,0 }, /* e.g. ahi */ + [INSTR_RI_RP] = { 0x0f, R_8,J16_16,0,0,0,0 }, /* e.g. brct */ + [INSTR_RI_RU] = { 0x0f, R_8,U16_16,0,0,0,0 }, /* e.g. tml */ + [INSTR_RI_UP] = { 0x0f, U4_8,J16_16,0,0,0,0 }, /* e.g. brc */ + [INSTR_RRE_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. palb */ + [INSTR_RRE_0R] = { 0xff, R_28,0,0,0,0,0 }, /* e.g. tb */ + [INSTR_RRE_AA] = { 0xff, A_24,A_28,0,0,0,0 }, /* e.g. cpya */ + [INSTR_RRE_AR] = { 0xff, A_24,R_28,0,0,0,0 }, /* e.g. sar */ + [INSTR_RRE_F0] = { 0xff, F_24,0,0,0,0,0 }, /* e.g. sqer */ + [INSTR_RRE_FF] = { 0xff, F_24,F_28,0,0,0,0 }, /* e.g. debr */ + [INSTR_RRE_R0] = { 0xff, R_24,0,0,0,0,0 }, /* e.g. ipm */ + [INSTR_RRE_RA] = { 0xff, R_24,A_28,0,0,0,0 }, /* e.g. ear */ + [INSTR_RRE_RF] = { 0xff, R_24,F_28,0,0,0,0 }, /* e.g. cefbr */ + [INSTR_RRE_RR] = { 0xff, R_24,R_28,0,0,0,0 }, /* e.g. lura */ + [INSTR_RRE_RR_OPT]= { 0xff, R_24,RO_28,0,0,0,0 }, /* efpc, sfpc */ + [INSTR_RRF_F0FF] = { 0xff, F_16,F_24,F_28,0,0,0 }, /* e.g. madbr */ + [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },/* e.g. didbr */ + [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },/* e.g. .insn */ + [INSTR_RRF_R0RR] = { 0xff, R_24,R_28,R_16,0,0,0 }, /* e.g. idte */ + [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, /* e.g. fixr */ + [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, /* e.g. cfebr */ + [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, /* e.g. 
sske */ + [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, /* e.g. adr */ + [INSTR_RR_R0] = { 0xff, R_8, 0,0,0,0,0 }, /* e.g. spm */ + [INSTR_RR_RR] = { 0xff, R_8,R_12,0,0,0,0 }, /* e.g. lr */ + [INSTR_RR_U0] = { 0xff, U8_8, 0,0,0,0,0 }, /* e.g. svc */ + [INSTR_RR_UR] = { 0xff, U4_8,R_12,0,0,0,0 }, /* e.g. bcr */ + [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. lmh */ + [INSTR_RSE_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lmh */ + [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icmh */ + [INSTR_RSL_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. tp */ + [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, /* e.g. brxh */ + [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },/* e.g. stmy */ + [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, + /* e.g. icmh */ + [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },/* e.g. lamy */ + [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },/* e.g. lamy */ + [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, /* e.g. lam */ + [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, /* e.g. lctl */ + [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, /* e.g. sll */ + [INSTR_RS_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, /* e.g. cs */ + [INSTR_RS_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, /* e.g. icm */ + [INSTR_RXE_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. axbr */ + [INSTR_RXE_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. lg */ + [INSTR_RXF_FRRDF] = { 0xff, F_32,F_8,D_20,X_12,B_16,0 }, + /* e.g. madb */ + [INSTR_RXY_RRRD] = { 0xff, R_8,D20_20,X_12,B_16,0,0 },/* e.g. ly */ + [INSTR_RXY_FRRD] = { 0xff, F_8,D20_20,X_12,B_16,0,0 },/* e.g. ley */ + [INSTR_RX_FRRD] = { 0xff, F_8,D_20,X_12,B_16,0,0 }, /* e.g. ae */ + [INSTR_RX_RRRD] = { 0xff, R_8,D_20,X_12,B_16,0,0 }, /* e.g. l */ + [INSTR_RX_URRD] = { 0x00, U4_8,D_20,X_12,B_16,0,0 }, /* e.g. bc */ + [INSTR_SI_URD] = { 0x00, D_20,B_16,U8_8,0,0,0 }, /* e.g. cli */ + [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, /* e.g. tmy */ + [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, /* e.g. mvsdk */ + [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, + /* e.g. mvc */ + [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, + /* e.g. srp */ + [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, + /* e.g. pack */ + [INSTR_SS_RRRDRD] = { 0xff, D_20,R_8,B_16,D_36,B_32,R_12 }, + /* e.g. mvck */ + [INSTR_SS_RRRDRD2]= { 0xff, R_8,D_20,B_16,R_12,D_36,B_32 }, + /* e.g. plo */ + [INSTR_SS_RRRDRD3]= { 0xff, R_8,R_12,D_20,B_16,D_36,B_32 }, + /* e.g. lmd */ + [INSTR_S_00] = { 0xff, 0,0,0,0,0,0 }, /* e.g. hsch */ + [INSTR_S_RD] = { 0xff, D_20,B_16,0,0,0,0 }, /* e.g. lpsw */ + [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, + /* e.g. 
mvcos */ +}; + +static struct insn opcode[] = { +#ifdef CONFIG_64BIT + { "lmd", 0xef, INSTR_SS_RRRDRD3 }, +#endif + { "spm", 0x04, INSTR_RR_R0 }, + { "balr", 0x05, INSTR_RR_RR }, + { "bctr", 0x06, INSTR_RR_RR }, + { "bcr", 0x07, INSTR_RR_UR }, + { "svc", 0x0a, INSTR_RR_U0 }, + { "bsm", 0x0b, INSTR_RR_RR }, + { "bassm", 0x0c, INSTR_RR_RR }, + { "basr", 0x0d, INSTR_RR_RR }, + { "mvcl", 0x0e, INSTR_RR_RR }, + { "clcl", 0x0f, INSTR_RR_RR }, + { "lpr", 0x10, INSTR_RR_RR }, + { "lnr", 0x11, INSTR_RR_RR }, + { "ltr", 0x12, INSTR_RR_RR }, + { "lcr", 0x13, INSTR_RR_RR }, + { "nr", 0x14, INSTR_RR_RR }, + { "clr", 0x15, INSTR_RR_RR }, + { "or", 0x16, INSTR_RR_RR }, + { "xr", 0x17, INSTR_RR_RR }, + { "lr", 0x18, INSTR_RR_RR }, + { "cr", 0x19, INSTR_RR_RR }, + { "ar", 0x1a, INSTR_RR_RR }, + { "sr", 0x1b, INSTR_RR_RR }, + { "mr", 0x1c, INSTR_RR_RR }, + { "dr", 0x1d, INSTR_RR_RR }, + { "alr", 0x1e, INSTR_RR_RR }, + { "slr", 0x1f, INSTR_RR_RR }, + { "lpdr", 0x20, INSTR_RR_FF }, + { "lndr", 0x21, INSTR_RR_FF }, + { "ltdr", 0x22, INSTR_RR_FF }, + { "lcdr", 0x23, INSTR_RR_FF }, + { "hdr", 0x24, INSTR_RR_FF }, + { "ldxr", 0x25, INSTR_RR_FF }, + { "lrdr", 0x25, INSTR_RR_FF }, + { "mxr", 0x26, INSTR_RR_FF }, + { "mxdr", 0x27, INSTR_RR_FF }, + { "ldr", 0x28, INSTR_RR_FF }, + { "cdr", 0x29, INSTR_RR_FF }, + { "adr", 0x2a, INSTR_RR_FF }, + { "sdr", 0x2b, INSTR_RR_FF }, + { "mdr", 0x2c, INSTR_RR_FF }, + { "ddr", 0x2d, INSTR_RR_FF }, + { "awr", 0x2e, INSTR_RR_FF }, + { "swr", 0x2f, INSTR_RR_FF }, + { "lper", 0x30, INSTR_RR_FF }, + { "lner", 0x31, INSTR_RR_FF }, + { "lter", 0x32, INSTR_RR_FF }, + { "lcer", 0x33, INSTR_RR_FF }, + { "her", 0x34, INSTR_RR_FF }, + { "ledr", 0x35, INSTR_RR_FF }, + { "lrer", 0x35, INSTR_RR_FF }, + { "axr", 0x36, INSTR_RR_FF }, + { "sxr", 0x37, INSTR_RR_FF }, + { "ler", 0x38, INSTR_RR_FF }, + { "cer", 0x39, INSTR_RR_FF }, + { "aer", 0x3a, INSTR_RR_FF }, + { "ser", 0x3b, INSTR_RR_FF }, + { "mder", 0x3c, INSTR_RR_FF }, + { "mer", 0x3c, INSTR_RR_FF }, + { "der", 0x3d, INSTR_RR_FF }, + { "aur", 0x3e, INSTR_RR_FF }, + { "sur", 0x3f, INSTR_RR_FF }, + { "sth", 0x40, INSTR_RX_RRRD }, + { "la", 0x41, INSTR_RX_RRRD }, + { "stc", 0x42, INSTR_RX_RRRD }, + { "ic", 0x43, INSTR_RX_RRRD }, + { "ex", 0x44, INSTR_RX_RRRD }, + { "bal", 0x45, INSTR_RX_RRRD }, + { "bct", 0x46, INSTR_RX_RRRD }, + { "bc", 0x47, INSTR_RX_URRD }, + { "lh", 0x48, INSTR_RX_RRRD }, + { "ch", 0x49, INSTR_RX_RRRD }, + { "ah", 0x4a, INSTR_RX_RRRD }, + { "sh", 0x4b, INSTR_RX_RRRD }, + { "mh", 0x4c, INSTR_RX_RRRD }, + { "bas", 0x4d, INSTR_RX_RRRD }, + { "cvd", 0x4e, INSTR_RX_RRRD }, + { "cvb", 0x4f, INSTR_RX_RRRD }, + { "st", 0x50, INSTR_RX_RRRD }, + { "lae", 0x51, INSTR_RX_RRRD }, + { "n", 0x54, INSTR_RX_RRRD }, + { "cl", 0x55, INSTR_RX_RRRD }, + { "o", 0x56, INSTR_RX_RRRD }, + { "x", 0x57, INSTR_RX_RRRD }, + { "l", 0x58, INSTR_RX_RRRD }, + { "c", 0x59, INSTR_RX_RRRD }, + { "a", 0x5a, INSTR_RX_RRRD }, + { "s", 0x5b, INSTR_RX_RRRD }, + { "m", 0x5c, INSTR_RX_RRRD }, + { "d", 0x5d, INSTR_RX_RRRD }, + { "al", 0x5e, INSTR_RX_RRRD }, + { "sl", 0x5f, INSTR_RX_RRRD }, + { "std", 0x60, INSTR_RX_FRRD }, + { "mxd", 0x67, INSTR_RX_FRRD }, + { "ld", 0x68, INSTR_RX_FRRD }, + { "cd", 0x69, INSTR_RX_FRRD }, + { "ad", 0x6a, INSTR_RX_FRRD }, + { "sd", 0x6b, INSTR_RX_FRRD }, + { "md", 0x6c, INSTR_RX_FRRD }, + { "dd", 0x6d, INSTR_RX_FRRD }, + { "aw", 0x6e, INSTR_RX_FRRD }, + { "sw", 0x6f, INSTR_RX_FRRD }, + { "ste", 0x70, INSTR_RX_FRRD }, + { "ms", 0x71, INSTR_RX_RRRD }, + { "le", 0x78, INSTR_RX_FRRD }, + { "ce", 0x79, INSTR_RX_FRRD }, + { "ae", 0x7a, 
INSTR_RX_FRRD }, + { "se", 0x7b, INSTR_RX_FRRD }, + { "mde", 0x7c, INSTR_RX_FRRD }, + { "me", 0x7c, INSTR_RX_FRRD }, + { "de", 0x7d, INSTR_RX_FRRD }, + { "au", 0x7e, INSTR_RX_FRRD }, + { "su", 0x7f, INSTR_RX_FRRD }, + { "ssm", 0x80, INSTR_S_RD }, + { "lpsw", 0x82, INSTR_S_RD }, + { "diag", 0x83, INSTR_RS_RRRD }, + { "brxh", 0x84, INSTR_RSI_RRP }, + { "brxle", 0x85, INSTR_RSI_RRP }, + { "bxh", 0x86, INSTR_RS_RRRD }, + { "bxle", 0x87, INSTR_RS_RRRD }, + { "srl", 0x88, INSTR_RS_R0RD }, + { "sll", 0x89, INSTR_RS_R0RD }, + { "sra", 0x8a, INSTR_RS_R0RD }, + { "sla", 0x8b, INSTR_RS_R0RD }, + { "srdl", 0x8c, INSTR_RS_R0RD }, + { "sldl", 0x8d, INSTR_RS_R0RD }, + { "srda", 0x8e, INSTR_RS_R0RD }, + { "slda", 0x8f, INSTR_RS_R0RD }, + { "stm", 0x90, INSTR_RS_RRRD }, + { "tm", 0x91, INSTR_SI_URD }, + { "mvi", 0x92, INSTR_SI_URD }, + { "ts", 0x93, INSTR_S_RD }, + { "ni", 0x94, INSTR_SI_URD }, + { "cli", 0x95, INSTR_SI_URD }, + { "oi", 0x96, INSTR_SI_URD }, + { "xi", 0x97, INSTR_SI_URD }, + { "lm", 0x98, INSTR_RS_RRRD }, + { "trace", 0x99, INSTR_RS_RRRD }, + { "lam", 0x9a, INSTR_RS_AARD }, + { "stam", 0x9b, INSTR_RS_AARD }, + { "mvcle", 0xa8, INSTR_RS_RRRD }, + { "clcle", 0xa9, INSTR_RS_RRRD }, + { "stnsm", 0xac, INSTR_SI_URD }, + { "stosm", 0xad, INSTR_SI_URD }, + { "sigp", 0xae, INSTR_RS_RRRD }, + { "mc", 0xaf, INSTR_SI_URD }, + { "lra", 0xb1, INSTR_RX_RRRD }, + { "stctl", 0xb6, INSTR_RS_CCRD }, + { "lctl", 0xb7, INSTR_RS_CCRD }, + { "cs", 0xba, INSTR_RS_RRRD }, + { "cds", 0xbb, INSTR_RS_RRRD }, + { "clm", 0xbd, INSTR_RS_RURD }, + { "stcm", 0xbe, INSTR_RS_RURD }, + { "icm", 0xbf, INSTR_RS_RURD }, + { "mvn", 0xd1, INSTR_SS_L0RDRD }, + { "mvc", 0xd2, INSTR_SS_L0RDRD }, + { "mvz", 0xd3, INSTR_SS_L0RDRD }, + { "nc", 0xd4, INSTR_SS_L0RDRD }, + { "clc", 0xd5, INSTR_SS_L0RDRD }, + { "oc", 0xd6, INSTR_SS_L0RDRD }, + { "xc", 0xd7, INSTR_SS_L0RDRD }, + { "mvck", 0xd9, INSTR_SS_RRRDRD }, + { "mvcp", 0xda, INSTR_SS_RRRDRD }, + { "mvcs", 0xdb, INSTR_SS_RRRDRD }, + { "tr", 0xdc, INSTR_SS_L0RDRD }, + { "trt", 0xdd, INSTR_SS_L0RDRD }, + { "ed", 0xde, INSTR_SS_L0RDRD }, + { "edmk", 0xdf, INSTR_SS_L0RDRD }, + { "pku", 0xe1, INSTR_SS_L0RDRD }, + { "unpku", 0xe2, INSTR_SS_L0RDRD }, + { "mvcin", 0xe8, INSTR_SS_L0RDRD }, + { "pka", 0xe9, INSTR_SS_L0RDRD }, + { "unpka", 0xea, INSTR_SS_L0RDRD }, + { "plo", 0xee, INSTR_SS_RRRDRD2 }, + { "srp", 0xf0, INSTR_SS_LIRDRD }, + { "mvo", 0xf1, INSTR_SS_LLRDRD }, + { "pack", 0xf2, INSTR_SS_LLRDRD }, + { "unpk", 0xf3, INSTR_SS_LLRDRD }, + { "zap", 0xf8, INSTR_SS_LLRDRD }, + { "cp", 0xf9, INSTR_SS_LLRDRD }, + { "ap", 0xfa, INSTR_SS_LLRDRD }, + { "sp", 0xfb, INSTR_SS_LLRDRD }, + { "mp", 0xfc, INSTR_SS_LLRDRD }, + { "dp", 0xfd, INSTR_SS_LLRDRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_01[] = { +#ifdef CONFIG_64BIT + { "sam64", 0x0e, INSTR_E }, +#endif + { "pr", 0x01, INSTR_E }, + { "upt", 0x02, INSTR_E }, + { "sckpf", 0x07, INSTR_E }, + { "tam", 0x0b, INSTR_E }, + { "sam24", 0x0c, INSTR_E }, + { "sam31", 0x0d, INSTR_E }, + { "trap2", 0xff, INSTR_E }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_a5[] = { +#ifdef CONFIG_64BIT + { "iihh", 0x00, INSTR_RI_RU }, + { "iihl", 0x01, INSTR_RI_RU }, + { "iilh", 0x02, INSTR_RI_RU }, + { "iill", 0x03, INSTR_RI_RU }, + { "nihh", 0x04, INSTR_RI_RU }, + { "nihl", 0x05, INSTR_RI_RU }, + { "nilh", 0x06, INSTR_RI_RU }, + { "nill", 0x07, INSTR_RI_RU }, + { "oihh", 0x08, INSTR_RI_RU }, + { "oihl", 0x09, INSTR_RI_RU }, + { "oilh", 0x0a, INSTR_RI_RU }, + { "oill", 0x0b, INSTR_RI_RU }, + { "llihh", 0x0c, INSTR_RI_RU }, + { 
"llihl", 0x0d, INSTR_RI_RU }, + { "llilh", 0x0e, INSTR_RI_RU }, + { "llill", 0x0f, INSTR_RI_RU }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_a7[] = { +#ifdef CONFIG_64BIT + { "tmhh", 0x02, INSTR_RI_RU }, + { "tmhl", 0x03, INSTR_RI_RU }, + { "brctg", 0x07, INSTR_RI_RP }, + { "lghi", 0x09, INSTR_RI_RI }, + { "aghi", 0x0b, INSTR_RI_RI }, + { "mghi", 0x0d, INSTR_RI_RI }, + { "cghi", 0x0f, INSTR_RI_RI }, +#endif + { "tmlh", 0x00, INSTR_RI_RU }, + { "tmll", 0x01, INSTR_RI_RU }, + { "brc", 0x04, INSTR_RI_UP }, + { "bras", 0x05, INSTR_RI_RP }, + { "brct", 0x06, INSTR_RI_RP }, + { "lhi", 0x08, INSTR_RI_RI }, + { "ahi", 0x0a, INSTR_RI_RI }, + { "mhi", 0x0c, INSTR_RI_RI }, + { "chi", 0x0e, INSTR_RI_RI }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b2[] = { +#ifdef CONFIG_64BIT + { "sske", 0x2b, INSTR_RRF_M0RR }, + { "stckf", 0x7c, INSTR_S_RD }, + { "cu21", 0xa6, INSTR_RRF_M0RR }, + { "cuutf", 0xa6, INSTR_RRF_M0RR }, + { "cu12", 0xa7, INSTR_RRF_M0RR }, + { "cutfu", 0xa7, INSTR_RRF_M0RR }, + { "stfle", 0xb0, INSTR_S_RD }, + { "lpswe", 0xb2, INSTR_S_RD }, +#endif + { "stidp", 0x02, INSTR_S_RD }, + { "sck", 0x04, INSTR_S_RD }, + { "stck", 0x05, INSTR_S_RD }, + { "sckc", 0x06, INSTR_S_RD }, + { "stckc", 0x07, INSTR_S_RD }, + { "spt", 0x08, INSTR_S_RD }, + { "stpt", 0x09, INSTR_S_RD }, + { "spka", 0x0a, INSTR_S_RD }, + { "ipk", 0x0b, INSTR_S_00 }, + { "ptlb", 0x0d, INSTR_S_00 }, + { "spx", 0x10, INSTR_S_RD }, + { "stpx", 0x11, INSTR_S_RD }, + { "stap", 0x12, INSTR_S_RD }, + { "sie", 0x14, INSTR_S_RD }, + { "pc", 0x18, INSTR_S_RD }, + { "sac", 0x19, INSTR_S_RD }, + { "cfc", 0x1a, INSTR_S_RD }, + { "ipte", 0x21, INSTR_RRE_RR }, + { "ipm", 0x22, INSTR_RRE_R0 }, + { "ivsk", 0x23, INSTR_RRE_RR }, + { "iac", 0x24, INSTR_RRE_R0 }, + { "ssar", 0x25, INSTR_RRE_R0 }, + { "epar", 0x26, INSTR_RRE_R0 }, + { "esar", 0x27, INSTR_RRE_R0 }, + { "pt", 0x28, INSTR_RRE_RR }, + { "iske", 0x29, INSTR_RRE_RR }, + { "rrbe", 0x2a, INSTR_RRE_RR }, + { "sske", 0x2b, INSTR_RRE_RR }, + { "tb", 0x2c, INSTR_RRE_0R }, + { "dxr", 0x2d, INSTR_RRE_F0 }, + { "pgin", 0x2e, INSTR_RRE_RR }, + { "pgout", 0x2f, INSTR_RRE_RR }, + { "csch", 0x30, INSTR_S_00 }, + { "hsch", 0x31, INSTR_S_00 }, + { "msch", 0x32, INSTR_S_RD }, + { "ssch", 0x33, INSTR_S_RD }, + { "stsch", 0x34, INSTR_S_RD }, + { "tsch", 0x35, INSTR_S_RD }, + { "tpi", 0x36, INSTR_S_RD }, + { "sal", 0x37, INSTR_S_00 }, + { "rsch", 0x38, INSTR_S_00 }, + { "stcrw", 0x39, INSTR_S_RD }, + { "stcps", 0x3a, INSTR_S_RD }, + { "rchp", 0x3b, INSTR_S_00 }, + { "schm", 0x3c, INSTR_S_00 }, + { "bakr", 0x40, INSTR_RRE_RR }, + { "cksm", 0x41, INSTR_RRE_RR }, + { "sqdr", 0x44, INSTR_RRE_F0 }, + { "sqer", 0x45, INSTR_RRE_F0 }, + { "stura", 0x46, INSTR_RRE_RR }, + { "msta", 0x47, INSTR_RRE_R0 }, + { "palb", 0x48, INSTR_RRE_00 }, + { "ereg", 0x49, INSTR_RRE_RR }, + { "esta", 0x4a, INSTR_RRE_RR }, + { "lura", 0x4b, INSTR_RRE_RR }, + { "tar", 0x4c, INSTR_RRE_AR }, + { "cpya", INSTR_RRE_AA }, + { "sar", 0x4e, INSTR_RRE_AR }, + { "ear", 0x4f, INSTR_RRE_RA }, + { "csp", 0x50, INSTR_RRE_RR }, + { "msr", 0x52, INSTR_RRE_RR }, + { "mvpg", 0x54, INSTR_RRE_RR }, + { "mvst", 0x55, INSTR_RRE_RR }, + { "cuse", 0x57, INSTR_RRE_RR }, + { "bsg", 0x58, INSTR_RRE_RR }, + { "bsa", 0x5a, INSTR_RRE_RR }, + { "clst", 0x5d, INSTR_RRE_RR }, + { "srst", 0x5e, INSTR_RRE_RR }, + { "cmpsc", 0x63, INSTR_RRE_RR }, + { "cmpsc", 0x63, INSTR_RRE_RR }, + { "siga", 0x74, INSTR_S_RD }, + { "xsch", 0x76, INSTR_S_00 }, + { "rp", 0x77, INSTR_S_RD }, + { "stcke", 0x78, INSTR_S_RD }, + { "sacf", 0x79, 
INSTR_S_RD }, + { "stsi", 0x7d, INSTR_S_RD }, + { "srnm", 0x99, INSTR_S_RD }, + { "stfpc", 0x9c, INSTR_S_RD }, + { "lfpc", 0x9d, INSTR_S_RD }, + { "tre", 0xa5, INSTR_RRE_RR }, + { "cuutf", 0xa6, INSTR_RRE_RR }, + { "cutfu", 0xa7, INSTR_RRE_RR }, + { "stfl", 0xb1, INSTR_S_RD }, + { "trap4", 0xff, INSTR_S_RD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b3[] = { +#ifdef CONFIG_64BIT + { "maylr", 0x38, INSTR_RRF_F0FF }, + { "mylr", 0x39, INSTR_RRF_F0FF }, + { "mayr", 0x3a, INSTR_RRF_F0FF }, + { "myr", 0x3b, INSTR_RRF_F0FF }, + { "mayhr", 0x3c, INSTR_RRF_F0FF }, + { "myhr", 0x3d, INSTR_RRF_F0FF }, + { "cegbr", 0xa4, INSTR_RRE_RR }, + { "cdgbr", 0xa5, INSTR_RRE_RR }, + { "cxgbr", 0xa6, INSTR_RRE_RR }, + { "cgebr", 0xa8, INSTR_RRF_U0RF }, + { "cgdbr", 0xa9, INSTR_RRF_U0RF }, + { "cgxbr", 0xaa, INSTR_RRF_U0RF }, + { "cfer", 0xb8, INSTR_RRF_U0RF }, + { "cfdr", 0xb9, INSTR_RRF_U0RF }, + { "cfxr", 0xba, INSTR_RRF_U0RF }, + { "cegr", 0xc4, INSTR_RRE_RR }, + { "cdgr", 0xc5, INSTR_RRE_RR }, + { "cxgr", 0xc6, INSTR_RRE_RR }, + { "cger", 0xc8, INSTR_RRF_U0RF }, + { "cgdr", 0xc9, INSTR_RRF_U0RF }, + { "cgxr", 0xca, INSTR_RRF_U0RF }, +#endif + { "lpebr", 0x00, INSTR_RRE_FF }, + { "lnebr", 0x01, INSTR_RRE_FF }, + { "ltebr", 0x02, INSTR_RRE_FF }, + { "lcebr", 0x03, INSTR_RRE_FF }, + { "ldebr", 0x04, INSTR_RRE_FF }, + { "lxdbr", 0x05, INSTR_RRE_FF }, + { "lxebr", 0x06, INSTR_RRE_FF }, + { "mxdbr", 0x07, INSTR_RRE_FF }, + { "kebr", 0x08, INSTR_RRE_FF }, + { "cebr", 0x09, INSTR_RRE_FF }, + { "aebr", 0x0a, INSTR_RRE_FF }, + { "sebr", 0x0b, INSTR_RRE_FF }, + { "mdebr", 0x0c, INSTR_RRE_FF }, + { "debr", 0x0d, INSTR_RRE_FF }, + { "maebr", 0x0e, INSTR_RRF_F0FF }, + { "msebr", 0x0f, INSTR_RRF_F0FF }, + { "lpdbr", 0x10, INSTR_RRE_FF }, + { "lndbr", 0x11, INSTR_RRE_FF }, + { "ltdbr", 0x12, INSTR_RRE_FF }, + { "lcdbr", 0x13, INSTR_RRE_FF }, + { "sqebr", 0x14, INSTR_RRE_FF }, + { "sqdbr", 0x15, INSTR_RRE_FF }, + { "sqxbr", 0x16, INSTR_RRE_FF }, + { "meebr", 0x17, INSTR_RRE_FF }, + { "kdbr", 0x18, INSTR_RRE_FF }, + { "cdbr", 0x19, INSTR_RRE_FF }, + { "adbr", 0x1a, INSTR_RRE_FF }, + { "sdbr", 0x1b, INSTR_RRE_FF }, + { "mdbr", 0x1c, INSTR_RRE_FF }, + { "ddbr", 0x1d, INSTR_RRE_FF }, + { "madbr", 0x1e, INSTR_RRF_F0FF }, + { "msdbr", 0x1f, INSTR_RRF_F0FF }, + { "lder", 0x24, INSTR_RRE_FF }, + { "lxdr", 0x25, INSTR_RRE_FF }, + { "lxer", 0x26, INSTR_RRE_FF }, + { "maer", 0x2e, INSTR_RRF_F0FF }, + { "mser", 0x2f, INSTR_RRF_F0FF }, + { "sqxr", 0x36, INSTR_RRE_FF }, + { "meer", 0x37, INSTR_RRE_FF }, + { "madr", 0x3e, INSTR_RRF_F0FF }, + { "msdr", 0x3f, INSTR_RRF_F0FF }, + { "lpxbr", 0x40, INSTR_RRE_FF }, + { "lnxbr", 0x41, INSTR_RRE_FF }, + { "ltxbr", 0x42, INSTR_RRE_FF }, + { "lcxbr", 0x43, INSTR_RRE_FF }, + { "ledbr", 0x44, INSTR_RRE_FF }, + { "ldxbr", 0x45, INSTR_RRE_FF }, + { "lexbr", 0x46, INSTR_RRE_FF }, + { "fixbr", 0x47, INSTR_RRF_U0FF }, + { "kxbr", 0x48, INSTR_RRE_FF }, + { "cxbr", 0x49, INSTR_RRE_FF }, + { "axbr", 0x4a, INSTR_RRE_FF }, + { "sxbr", 0x4b, INSTR_RRE_FF }, + { "mxbr", 0x4c, INSTR_RRE_FF }, + { "dxbr", 0x4d, INSTR_RRE_FF }, + { "tbedr", 0x50, INSTR_RRF_U0FF }, + { "tbdr", 0x51, INSTR_RRF_U0FF }, + { "diebr", 0x53, INSTR_RRF_FUFF }, + { "fiebr", 0x57, INSTR_RRF_U0FF }, + { "thder", 0x58, INSTR_RRE_RR }, + { "thdr", 0x59, INSTR_RRE_RR }, + { "didbr", 0x5b, INSTR_RRF_FUFF }, + { "fidbr", 0x5f, INSTR_RRF_U0FF }, + { "lpxr", 0x60, INSTR_RRE_FF }, + { "lnxr", 0x61, INSTR_RRE_FF }, + { "ltxr", 0x62, INSTR_RRE_FF }, + { "lcxr", 0x63, INSTR_RRE_FF }, + { "lxr", 0x65, INSTR_RRE_RR }, + { "lexr", 0x66, 
INSTR_RRE_FF }, + { "fixr", 0x67, INSTR_RRF_U0FF }, + { "cxr", 0x69, INSTR_RRE_FF }, + { "lzer", 0x74, INSTR_RRE_R0 }, + { "lzdr", 0x75, INSTR_RRE_R0 }, + { "lzxr", 0x76, INSTR_RRE_R0 }, + { "fier", 0x77, INSTR_RRF_U0FF }, + { "fidr", 0x7f, INSTR_RRF_U0FF }, + { "sfpc", 0x84, INSTR_RRE_RR_OPT }, + { "efpc", 0x8c, INSTR_RRE_RR_OPT }, + { "cefbr", 0x94, INSTR_RRE_RF }, + { "cdfbr", 0x95, INSTR_RRE_RF }, + { "cxfbr", 0x96, INSTR_RRE_RF }, + { "cfebr", 0x98, INSTR_RRF_U0RF }, + { "cfdbr", 0x99, INSTR_RRF_U0RF }, + { "cfxbr", 0x9a, INSTR_RRF_U0RF }, + { "cefr", 0xb4, INSTR_RRE_RF }, + { "cdfr", 0xb5, INSTR_RRE_RF }, + { "cxfr", 0xb6, INSTR_RRE_RF }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_b9[] = { +#ifdef CONFIG_64BIT + { "lpgr", 0x00, INSTR_RRE_RR }, + { "lngr", 0x01, INSTR_RRE_RR }, + { "ltgr", 0x02, INSTR_RRE_RR }, + { "lcgr", 0x03, INSTR_RRE_RR }, + { "lgr", 0x04, INSTR_RRE_RR }, + { "lurag", 0x05, INSTR_RRE_RR }, + { "lgbr", 0x06, INSTR_RRE_RR }, + { "lghr", 0x07, INSTR_RRE_RR }, + { "agr", 0x08, INSTR_RRE_RR }, + { "sgr", 0x09, INSTR_RRE_RR }, + { "algr", 0x0a, INSTR_RRE_RR }, + { "slgr", 0x0b, INSTR_RRE_RR }, + { "msgr", 0x0c, INSTR_RRE_RR }, + { "dsgr", 0x0d, INSTR_RRE_RR }, + { "eregg", 0x0e, INSTR_RRE_RR }, + { "lrvgr", 0x0f, INSTR_RRE_RR }, + { "lpgfr", 0x10, INSTR_RRE_RR }, + { "lngfr", 0x11, INSTR_RRE_RR }, + { "ltgfr", 0x12, INSTR_RRE_RR }, + { "lcgfr", 0x13, INSTR_RRE_RR }, + { "lgfr", 0x14, INSTR_RRE_RR }, + { "llgfr", 0x16, INSTR_RRE_RR }, + { "llgtr", 0x17, INSTR_RRE_RR }, + { "agfr", 0x18, INSTR_RRE_RR }, + { "sgfr", 0x19, INSTR_RRE_RR }, + { "algfr", 0x1a, INSTR_RRE_RR }, + { "slgfr", 0x1b, INSTR_RRE_RR }, + { "msgfr", 0x1c, INSTR_RRE_RR }, + { "dsgfr", 0x1d, INSTR_RRE_RR }, + { "cgr", 0x20, INSTR_RRE_RR }, + { "clgr", 0x21, INSTR_RRE_RR }, + { "sturg", 0x25, INSTR_RRE_RR }, + { "lbr", 0x26, INSTR_RRE_RR }, + { "lhr", 0x27, INSTR_RRE_RR }, + { "cgfr", 0x30, INSTR_RRE_RR }, + { "clgfr", 0x31, INSTR_RRE_RR }, + { "bctgr", 0x46, INSTR_RRE_RR }, + { "ngr", 0x80, INSTR_RRE_RR }, + { "ogr", 0x81, INSTR_RRE_RR }, + { "xgr", 0x82, INSTR_RRE_RR }, + { "flogr", 0x83, INSTR_RRE_RR }, + { "llgcr", 0x84, INSTR_RRE_RR }, + { "llghr", 0x85, INSTR_RRE_RR }, + { "mlgr", 0x86, INSTR_RRE_RR }, + { "dlgr", 0x87, INSTR_RRE_RR }, + { "alcgr", 0x88, INSTR_RRE_RR }, + { "slbgr", 0x89, INSTR_RRE_RR }, + { "cspg", 0x8a, INSTR_RRE_RR }, + { "idte", 0x8e, INSTR_RRF_R0RR }, + { "llcr", 0x94, INSTR_RRE_RR }, + { "llhr", 0x95, INSTR_RRE_RR }, + { "esea", 0x9d, INSTR_RRE_R0 }, + { "lptea", 0xaa, INSTR_RRF_RURR }, + { "cu14", 0xb0, INSTR_RRF_M0RR }, + { "cu24", 0xb1, INSTR_RRF_M0RR }, + { "cu41", 0xb2, INSTR_RRF_M0RR }, + { "cu42", 0xb3, INSTR_RRF_M0RR }, +#endif + { "kmac", 0x1e, INSTR_RRE_RR }, + { "lrvr", 0x1f, INSTR_RRE_RR }, + { "km", 0x2e, INSTR_RRE_RR }, + { "kmc", 0x2f, INSTR_RRE_RR }, + { "kimd", 0x3e, INSTR_RRE_RR }, + { "klmd", 0x3f, INSTR_RRE_RR }, + { "epsw", 0x8d, INSTR_RRE_RR }, + { "trtt", 0x90, INSTR_RRE_RR }, + { "trtt", 0x90, INSTR_RRF_M0RR }, + { "trto", 0x91, INSTR_RRE_RR }, + { "trto", 0x91, INSTR_RRF_M0RR }, + { "trot", 0x92, INSTR_RRE_RR }, + { "trot", 0x92, INSTR_RRF_M0RR }, + { "troo", 0x93, INSTR_RRE_RR }, + { "troo", 0x93, INSTR_RRF_M0RR }, + { "mlr", 0x96, INSTR_RRE_RR }, + { "dlr", 0x97, INSTR_RRE_RR }, + { "alcr", 0x98, INSTR_RRE_RR }, + { "slbr", 0x99, INSTR_RRE_RR }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c0[] = { +#ifdef CONFIG_64BIT + { "lgfi", 0x01, INSTR_RIL_RI }, + { "xihf", 0x06, INSTR_RIL_RU }, + { "xilf", 0x07, INSTR_RIL_RU }, 
+ { "iihf", 0x08, INSTR_RIL_RU }, + { "iilf", 0x09, INSTR_RIL_RU }, + { "nihf", 0x0a, INSTR_RIL_RU }, + { "nilf", 0x0b, INSTR_RIL_RU }, + { "oihf", 0x0c, INSTR_RIL_RU }, + { "oilf", 0x0d, INSTR_RIL_RU }, + { "llihf", 0x0e, INSTR_RIL_RU }, + { "llilf", 0x0f, INSTR_RIL_RU }, +#endif + { "larl", 0x00, INSTR_RIL_RP }, + { "brcl", 0x04, INSTR_RIL_UP }, + { "brasl", 0x05, INSTR_RIL_RP }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c2[] = { +#ifdef CONFIG_64BIT + { "slgfi", 0x04, INSTR_RIL_RU }, + { "slfi", 0x05, INSTR_RIL_RU }, + { "agfi", 0x08, INSTR_RIL_RI }, + { "afi", 0x09, INSTR_RIL_RI }, + { "algfi", 0x0a, INSTR_RIL_RU }, + { "alfi", 0x0b, INSTR_RIL_RU }, + { "cgfi", 0x0c, INSTR_RIL_RI }, + { "cfi", 0x0d, INSTR_RIL_RI }, + { "clgfi", 0x0e, INSTR_RIL_RU }, + { "clfi", 0x0f, INSTR_RIL_RU }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_c8[] = { +#ifdef CONFIG_64BIT + { "mvcos", 0x00, INSTR_SSF_RRDRD }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_e3[] = { +#ifdef CONFIG_64BIT + { "ltg", 0x02, INSTR_RXY_RRRD }, + { "lrag", 0x03, INSTR_RXY_RRRD }, + { "lg", 0x04, INSTR_RXY_RRRD }, + { "cvby", 0x06, INSTR_RXY_RRRD }, + { "ag", 0x08, INSTR_RXY_RRRD }, + { "sg", 0x09, INSTR_RXY_RRRD }, + { "alg", 0x0a, INSTR_RXY_RRRD }, + { "slg", 0x0b, INSTR_RXY_RRRD }, + { "msg", 0x0c, INSTR_RXY_RRRD }, + { "dsg", 0x0d, INSTR_RXY_RRRD }, + { "cvbg", 0x0e, INSTR_RXY_RRRD }, + { "lrvg", 0x0f, INSTR_RXY_RRRD }, + { "lt", 0x12, INSTR_RXY_RRRD }, + { "lray", 0x13, INSTR_RXY_RRRD }, + { "lgf", 0x14, INSTR_RXY_RRRD }, + { "lgh", 0x15, INSTR_RXY_RRRD }, + { "llgf", 0x16, INSTR_RXY_RRRD }, + { "llgt", 0x17, INSTR_RXY_RRRD }, + { "agf", 0x18, INSTR_RXY_RRRD }, + { "sgf", 0x19, INSTR_RXY_RRRD }, + { "algf", 0x1a, INSTR_RXY_RRRD }, + { "slgf", 0x1b, INSTR_RXY_RRRD }, + { "msgf", 0x1c, INSTR_RXY_RRRD }, + { "dsgf", 0x1d, INSTR_RXY_RRRD }, + { "cg", 0x20, INSTR_RXY_RRRD }, + { "clg", 0x21, INSTR_RXY_RRRD }, + { "stg", 0x24, INSTR_RXY_RRRD }, + { "cvdy", 0x26, INSTR_RXY_RRRD }, + { "cvdg", 0x2e, INSTR_RXY_RRRD }, + { "strvg", 0x2f, INSTR_RXY_RRRD }, + { "cgf", 0x30, INSTR_RXY_RRRD }, + { "clgf", 0x31, INSTR_RXY_RRRD }, + { "strvh", 0x3f, INSTR_RXY_RRRD }, + { "bctg", 0x46, INSTR_RXY_RRRD }, + { "sty", 0x50, INSTR_RXY_RRRD }, + { "msy", 0x51, INSTR_RXY_RRRD }, + { "ny", 0x54, INSTR_RXY_RRRD }, + { "cly", 0x55, INSTR_RXY_RRRD }, + { "oy", 0x56, INSTR_RXY_RRRD }, + { "xy", 0x57, INSTR_RXY_RRRD }, + { "ly", 0x58, INSTR_RXY_RRRD }, + { "cy", 0x59, INSTR_RXY_RRRD }, + { "ay", 0x5a, INSTR_RXY_RRRD }, + { "sy", 0x5b, INSTR_RXY_RRRD }, + { "aly", 0x5e, INSTR_RXY_RRRD }, + { "sly", 0x5f, INSTR_RXY_RRRD }, + { "sthy", 0x70, INSTR_RXY_RRRD }, + { "lay", 0x71, INSTR_RXY_RRRD }, + { "stcy", 0x72, INSTR_RXY_RRRD }, + { "icy", 0x73, INSTR_RXY_RRRD }, + { "lb", 0x76, INSTR_RXY_RRRD }, + { "lgb", 0x77, INSTR_RXY_RRRD }, + { "lhy", 0x78, INSTR_RXY_RRRD }, + { "chy", 0x79, INSTR_RXY_RRRD }, + { "ahy", 0x7a, INSTR_RXY_RRRD }, + { "shy", 0x7b, INSTR_RXY_RRRD }, + { "ng", 0x80, INSTR_RXY_RRRD }, + { "og", 0x81, INSTR_RXY_RRRD }, + { "xg", 0x82, INSTR_RXY_RRRD }, + { "mlg", 0x86, INSTR_RXY_RRRD }, + { "dlg", 0x87, INSTR_RXY_RRRD }, + { "alcg", 0x88, INSTR_RXY_RRRD }, + { "slbg", 0x89, INSTR_RXY_RRRD }, + { "stpq", 0x8e, INSTR_RXY_RRRD }, + { "lpq", 0x8f, INSTR_RXY_RRRD }, + { "llgc", 0x90, INSTR_RXY_RRRD }, + { "llgh", 0x91, INSTR_RXY_RRRD }, + { "llc", 0x94, INSTR_RXY_RRRD }, + { "llh", 0x95, INSTR_RXY_RRRD }, +#endif + { "lrv", 0x1e, INSTR_RXY_RRRD }, + { "lrvh", 0x1f, INSTR_RXY_RRRD }, 
+ { "strv", 0x3e, INSTR_RXY_RRRD }, + { "ml", 0x96, INSTR_RXY_RRRD }, + { "dl", 0x97, INSTR_RXY_RRRD }, + { "alc", 0x98, INSTR_RXY_RRRD }, + { "slb", 0x99, INSTR_RXY_RRRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_e5[] = { +#ifdef CONFIG_64BIT + { "strag", 0x02, INSTR_SSE_RDRD }, +#endif + { "lasp", 0x00, INSTR_SSE_RDRD }, + { "tprot", 0x01, INSTR_SSE_RDRD }, + { "mvcsk", 0x0e, INSTR_SSE_RDRD }, + { "mvcdk", 0x0f, INSTR_SSE_RDRD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_eb[] = { +#ifdef CONFIG_64BIT + { "lmg", 0x04, INSTR_RSY_RRRD }, + { "srag", 0x0a, INSTR_RSY_RRRD }, + { "slag", 0x0b, INSTR_RSY_RRRD }, + { "srlg", 0x0c, INSTR_RSY_RRRD }, + { "sllg", 0x0d, INSTR_RSY_RRRD }, + { "tracg", 0x0f, INSTR_RSY_RRRD }, + { "csy", 0x14, INSTR_RSY_RRRD }, + { "rllg", 0x1c, INSTR_RSY_RRRD }, + { "clmh", 0x20, INSTR_RSY_RURD }, + { "clmy", 0x21, INSTR_RSY_RURD }, + { "stmg", 0x24, INSTR_RSY_RRRD }, + { "stctg", 0x25, INSTR_RSY_CCRD }, + { "stmh", 0x26, INSTR_RSY_RRRD }, + { "stcmh", 0x2c, INSTR_RSY_RURD }, + { "stcmy", 0x2d, INSTR_RSY_RURD }, + { "lctlg", 0x2f, INSTR_RSY_CCRD }, + { "csg", 0x30, INSTR_RSY_RRRD }, + { "cdsy", 0x31, INSTR_RSY_RRRD }, + { "cdsg", 0x3e, INSTR_RSY_RRRD }, + { "bxhg", 0x44, INSTR_RSY_RRRD }, + { "bxleg", 0x45, INSTR_RSY_RRRD }, + { "tmy", 0x51, INSTR_SIY_URD }, + { "mviy", 0x52, INSTR_SIY_URD }, + { "niy", 0x54, INSTR_SIY_URD }, + { "cliy", 0x55, INSTR_SIY_URD }, + { "oiy", 0x56, INSTR_SIY_URD }, + { "xiy", 0x57, INSTR_SIY_URD }, + { "icmh", 0x80, INSTR_RSE_RURD }, + { "icmh", 0x80, INSTR_RSY_RURD }, + { "icmy", 0x81, INSTR_RSY_RURD }, + { "clclu", 0x8f, INSTR_RSY_RRRD }, + { "stmy", 0x90, INSTR_RSY_RRRD }, + { "lmh", 0x96, INSTR_RSY_RRRD }, + { "lmy", 0x98, INSTR_RSY_RRRD }, + { "lamy", 0x9a, INSTR_RSY_AARD }, + { "stamy", 0x9b, INSTR_RSY_AARD }, +#endif + { "rll", 0x1d, INSTR_RSY_RRRD }, + { "mvclu", 0x8e, INSTR_RSY_RRRD }, + { "tp", 0xc0, INSTR_RSL_R0RD }, + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_ec[] = { +#ifdef CONFIG_64BIT + { "brxhg", 0x44, INSTR_RIE_RRP }, + { "brxlg", 0x45, INSTR_RIE_RRP }, +#endif + { "", 0, INSTR_INVALID } +}; + +static struct insn opcode_ed[] = { +#ifdef CONFIG_64BIT + { "mayl", 0x38, INSTR_RXF_FRRDF }, + { "myl", 0x39, INSTR_RXF_FRRDF }, + { "may", 0x3a, INSTR_RXF_FRRDF }, + { "my", 0x3b, INSTR_RXF_FRRDF }, + { "mayh", 0x3c, INSTR_RXF_FRRDF }, + { "myh", 0x3d, INSTR_RXF_FRRDF }, + { "ley", 0x64, INSTR_RXY_FRRD }, + { "ldy", 0x65, INSTR_RXY_FRRD }, + { "stey", 0x66, INSTR_RXY_FRRD }, + { "stdy", 0x67, INSTR_RXY_FRRD }, +#endif + { "ldeb", 0x04, INSTR_RXE_FRRD }, + { "lxdb", 0x05, INSTR_RXE_FRRD }, + { "lxeb", 0x06, INSTR_RXE_FRRD }, + { "mxdb", 0x07, INSTR_RXE_FRRD }, + { "keb", 0x08, INSTR_RXE_FRRD }, + { "ceb", 0x09, INSTR_RXE_FRRD }, + { "aeb", 0x0a, INSTR_RXE_FRRD }, + { "seb", 0x0b, INSTR_RXE_FRRD }, + { "mdeb", 0x0c, INSTR_RXE_FRRD }, + { "deb", 0x0d, INSTR_RXE_FRRD }, + { "maeb", 0x0e, INSTR_RXF_FRRDF }, + { "mseb", 0x0f, INSTR_RXF_FRRDF }, + { "tceb", 0x10, INSTR_RXE_FRRD }, + { "tcdb", 0x11, INSTR_RXE_FRRD }, + { "tcxb", 0x12, INSTR_RXE_FRRD }, + { "sqeb", 0x14, INSTR_RXE_FRRD }, + { "sqdb", 0x15, INSTR_RXE_FRRD }, + { "meeb", 0x17, INSTR_RXE_FRRD }, + { "kdb", 0x18, INSTR_RXE_FRRD }, + { "cdb", 0x19, INSTR_RXE_FRRD }, + { "adb", 0x1a, INSTR_RXE_FRRD }, + { "sdb", 0x1b, INSTR_RXE_FRRD }, + { "mdb", 0x1c, INSTR_RXE_FRRD }, + { "ddb", 0x1d, INSTR_RXE_FRRD }, + { "madb", 0x1e, INSTR_RXF_FRRDF }, + { "msdb", 0x1f, INSTR_RXF_FRRDF }, + { "lde", 0x24, INSTR_RXE_FRRD }, + { "lxd", 
0x25, INSTR_RXE_FRRD }, + { "lxe", 0x26, INSTR_RXE_FRRD }, + { "mae", 0x2e, INSTR_RXF_FRRDF }, + { "mse", 0x2f, INSTR_RXF_FRRDF }, + { "sqe", 0x34, INSTR_RXE_FRRD }, + { "mee", 0x37, INSTR_RXE_FRRD }, + { "mad", 0x3e, INSTR_RXF_FRRDF }, + { "msd", 0x3f, INSTR_RXF_FRRDF }, + { "", 0, INSTR_INVALID } +}; + +/* Extracts an operand value from an instruction. */ +static unsigned int extract_operand(unsigned char *code, + const struct operand *operand) +{ + unsigned int val; + int bits; + + /* Extract fragments of the operand byte for byte. */ + code += operand->shift / 8; + bits = (operand->shift & 7) + operand->bits; + val = 0; + do { + val <<= 8; + val |= (unsigned int) *code++; + bits -= 8; + } while (bits > 0); + val >>= -bits; + val &= ((1U << (operand->bits - 1)) << 1) - 1; + + /* Check for special long displacement case. */ + if (operand->bits == 20 && operand->shift == 20) + val = (val & 0xff) << 12 | (val & 0xfff00) >> 8; + + /* Sign extend value if the operand is signed or pc relative. */ + if ((operand->flags & (OPERAND_SIGNED | OPERAND_PCREL)) && + (val & (1U << (operand->bits - 1)))) + val |= (-1U << (operand->bits - 1)) << 1; + + /* Double value if the operand is pc relative. */ + if (operand->flags & OPERAND_PCREL) + val <<= 1; + + /* Length x in an instructions has real length x + 1. */ + if (operand->flags & OPERAND_LENGTH) + val++; + return val; +} + +static inline int insn_length(unsigned char code) +{ + return ((((int) code + 64) >> 7) + 1) << 1; +} + +static struct insn *find_insn(unsigned char *code) +{ + unsigned char opfrag = code[1]; + unsigned char opmask; + struct insn *table; + + switch (code[0]) { + case 0x01: + table = opcode_01; + break; + case 0xa5: + table = opcode_a5; + break; + case 0xa7: + table = opcode_a7; + break; + case 0xb2: + table = opcode_b2; + break; + case 0xb3: + table = opcode_b3; + break; + case 0xb9: + table = opcode_b9; + break; + case 0xc0: + table = opcode_c0; + break; + case 0xc2: + table = opcode_c2; + break; + case 0xc8: + table = opcode_c8; + break; + case 0xe3: + table = opcode_e3; + opfrag = code[5]; + break; + case 0xe5: + table = opcode_e5; + break; + case 0xeb: + table = opcode_eb; + opfrag = code[5]; + break; + case 0xec: + table = opcode_ec; + opfrag = code[5]; + break; + case 0xed: + table = opcode_ed; + opfrag = code[5]; + break; + default: + table = opcode; + opfrag = code[0]; + break; + } + while (table->format != INSTR_INVALID) { + opmask = formats[table->format][0]; + if (table->opfrag == (opfrag & opmask)) + return table; + table++; + } + return NULL; +} + +static int print_insn(char *buffer, unsigned char *code, unsigned long addr) +{ + struct insn *insn; + const unsigned char *ops; + const struct operand *operand; + unsigned int value; + char separator; + char *ptr; + + ptr = buffer; + insn = find_insn(code); + if (insn) { + ptr += sprintf(ptr, "%.5s\t", insn->name); + /* Extract the operands. 
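insn_length() above encodes the s390 rule that the two leftmost bits of the first opcode byte select the instruction size (00 gives 2 bytes, 01 and 10 give 4, 11 gives 6), and extract_operand() reassembles the 20-bit long displacement of the RSY/RXY/SIY formats from its 12-bit low part and 8-bit high part before sign-extending it. A small standalone check of both calculations; the sample instruction bytes are hand-assembled for illustration:

  #include <stdio.h>

  /*
   * Length of an s390 instruction from its first byte: top two bits
   * 00 -> 2, 01/10 -> 4, 11 -> 6 bytes. Same arithmetic as the kernel.
   */
  static int insn_length(unsigned char code)
  {
          return ((((int) code + 64) >> 7) + 1) << 1;
  }

  /*
   * Reassemble and sign-extend the 20-bit displacement of a
   * long-displacement instruction: 12 low bits (DL) at bit 20,
   * 8 high bits (DH) at bit 32.
   */
  static int long_disp(const unsigned char *code)
  {
          unsigned int dl = (code[2] & 0x0f) << 8 | code[3];
          unsigned int dh = code[4];
          int val = dh << 12 | dl;

          if (val & 0x80000)              /* bit 19 set -> negative */
                  val -= 1 << 20;
          return val;
  }

  int main(void)
  {
          /* "ly": e3, R1=1 X2=0, B2=2 DL=0xffc, DH=0xff, low opcode 0x58 */
          unsigned char ly[] = { 0xe3, 0x10, 0x2f, 0xfc, 0xff, 0x58 };

          printf("lr length: %d\n", insn_length(0x18));   /* 2 */
          printf("l  length: %d\n", insn_length(0x58));   /* 4 */
          printf("ly length: %d\n", insn_length(0xe3));   /* 6 */
          printf("ly displacement: %d\n", long_disp(ly)); /* -4 */
          return 0;
  }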
*/ + separator = 0; + for (ops = formats[insn->format] + 1; *ops != 0; ops++) { + operand = operands + *ops; + value = extract_operand(code, operand); + if ((operand->flags & OPERAND_INDEX) && value == 0) + continue; + if ((operand->flags & OPERAND_BASE) && + value == 0 && separator == '(') { + separator = ','; + continue; + } + if (separator) + ptr += sprintf(ptr, "%c", separator); + if (operand->flags & OPERAND_GPR) + ptr += sprintf(ptr, "%%r%i", value); + else if (operand->flags & OPERAND_FPR) + ptr += sprintf(ptr, "%%f%i", value); + else if (operand->flags & OPERAND_AR) + ptr += sprintf(ptr, "%%a%i", value); + else if (operand->flags & OPERAND_CR) + ptr += sprintf(ptr, "%%c%i", value); + else if (operand->flags & OPERAND_PCREL) + ptr += sprintf(ptr, "%lx", value + addr); + else if (operand->flags & OPERAND_SIGNED) + ptr += sprintf(ptr, "%i", value); + else + ptr += sprintf(ptr, "%u", value); + if (operand->flags & OPERAND_DISP) + separator = '('; + else if (operand->flags & OPERAND_BASE) { + ptr += sprintf(ptr, ")"); + separator = ','; + } else + separator = ','; + } + } else + ptr += sprintf(ptr, "unknown"); + return (int) (ptr - buffer); +} + +void show_code(struct pt_regs *regs) +{ + char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; + unsigned char code[64]; + char buffer[64], *ptr; + mm_segment_t old_fs; + unsigned long addr; + int start, end, opsize, hops, i; + + /* Get a snapshot of the 64 bytes surrounding the fault address. */ + old_fs = get_fs(); + set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS); + for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { + addr = regs->psw.addr - 34 + start; + if (__copy_from_user(code + start - 2, + (char __user *) addr, 2)) + break; + } + for (end = 32; end < 64; end += 2) { + addr = regs->psw.addr + end - 32; + if (__copy_from_user(code + end, + (char __user *) addr, 2)) + break; + } + set_fs(old_fs); + /* Code snapshot useable ? */ + if ((regs->psw.addr & 1) || start >= end) { + printk("%s Code: Bad PSW.\n", mode); + return; + } + /* Find a starting point for the disassembly. */ + while (start < 32) { + hops = 0; + for (i = 0, hops = 0; start + i < 32 && hops < 3; hops++) { + if (!find_insn(code + start + i)) + break; + i += insn_length(code[start + i]); + } + if (start + i == 32) + /* Looks good, sequence ends at PSW. */ + break; + start += 2; + } + /* Decode the instructions. */ + ptr = buffer; + ptr += sprintf(ptr, "%s Code:", mode); + hops = 0; + while (start < end && hops < 8) { + *ptr++ = (start == 32) ? '>' : ' '; + addr = regs->psw.addr + start - 32; + ptr += sprintf(ptr, ONELONG, addr); + opsize = insn_length(code[start]); + if (start + opsize >= end) + break; + for (i = 0; i < opsize; i++) + ptr += sprintf(ptr, "%02x", code[start + i]); + *ptr++ = '\t'; + if (i < 6) + *ptr++ = '\t'; + ptr += print_insn(ptr, code + start, addr); + start += opsize; + printk(buffer); + ptr = buffer; + ptr += sprintf(ptr, "\n "); + hops++; + } + printk("\n"); +} diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 5e47936573f..50538e54561 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -253,11 +253,10 @@ static noinline __init void find_memory_chunks(unsigned long memsize) break; #endif /* - * Finish memory detection at the first hole, unless - * - we reached the hsa -> skip it. - * - we know there must be more. + * Finish memory detection at the first hole + * if storage size is unknown. 
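The find_memory_chunks() change above makes the probing loop stop at the first inaccessible chunk whenever no expected storage size was handed in, instead of carving out an exception for the hole at ADDR2G that the old comment attributed to the HSA. A hedged sketch of that scan pattern; chunk_accessible() is a made-up stand-in for the kernel's storage probe and the 16 MB granularity is purely illustrative:

  #include <stdio.h>

  #define CHUNK (16UL << 20)              /* probe granularity: 16 MB */

  /* Hypothetical probe standing in for the kernel's storage test. */
  static int chunk_accessible(unsigned long addr)
  {
          return addr < (512UL << 20);    /* pretend 512 MB are present */
  }

  static unsigned long detect_memory(unsigned long memsize)
  {
          unsigned long addr = 0;

          while (1) {
                  /*
                   * Finish memory detection at the first hole
                   * if the storage size is unknown.
                   */
                  if (!chunk_accessible(addr) && !memsize)
                          break;
                  /* Otherwise keep going until the announced size. */
                  if (memsize && addr >= memsize)
                          break;
                  addr += CHUNK;
          }
          return addr;
  }

  int main(void)
  {
          printf("detected %lu MB\n", detect_memory(0) >> 20);
          printf("limited  %lu MB\n", detect_memory(256UL << 20) >> 20);
          return 0;
  }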
*/ - if (cc == -1UL && !memsize && old_addr != ADDR2G) + if (cc == -1UL && !memsize) break; if (memsize && addr >= memsize) break; diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index dddc3de3040..c8a2212014e 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -249,8 +249,6 @@ sysc_do_restart: bnz BASED(sysc_tracesys) basr %r14,%r8 # call sys_xxxx st %r2,SP_R2(%r15) # store return value (change R2 on stack) - # ATTENTION: check sys_execve_glue before - # changing anything here !! sysc_return: tm SP_PSW+1(%r15),0x01 # returning to user ? @@ -381,50 +379,37 @@ ret_from_fork: b BASED(sysc_return) # -# clone, fork, vfork, exec and sigreturn need glue, -# because they all expect pt_regs as parameter, -# but are called with different parameter. -# return-address is set up above +# kernel_execve function needs to deal with pt_regs that is not +# at the usual place # -sys_clone_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lclone) - br %r1 # branch to sys_clone - -sys_fork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lfork) - br %r1 # branch to sys_fork - -sys_vfork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lvfork) - br %r1 # branch to sys_vfork - -sys_execve_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Lexecve) - lr %r12,%r14 # save return address - basr %r14,%r1 # call sys_execve - ltr %r2,%r2 # check if execve failed - bnz 0(%r12) # it did fail -> store result in gpr2 - b 4(%r12) # SKIP ST 2,SP_R2(15) after BASR 14,8 - # in system_call/sysc_tracesys - -sys_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - l %r1,BASED(.Lsigreturn) - br %r1 # branch to sys_sigreturn - -sys_rt_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - l %r1,BASED(.Lrt_sigreturn) - br %r1 # branch to sys_sigreturn - -sys_sigaltstack_glue: - la %r4,SP_PTREGS(%r15) # load pt_regs as parameter - l %r1,BASED(.Lsigaltstack) - br %r1 # branch to sys_sigreturn + .globl kernel_execve +kernel_execve: + stm %r12,%r15,48(%r15) + lr %r14,%r15 + l %r13,__LC_SVC_NEW_PSW+4 + s %r15,BASED(.Lc_spsize) + st %r14,__SF_BACKCHAIN(%r15) + la %r12,SP_PTREGS(%r15) + xc 0(__PT_SIZE,%r12),0(%r12) + l %r1,BASED(.Ldo_execve) + lr %r5,%r12 + basr %r14,%r1 + ltr %r2,%r2 + be BASED(0f) + a %r15,BASED(.Lc_spsize) + lm %r12,%r15,48(%r15) + br %r14 + # execve succeeded. 
+0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts + l %r15,__LC_KERNEL_STACK # load ksp + s %r15,BASED(.Lc_spsize) # make room for registers & psw + l %r9,__LC_THREAD_INFO + mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) + stosm __SF_EMPTY(%r15),0x03 # reenable interrupts + l %r1,BASED(.Lexecve_tail) + basr %r14,%r1 + b BASED(sysc_return) /* * Program check handler routine @@ -1031,19 +1016,11 @@ cleanup_io_leave_insn: .Ldo_extint: .long do_extint .Ldo_signal: .long do_signal .Lhandle_per: .long do_single_step +.Ldo_execve: .long do_execve +.Lexecve_tail: .long execve_tail .Ljump_table: .long pgm_check_table .Lschedule: .long schedule -.Lclone: .long sys_clone -.Lexecve: .long sys_execve -.Lfork: .long sys_fork -.Lrt_sigreturn: .long sys_rt_sigreturn -.Lrt_sigsuspend: - .long sys_rt_sigsuspend -.Lsigreturn: .long sys_sigreturn -.Lsigsuspend: .long sys_sigsuspend -.Lsigaltstack: .long sys_sigaltstack .Ltrace: .long syscall_trace -.Lvfork: .long sys_vfork .Lschedtail: .long schedule_tail .Lsysc_table: .long sys_call_table #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 0f758c329a5..93745fd8f55 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -244,8 +244,6 @@ sysc_noemu: jnz sysc_tracesys basr %r14,%r8 # call sys_xxxx stg %r2,SP_R2(%r15) # store return value (change R2 on stack) - # ATTENTION: check sys_execve_glue before - # changing anything here !! sysc_return: tm SP_PSW+1(%r15),0x01 # returning to user ? @@ -371,77 +369,35 @@ ret_from_fork: j sysc_return # -# clone, fork, vfork, exec and sigreturn need glue, -# because they all expect pt_regs as parameter, -# but are called with different parameter. -# return-address is set up above +# kernel_execve function needs to deal with pt_regs that is not +# at the usual place # -sys_clone_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys_clone # branch to sys_clone - -#ifdef CONFIG_COMPAT -sys32_clone_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys32_clone # branch to sys32_clone -#endif - -sys_fork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys_fork # branch to sys_fork - -sys_vfork_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - jg sys_vfork # branch to sys_vfork - -sys_execve_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - lgr %r12,%r14 # save return address - brasl %r14,sys_execve # call sys_execve - ltgr %r2,%r2 # check if execve failed - bnz 0(%r12) # it did fail -> store result in gpr2 - b 6(%r12) # SKIP STG 2,SP_R2(15) in - # system_call/sysc_tracesys -#ifdef CONFIG_COMPAT -sys32_execve_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs - lgr %r12,%r14 # save return address - brasl %r14,sys32_execve # call sys32_execve - ltgr %r2,%r2 # check if execve failed - bnz 0(%r12) # it did fail -> store result in gpr2 - b 6(%r12) # SKIP STG 2,SP_R2(15) in - # system_call/sysc_tracesys -#endif - -sys_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys_sigreturn # branch to sys_sigreturn - -#ifdef CONFIG_COMPAT -sys32_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys32_sigreturn # branch to sys32_sigreturn -#endif - -sys_rt_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys_rt_sigreturn # branch to sys_sigreturn - -#ifdef CONFIG_COMPAT -sys32_rt_sigreturn_glue: - la %r2,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys32_rt_sigreturn # branch to sys32_sigreturn -#endif - -sys_sigaltstack_glue: - la 
%r4,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys_sigaltstack # branch to sys_sigreturn - -#ifdef CONFIG_COMPAT -sys32_sigaltstack_glue: - la %r4,SP_PTREGS(%r15) # load pt_regs as parameter - jg sys32_sigaltstack_wrapper # branch to sys_sigreturn -#endif + .globl kernel_execve +kernel_execve: + stmg %r12,%r15,96(%r15) + lgr %r14,%r15 + aghi %r15,-SP_SIZE + stg %r14,__SF_BACKCHAIN(%r15) + la %r12,SP_PTREGS(%r15) + xc 0(__PT_SIZE,%r12),0(%r12) + lgr %r5,%r12 + brasl %r14,do_execve + ltgfr %r2,%r2 + je 0f + aghi %r15,SP_SIZE + lmg %r12,%r15,96(%r15) + br %r14 + # execve succeeded. +0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts + lg %r15,__LC_KERNEL_STACK # load ksp + aghi %r15,-SP_SIZE # make room for registers & psw + lg %r13,__LC_SVC_NEW_PSW+8 + lg %r9,__LC_THREAD_INFO + mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + stosm __SF_EMPTY(%r15),0x03 # reenable interrupts + brasl %r14,execve_tail + j sysc_return /* * Program check handler routine diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 37010709fe6..a87b1976d40 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -39,7 +39,69 @@ startup_continue: basr %r13,0 # get base .LPG1: sll %r13,1 # remove high order bit srl %r13,1 - lhi %r1,1 # mode 1 = esame + +#ifdef CONFIG_ZFCPDUMP + + # check if we have been ipled using zfcp dump: + + tm 0xb9,0x01 # test if subchannel is enabled + jno .nodump # subchannel disabled + l %r1,0xb8 + la %r5,.Lipl_schib-.LPG1(%r13) + stsch 0(%r5) # get schib of subchannel + jne .nodump # schib not available + tm 5(%r5),0x01 # devno valid? + jno .nodump + tm 4(%r5),0x80 # qdio capable device? + jno .nodump + l %r2,20(%r0) # address of ipl parameter block + lhi %r3,0 + ic %r3,0x148(%r2) # get opt field + chi %r3,0x20 # load with dump? + jne .nodump + + # store all prefix registers in case of load with dump: + + la %r7,0 # base register for 0 page + la %r8,0 # first cpu + l %r11,.Lpref_arr_ptr-.LPG1(%r13) # address of prefix array + ahi %r11,4 # skip boot cpu + lr %r12,%r11 + ahi %r12,(CONFIG_NR_CPUS*4) # end of prefix array + stap .Lcurrent_cpu+2-.LPG1(%r13) # store current cpu addr +1: + cl %r8,.Lcurrent_cpu-.LPG1(%r13) # is ipl cpu ? + je 4f # if yes get next cpu +2: + lr %r9,%r7 + sigp %r9,%r8,0x9 # stop & store status of cpu + brc 8,3f # accepted + brc 4,4f # status stored: next cpu + brc 2,2b # busy: try again + brc 1,4f # not op: next cpu +3: + mvc 0(4,%r11),264(%r7) # copy prefix register to prefix array + ahi %r11,4 # next element in prefix array + clr %r11,%r12 + je 5f # no more space in prefix array +4: + ahi %r8,1 # next cpu (r8 += 1) + cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ? 
+ jl 1b # jump if not last cpu +5: + lhi %r1,2 # mode 2 = esame (dump) + j 6f + .align 4 +.Lipl_schib: + .rept 13 + .long 0 + .endr +.nodump: + lhi %r1,1 # mode 1 = esame (normal ipl) +6: +#else + lhi %r1,1 # mode 1 = esame (normal ipl) +#endif /* CONFIG_ZFCPDUMP */ mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # set cpuid to zero sigp %r1,%r0,0x12 # switch to esame mode @@ -149,6 +211,14 @@ startup_continue: .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lnop: .long 0x07000700 +#ifdef CONFIG_ZFCPDUMP +.Lcurrent_cpu: + .long 0x0 +.Llast_cpu: + .long 0x0000ffff +.Lpref_arr_ptr: + .long zfcpdump_prefix_array +#endif /* CONFIG_ZFCPDUMP */ .Lparmaddr: .quad PARMAREA .align 64 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index f731185bf2b..06833ac2b11 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -29,36 +29,21 @@ #define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm) #define SCCB_FLAG (s390_readinfo_sccb.flags) -enum ipl_type { - IPL_TYPE_NONE = 1, - IPL_TYPE_UNKNOWN = 2, - IPL_TYPE_CCW = 4, - IPL_TYPE_FCP = 8, - IPL_TYPE_NSS = 16, -}; - -#define IPL_NONE_STR "none" -#define IPL_UNKNOWN_STR "unknown" -#define IPL_CCW_STR "ccw" -#define IPL_FCP_STR "fcp" -#define IPL_NSS_STR "nss" - -/* - * Must be in data section since the bss section - * is not cleared when these are accessed. - */ -u16 ipl_devno __attribute__((__section__(".data"))) = 0; -u32 ipl_flags __attribute__((__section__(".data"))) = 0; +#define IPL_UNKNOWN_STR "unknown" +#define IPL_CCW_STR "ccw" +#define IPL_FCP_STR "fcp" +#define IPL_FCP_DUMP_STR "fcp_dump" +#define IPL_NSS_STR "nss" static char *ipl_type_str(enum ipl_type type) { switch (type) { - case IPL_TYPE_NONE: - return IPL_NONE_STR; case IPL_TYPE_CCW: return IPL_CCW_STR; case IPL_TYPE_FCP: return IPL_FCP_STR; + case IPL_TYPE_FCP_DUMP: + return IPL_FCP_DUMP_STR; case IPL_TYPE_NSS: return IPL_NSS_STR; case IPL_TYPE_UNKNOWN: @@ -67,15 +52,55 @@ static char *ipl_type_str(enum ipl_type type) } } +enum dump_type { + DUMP_TYPE_NONE = 1, + DUMP_TYPE_CCW = 2, + DUMP_TYPE_FCP = 4, +}; + +#define DUMP_NONE_STR "none" +#define DUMP_CCW_STR "ccw" +#define DUMP_FCP_STR "fcp" + +static char *dump_type_str(enum dump_type type) +{ + switch (type) { + case DUMP_TYPE_NONE: + return DUMP_NONE_STR; + case DUMP_TYPE_CCW: + return DUMP_CCW_STR; + case DUMP_TYPE_FCP: + return DUMP_FCP_STR; + default: + return NULL; + } +} + +/* + * Must be in data section since the bss section + * is not cleared when these are accessed. 
+ */ +static u16 ipl_devno __attribute__((__section__(".data"))) = 0; +u32 ipl_flags __attribute__((__section__(".data"))) = 0; + enum ipl_method { - IPL_METHOD_NONE, - IPL_METHOD_CCW_CIO, - IPL_METHOD_CCW_DIAG, - IPL_METHOD_CCW_VM, - IPL_METHOD_FCP_RO_DIAG, - IPL_METHOD_FCP_RW_DIAG, - IPL_METHOD_FCP_RO_VM, - IPL_METHOD_NSS, + REIPL_METHOD_CCW_CIO, + REIPL_METHOD_CCW_DIAG, + REIPL_METHOD_CCW_VM, + REIPL_METHOD_FCP_RO_DIAG, + REIPL_METHOD_FCP_RW_DIAG, + REIPL_METHOD_FCP_RO_VM, + REIPL_METHOD_FCP_DUMP, + REIPL_METHOD_NSS, + REIPL_METHOD_DEFAULT, +}; + +enum dump_method { + DUMP_METHOD_NONE, + DUMP_METHOD_CCW_CIO, + DUMP_METHOD_CCW_DIAG, + DUMP_METHOD_CCW_VM, + DUMP_METHOD_FCP_DIAG, }; enum shutdown_action { @@ -107,15 +132,15 @@ static int diag308_set_works = 0; static int reipl_capabilities = IPL_TYPE_UNKNOWN; static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; -static enum ipl_method reipl_method = IPL_METHOD_NONE; +static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; static struct ipl_parameter_block *reipl_block_fcp; static struct ipl_parameter_block *reipl_block_ccw; static char reipl_nss_name[NSS_NAME_SIZE + 1]; -static int dump_capabilities = IPL_TYPE_NONE; -static enum ipl_type dump_type = IPL_TYPE_NONE; -static enum ipl_method dump_method = IPL_METHOD_NONE; +static int dump_capabilities = DUMP_TYPE_NONE; +static enum dump_type dump_type = DUMP_TYPE_NONE; +static enum dump_method dump_method = DUMP_METHOD_NONE; static struct ipl_parameter_block *dump_block_fcp; static struct ipl_parameter_block *dump_block_ccw; @@ -134,6 +159,7 @@ int diag308(unsigned long subcode, void *addr) : "d" (subcode) : "cc", "memory"); return _rc; } +EXPORT_SYMBOL_GPL(diag308); /* SYSFS */ @@ -197,7 +223,7 @@ static void make_attrs_ro(struct attribute **attrs) * ipl section */ -static enum ipl_type ipl_get_type(void) +static __init enum ipl_type get_ipl_type(void) { struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; @@ -211,12 +237,44 @@ static enum ipl_type ipl_get_type(void) return IPL_TYPE_UNKNOWN; if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP) return IPL_TYPE_UNKNOWN; + if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) + return IPL_TYPE_FCP_DUMP; return IPL_TYPE_FCP; } +void __init setup_ipl_info(void) +{ + ipl_info.type = get_ipl_type(); + switch (ipl_info.type) { + case IPL_TYPE_CCW: + ipl_info.data.ccw.dev_id.devno = ipl_devno; + ipl_info.data.ccw.dev_id.ssid = 0; + break; + case IPL_TYPE_FCP: + case IPL_TYPE_FCP_DUMP: + ipl_info.data.fcp.dev_id.devno = + IPL_PARMBLOCK_START->ipl_info.fcp.devno; + ipl_info.data.fcp.dev_id.ssid = 0; + ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; + ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; + break; + case IPL_TYPE_NSS: + strncpy(ipl_info.data.nss.name, kernel_nss_name, + sizeof(ipl_info.data.nss.name)); + break; + case IPL_TYPE_UNKNOWN: + default: + /* We have no info to copy */ + break; + } +} + +struct ipl_info ipl_info; +EXPORT_SYMBOL_GPL(ipl_info); + static ssize_t ipl_type_show(struct subsystem *subsys, char *page) { - return sprintf(page, "%s\n", ipl_type_str(ipl_get_type())); + return sprintf(page, "%s\n", ipl_type_str(ipl_info.type)); } static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); @@ -225,10 +283,11 @@ static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page) { struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - switch (ipl_get_type()) { + switch (ipl_info.type) { case IPL_TYPE_CCW: return sprintf(page, "0.0.%04x\n", ipl_devno); case IPL_TYPE_FCP: + case 
IPL_TYPE_FCP_DUMP: return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); default: return 0; @@ -485,23 +544,29 @@ static int reipl_set_type(enum ipl_type type) switch(type) { case IPL_TYPE_CCW: if (MACHINE_IS_VM) - reipl_method = IPL_METHOD_CCW_VM; + reipl_method = REIPL_METHOD_CCW_VM; else - reipl_method = IPL_METHOD_CCW_CIO; + reipl_method = REIPL_METHOD_CCW_CIO; break; case IPL_TYPE_FCP: if (diag308_set_works) - reipl_method = IPL_METHOD_FCP_RW_DIAG; + reipl_method = REIPL_METHOD_FCP_RW_DIAG; else if (MACHINE_IS_VM) - reipl_method = IPL_METHOD_FCP_RO_VM; + reipl_method = REIPL_METHOD_FCP_RO_VM; else - reipl_method = IPL_METHOD_FCP_RO_DIAG; + reipl_method = REIPL_METHOD_FCP_RO_DIAG; + break; + case IPL_TYPE_FCP_DUMP: + reipl_method = REIPL_METHOD_FCP_DUMP; break; case IPL_TYPE_NSS: - reipl_method = IPL_METHOD_NSS; + reipl_method = REIPL_METHOD_NSS; + break; + case IPL_TYPE_UNKNOWN: + reipl_method = REIPL_METHOD_DEFAULT; break; default: - reipl_method = IPL_METHOD_NONE; + BUG(); } reipl_type = type; return 0; @@ -579,22 +644,22 @@ static struct attribute_group dump_ccw_attr_group = { /* dump type */ -static int dump_set_type(enum ipl_type type) +static int dump_set_type(enum dump_type type) { if (!(dump_capabilities & type)) return -EINVAL; switch(type) { - case IPL_TYPE_CCW: + case DUMP_TYPE_CCW: if (MACHINE_IS_VM) - dump_method = IPL_METHOD_CCW_VM; + dump_method = DUMP_METHOD_CCW_VM; else - dump_method = IPL_METHOD_CCW_CIO; + dump_method = DUMP_METHOD_CCW_CIO; break; - case IPL_TYPE_FCP: - dump_method = IPL_METHOD_FCP_RW_DIAG; + case DUMP_TYPE_FCP: + dump_method = DUMP_METHOD_FCP_DIAG; break; default: - dump_method = IPL_METHOD_NONE; + dump_method = DUMP_METHOD_NONE; } dump_type = type; return 0; @@ -602,7 +667,7 @@ static int dump_set_type(enum ipl_type type) static ssize_t dump_type_show(struct subsystem *subsys, char *page) { - return sprintf(page, "%s\n", ipl_type_str(dump_type)); + return sprintf(page, "%s\n", dump_type_str(dump_type)); } static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, @@ -610,12 +675,12 @@ static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, { int rc = -EINVAL; - if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0) - rc = dump_set_type(IPL_TYPE_NONE); - else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0) - rc = dump_set_type(IPL_TYPE_CCW); - else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) - rc = dump_set_type(IPL_TYPE_FCP); + if (strncmp(buf, DUMP_NONE_STR, strlen(DUMP_NONE_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_NONE); + else if (strncmp(buf, DUMP_CCW_STR, strlen(DUMP_CCW_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_CCW); + else if (strncmp(buf, DUMP_FCP_STR, strlen(DUMP_FCP_STR)) == 0) + rc = dump_set_type(DUMP_TYPE_FCP); return (rc != 0) ? 
rc : len; } @@ -664,14 +729,14 @@ void do_reipl(void) char loadparm[LOADPARM_LEN + 1]; switch (reipl_method) { - case IPL_METHOD_CCW_CIO: + case REIPL_METHOD_CCW_CIO: devid.devno = reipl_block_ccw->ipl_info.ccw.devno; - if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno) + if (ipl_info.type == IPL_TYPE_CCW && devid.devno == ipl_devno) diag308(DIAG308_IPL, NULL); devid.ssid = 0; reipl_ccw_dev(&devid); break; - case IPL_METHOD_CCW_VM: + case REIPL_METHOD_CCW_VM: reipl_get_ascii_loadparm(loadparm); if (strlen(loadparm) == 0) sprintf(buf, "IPL %X", @@ -681,30 +746,32 @@ void do_reipl(void) reipl_block_ccw->ipl_info.ccw.devno, loadparm); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_CCW_DIAG: + case REIPL_METHOD_CCW_DIAG: diag308(DIAG308_SET, reipl_block_ccw); diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RW_DIAG: + case REIPL_METHOD_FCP_RW_DIAG: diag308(DIAG308_SET, reipl_block_fcp); diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RO_DIAG: + case REIPL_METHOD_FCP_RO_DIAG: diag308(DIAG308_IPL, NULL); break; - case IPL_METHOD_FCP_RO_VM: + case REIPL_METHOD_FCP_RO_VM: __cpcmd("IPL", NULL, 0, NULL); break; - case IPL_METHOD_NSS: + case REIPL_METHOD_NSS: sprintf(buf, "IPL %s", reipl_nss_name); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_NONE: - default: + case REIPL_METHOD_DEFAULT: if (MACHINE_IS_VM) __cpcmd("IPL", NULL, 0, NULL); diag308(DIAG308_IPL, NULL); break; + case REIPL_METHOD_FCP_DUMP: + default: + break; } signal_processor(smp_processor_id(), sigp_stop_and_store_status); } @@ -715,28 +782,28 @@ static void do_dump(void) static char buf[100]; switch (dump_method) { - case IPL_METHOD_CCW_CIO: + case DUMP_METHOD_CCW_CIO: smp_send_stop(); devid.devno = dump_block_ccw->ipl_info.ccw.devno; devid.ssid = 0; reipl_ccw_dev(&devid); break; - case IPL_METHOD_CCW_VM: + case DUMP_METHOD_CCW_VM: smp_send_stop(); sprintf(buf, "STORE STATUS"); __cpcmd(buf, NULL, 0, NULL); sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); __cpcmd(buf, NULL, 0, NULL); break; - case IPL_METHOD_CCW_DIAG: + case DUMP_METHOD_CCW_DIAG: diag308(DIAG308_SET, dump_block_ccw); diag308(DIAG308_DUMP, NULL); break; - case IPL_METHOD_FCP_RW_DIAG: + case DUMP_METHOD_FCP_DIAG: diag308(DIAG308_SET, dump_block_fcp); diag308(DIAG308_DUMP, NULL); break; - case IPL_METHOD_NONE: + case DUMP_METHOD_NONE: default: return; } @@ -777,12 +844,13 @@ static int __init ipl_init(void) rc = firmware_register(&ipl_subsys); if (rc) return rc; - switch (ipl_get_type()) { + switch (ipl_info.type) { case IPL_TYPE_CCW: rc = sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group); break; case IPL_TYPE_FCP: + case IPL_TYPE_FCP_DUMP: rc = ipl_register_fcp_files(); break; case IPL_TYPE_NSS: @@ -852,7 +920,7 @@ static int __init reipl_ccw_init(void) /* FIXME: check for diag308_set_works when enabling diag ccw reipl */ if (!MACHINE_IS_VM) sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; - if (ipl_get_type() == IPL_TYPE_CCW) + if (ipl_info.type == IPL_TYPE_CCW) reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; reipl_capabilities |= IPL_TYPE_CCW; return 0; @@ -862,9 +930,9 @@ static int __init reipl_fcp_init(void) { int rc; - if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP)) + if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP)) return 0; - if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP)) + if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP)) make_attrs_ro(reipl_fcp_attrs); reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); @@ -875,7 +943,7 @@ static int 
__init reipl_fcp_init(void) free_page((unsigned long)reipl_block_fcp); return rc; } - if (ipl_get_type() == IPL_TYPE_FCP) { + if (ipl_info.type == IPL_TYPE_FCP) { memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); } else { reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; @@ -909,7 +977,7 @@ static int __init reipl_init(void) rc = reipl_nss_init(); if (rc) return rc; - rc = reipl_set_type(ipl_get_type()); + rc = reipl_set_type(ipl_info.type); if (rc) return rc; return 0; @@ -931,7 +999,7 @@ static int __init dump_ccw_init(void) dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; - dump_capabilities |= IPL_TYPE_CCW; + dump_capabilities |= DUMP_TYPE_CCW; return 0; } @@ -956,7 +1024,7 @@ static int __init dump_fcp_init(void) dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP; dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP; - dump_capabilities |= IPL_TYPE_FCP; + dump_capabilities |= DUMP_TYPE_FCP; return 0; } @@ -995,7 +1063,7 @@ static int __init dump_init(void) rc = dump_fcp_init(); if (rc) return rc; - dump_set_type(IPL_TYPE_NONE); + dump_set_type(DUMP_TYPE_NONE); return 0; } @@ -1038,6 +1106,27 @@ static int __init s390_ipl_init(void) __initcall(s390_ipl_init); +void __init ipl_save_parameters(void) +{ + struct cio_iplinfo iplinfo; + unsigned int *ipl_ptr; + void *src, *dst; + + if (cio_get_iplinfo(&iplinfo)) + return; + + ipl_devno = iplinfo.devno; + ipl_flags |= IPL_DEVNO_VALID; + if (!iplinfo.is_qdio) + return; + ipl_flags |= IPL_PARMBLOCK_VALID; + ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; + src = (void *)(unsigned long)*ipl_ptr; + dst = (void *)IPL_PARMBLOCK_ORIGIN; + memmove(dst, src, PAGE_SIZE); + *ipl_ptr = IPL_PARMBLOCK_ORIGIN; +} + static LIST_HEAD(rcall); static DEFINE_MUTEX(rcall_mutex); diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 39d1dd75252..59b4e796680 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -31,6 +31,7 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/moduleloader.h> +#include <linux/bug.h> #if 0 #define DEBUGP printk @@ -398,9 +399,10 @@ int module_finalize(const Elf_Ehdr *hdr, struct module *me) { vfree(me->arch.syminfo); - return 0; + return module_bug_finalize(hdr, sechdrs, me); } void module_arch_cleanup(struct module *mod) { + module_bug_cleanup(mod); } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 5acfac654f9..11d9b019762 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -280,24 +280,26 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, return 0; } -asmlinkage long sys_fork(struct pt_regs regs) +asmlinkage long sys_fork(void) { - return do_fork(SIGCHLD, regs.gprs[15], ®s, 0, NULL, NULL); + struct pt_regs *regs = task_pt_regs(current); + return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL); } -asmlinkage long sys_clone(struct pt_regs regs) +asmlinkage long sys_clone(void) { - unsigned long clone_flags; - unsigned long newsp; + struct pt_regs *regs = task_pt_regs(current); + unsigned long clone_flags; + unsigned long newsp; int __user *parent_tidptr, *child_tidptr; - clone_flags = regs.gprs[3]; - newsp = regs.orig_gpr2; - parent_tidptr = (int __user *) regs.gprs[4]; - child_tidptr = (int __user *) regs.gprs[5]; - if (!newsp) - newsp = regs.gprs[15]; - return do_fork(clone_flags, newsp, ®s, 0, + clone_flags = regs->gprs[3]; + 
newsp = regs->orig_gpr2; + parent_tidptr = (int __user *) regs->gprs[4]; + child_tidptr = (int __user *) regs->gprs[5]; + if (!newsp) + newsp = regs->gprs[15]; + return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); } @@ -311,40 +313,52 @@ asmlinkage long sys_clone(struct pt_regs regs) * do not have enough call-clobbered registers to hold all * the information you need. */ -asmlinkage long sys_vfork(struct pt_regs regs) +asmlinkage long sys_vfork(void) { + struct pt_regs *regs = task_pt_regs(current); return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, - regs.gprs[15], ®s, 0, NULL, NULL); + regs->gprs[15], regs, 0, NULL, NULL); +} + +asmlinkage void execve_tail(void) +{ + task_lock(current); + current->ptrace &= ~PT_DTRACE; + task_unlock(current); + current->thread.fp_regs.fpc = 0; + if (MACHINE_HAS_IEEE) + asm volatile("sfpc %0,%0" : : "d" (0)); } /* * sys_execve() executes a new program. */ -asmlinkage long sys_execve(struct pt_regs regs) +asmlinkage long sys_execve(void) { - int error; - char * filename; - - filename = getname((char __user *) regs.orig_gpr2); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - error = do_execve(filename, (char __user * __user *) regs.gprs[3], - (char __user * __user *) regs.gprs[4], ®s); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; - task_unlock(current); - current->thread.fp_regs.fpc = 0; - if (MACHINE_HAS_IEEE) - asm volatile("sfpc %0,%0" : : "d" (0)); + struct pt_regs *regs = task_pt_regs(current); + char *filename; + unsigned long result; + int rc; + + filename = getname((char __user *) regs->orig_gpr2); + if (IS_ERR(filename)) { + result = PTR_ERR(filename); + goto out; } - putname(filename); + rc = do_execve(filename, (char __user * __user *) regs->gprs[3], + (char __user * __user *) regs->gprs[4], regs); + if (rc) { + result = rc; + goto out_putname; + } + execve_tail(); + result = regs->gprs[2]; +out_putname: + putname(filename); out: - return error; + return result; } - /* * fill in the FPU structure for a core dump. 
*/ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 863c8d08c02..3dfd0985861 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -285,6 +285,26 @@ static void __init conmode_default(void) } } +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) +static void __init setup_zfcpdump(unsigned int console_devno) +{ + static char str[64]; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + if (console_devno != -1) + sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x", + ipl_info.data.fcp.dev_id.devno, console_devno); + else + sprintf(str, "cio_ignore=all,!0.0.%04x", + ipl_info.data.fcp.dev_id.devno); + strcat(COMMAND_LINE, str); + console_loglevel = 2; +} +#else +static inline void setup_zfcpdump(unsigned int console_devno) {} +#endif /* CONFIG_ZFCPDUMP */ + #ifdef CONFIG_SMP void (*_machine_restart)(char *command) = machine_restart_smp; void (*_machine_halt)(void) = machine_halt_smp; @@ -586,13 +606,20 @@ setup_resources(void) } } +unsigned long real_memory_size; +EXPORT_SYMBOL_GPL(real_memory_size); + static void __init setup_memory_end(void) { - unsigned long real_size, memory_size; + unsigned long memory_size; unsigned long max_mem, max_phys; int i; - memory_size = real_size = 0; +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) + if (ipl_info.type == IPL_TYPE_FCP_DUMP) + memory_end = ZFCPDUMP_HSA_SIZE; +#endif + memory_size = 0; max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE; memory_end &= PAGE_MASK; @@ -601,7 +628,8 @@ static void __init setup_memory_end(void) for (i = 0; i < MEMORY_CHUNKS; i++) { struct mem_chunk *chunk = &memory_chunk[i]; - real_size = max(real_size, chunk->addr + chunk->size); + real_memory_size = max(real_memory_size, + chunk->addr + chunk->size); if (chunk->addr >= max_mem) { memset(chunk, 0, sizeof(*chunk)); continue; @@ -765,6 +793,7 @@ setup_arch(char **cmdline_p) parse_early_param(); + setup_ipl_info(); setup_memory_end(); setup_addressing_mode(); setup_memory(); @@ -782,6 +811,9 @@ setup_arch(char **cmdline_p) /* Setup default console */ conmode_default(); + + /* Setup zfcpdump support */ + setup_zfcpdump(console_devno); } void print_cpu_info(struct cpuinfo_S390 *cpuinfo) diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 554f9cf7499..3c41907799a 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -102,9 +102,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, } asmlinkage long -sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, - struct pt_regs *regs) +sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) { + struct pt_regs *regs = task_pt_regs(current); return do_sigaltstack(uss, uoss, regs->gprs[15]); } @@ -163,8 +163,9 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) return 0; } -asmlinkage long sys_sigreturn(struct pt_regs *regs) +asmlinkage long sys_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); sigframe __user *frame = (sigframe __user *)regs->gprs[15]; sigset_t set; @@ -189,8 +190,9 @@ badframe: return 0; } -asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) +asmlinkage long sys_rt_sigreturn(void) { + struct pt_regs *regs = task_pt_regs(current); rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15]; sigset_t set; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 97764f710bb..3754e2031b3 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -1,12 +1,12 @@ /* * arch/s390/kernel/smp.c * - * Copyright (C) IBM Corp. 
1999,2006 + * Copyright IBM Corp. 1999,2007 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), - * Martin Schwidefsky (schwidefsky@de.ibm.com) - * Heiko Carstens (heiko.carstens@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Heiko Carstens (heiko.carstens@de.ibm.com) * - * based on other smp stuff by + * based on other smp stuff by * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> * (c) 1998 Ingo Molnar * @@ -31,6 +31,7 @@ #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/timex.h> +#include <linux/bootmem.h> #include <asm/ipl.h> #include <asm/setup.h> #include <asm/sigp.h> @@ -40,17 +41,19 @@ #include <asm/cpcmd.h> #include <asm/tlbflush.h> #include <asm/timer.h> - -extern volatile int __cpu_logical_map[]; +#include <asm/lowcore.h> /* * An array with a pointer the lowcore of every CPU. */ - struct _lowcore *lowcore_ptr[NR_CPUS]; +EXPORT_SYMBOL(lowcore_ptr); cpumask_t cpu_online_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_online_map); + cpumask_t cpu_possible_map = CPU_MASK_NONE; +EXPORT_SYMBOL(cpu_possible_map); static struct task_struct *current_set[NR_CPUS]; @@ -70,7 +73,7 @@ struct call_data_struct { int wait; }; -static struct call_data_struct * call_data; +static struct call_data_struct *call_data; /* * 'Call function' interrupt callback @@ -150,8 +153,8 @@ out: * * Run a function on all other CPUs. * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. You may call it from a bottom half. + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. */ int smp_call_function(void (*func) (void *info), void *info, int nonatomic, int wait) @@ -177,11 +180,11 @@ EXPORT_SYMBOL(smp_call_function); * * Run a function on one processor. * - * You must not call this function with disabled interrupts or from a - * hardware interrupt handler. You may call it from a bottom half. + * You must not call this function with disabled interrupts, from a + * hardware interrupt handler or from a bottom half. 
*/ int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, - int wait, int cpu) + int wait, int cpu) { cpumask_t map = CPU_MASK_NONE; @@ -195,9 +198,9 @@ EXPORT_SYMBOL(smp_call_function_on); static void do_send_stop(void) { - int cpu, rc; + int cpu, rc; - /* stop all processors */ + /* stop all processors */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -209,9 +212,9 @@ static void do_send_stop(void) static void do_store_status(void) { - int cpu, rc; + int cpu, rc; - /* store status of all processors in their lowcores (real 0) */ + /* store status of all processors in their lowcores (real 0) */ for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; @@ -219,8 +222,8 @@ static void do_store_status(void) rc = signal_processor_p( (__u32)(unsigned long) lowcore_ptr[cpu], cpu, sigp_store_status_at_address); - } while(rc == sigp_busy); - } + } while (rc == sigp_busy); + } } static void do_wait_for_stop(void) @@ -231,7 +234,7 @@ static void do_wait_for_stop(void) for_each_online_cpu(cpu) { if (cpu == smp_processor_id()) continue; - while(!smp_cpu_not_running(cpu)) + while (!smp_cpu_not_running(cpu)) cpu_relax(); } } @@ -245,7 +248,7 @@ void smp_send_stop(void) /* Disable all interrupts/machine checks */ __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); - /* write magic number to zero page (absolute 0) */ + /* write magic number to zero page (absolute 0) */ lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; /* stop other processors. */ @@ -261,8 +264,7 @@ void smp_send_stop(void) /* * Reboot, halt and power_off routines for SMP. */ - -void machine_restart_smp(char * __unused) +void machine_restart_smp(char *__unused) { smp_send_stop(); do_reipl(); @@ -293,17 +295,17 @@ void machine_power_off_smp(void) static void do_ext_call_interrupt(__u16 code) { - unsigned long bits; + unsigned long bits; - /* - * handle bit signal external calls - * - * For the ec_schedule signal we have to do nothing. All the work - * is done automatically when we return from the interrupt. - */ + /* + * handle bit signal external calls + * + * For the ec_schedule signal we have to do nothing. All the work + * is done automatically when we return from the interrupt. + */ bits = xchg(&S390_lowcore.ext_call_fast, 0); - if (test_bit(ec_call_function, &bits)) + if (test_bit(ec_call_function, &bits)) do_call_function(); } @@ -313,11 +315,11 @@ static void do_ext_call_interrupt(__u16 code) */ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) { - /* - * Set signaling bit in lowcore of target cpu and kick it - */ + /* + * Set signaling bit in lowcore of target cpu and kick it + */ set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); - while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy) + while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) udelay(10); } @@ -332,7 +334,7 @@ void smp_ptlb_callback(void *info) void smp_ptlb_all(void) { - on_each_cpu(smp_ptlb_callback, NULL, 0, 1); + on_each_cpu(smp_ptlb_callback, NULL, 0, 1); } EXPORT_SYMBOL(smp_ptlb_all); #endif /* ! 
CONFIG_64BIT */ @@ -344,7 +346,7 @@ EXPORT_SYMBOL(smp_ptlb_all); */ void smp_send_reschedule(int cpu) { - smp_ext_bitcall(cpu, ec_schedule); + smp_ext_bitcall(cpu, ec_schedule); } /* @@ -358,11 +360,12 @@ struct ec_creg_mask_parms { /* * callback for setting/clearing control bits */ -static void smp_ctl_bit_callback(void *info) { +static void smp_ctl_bit_callback(void *info) +{ struct ec_creg_mask_parms *pp = info; unsigned long cregs[16]; int i; - + __ctl_store(cregs, 0, 15); for (i = 0; i <= 15; i++) cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; @@ -381,6 +384,7 @@ void smp_ctl_set_bit(int cr, int bit) parms.orvals[cr] = 1 << bit; on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_set_bit); /* * Clear a bit in a control register of all cpus @@ -394,13 +398,72 @@ void smp_ctl_clear_bit(int cr, int bit) parms.andvals[cr] = ~(1L << bit); on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); } +EXPORT_SYMBOL(smp_ctl_clear_bit); + +#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) + +/* + * zfcpdump_prefix_array holds prefix registers for the following scenario: + * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to + * save its prefix registers, since they get lost, when switching from 31 bit + * to 64 bit. + */ +unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ + __attribute__((__section__(".data"))); + +static void __init smp_get_save_areas(void) +{ + unsigned int cpu, cpu_num, rc; + __u16 boot_cpu_addr; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return; + boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; + cpu_num = 1; + for (cpu = 0; cpu <= 65535; cpu++) { + if ((u16) cpu == boot_cpu_addr) + continue; + __cpu_logical_map[1] = (__u16) cpu; + if (signal_processor(1, sigp_sense) == sigp_not_operational) + continue; + if (cpu_num >= NR_CPUS) { + printk("WARNING: Registers for cpu %i are not " + "saved, since dump kernel was compiled with" + "NR_CPUS=%i!\n", cpu_num, NR_CPUS); + continue; + } + zfcpdump_save_areas[cpu_num] = + alloc_bootmem(sizeof(union save_area)); + while (1) { + rc = signal_processor(1, sigp_stop_and_store_status); + if (rc != sigp_busy) + break; + cpu_relax(); + } + memcpy(zfcpdump_save_areas[cpu_num], + (void *)(unsigned long) store_prefix() + + SAVE_AREA_BASE, SAVE_AREA_SIZE); +#ifdef __s390x__ + /* copy original prefix register */ + zfcpdump_save_areas[cpu_num]->s390x.pref_reg = + zfcpdump_prefix_array[cpu_num]; +#endif + cpu_num++; + } +} + +union save_area *zfcpdump_save_areas[NR_CPUS + 1]; +EXPORT_SYMBOL_GPL(zfcpdump_save_areas); + +#else +#define smp_get_save_areas() do { } while (0) +#endif /* * Lets check how many CPUs we have. */ -static unsigned int -__init smp_count_cpus(void) +static unsigned int __init smp_count_cpus(void) { unsigned int cpu, num_cpus; __u16 boot_cpu_addr; @@ -416,31 +479,30 @@ __init smp_count_cpus(void) if ((__u16) cpu == boot_cpu_addr) continue; __cpu_logical_map[1] = (__u16) cpu; - if (signal_processor(1, sigp_sense) == - sigp_not_operational) + if (signal_processor(1, sigp_sense) == sigp_not_operational) continue; num_cpus++; } - printk("Detected %d CPU's\n",(int) num_cpus); + printk("Detected %d CPU's\n", (int) num_cpus); printk("Boot cpu address %2X\n", boot_cpu_addr); return num_cpus; } /* - * Activate a secondary processor. + * Activate a secondary processor. */ int __devinit start_secondary(void *cpuvoid) { - /* Setup the cpu */ - cpu_init(); + /* Setup the cpu */ + cpu_init(); preempt_disable(); /* Enable TOD clock interrupts on the secondary cpu. 
*/ - init_cpu_timer(); + init_cpu_timer(); #ifdef CONFIG_VIRT_TIMER /* Enable cpu timer interrupts on the secondary cpu. */ - init_cpu_vtimer(); + init_cpu_vtimer(); #endif /* Enable pfault pseudo page faults on this cpu. */ pfault_init(); @@ -449,11 +511,11 @@ int __devinit start_secondary(void *cpuvoid) cpu_set(smp_processor_id(), cpu_online_map); /* Switch on interrupts */ local_irq_enable(); - /* Print info about this processor */ - print_cpu_info(&S390_lowcore.cpu_data); - /* cpu_idle will call schedule for us */ - cpu_idle(); - return 0; + /* Print info about this processor */ + print_cpu_info(&S390_lowcore.cpu_data); + /* cpu_idle will call schedule for us */ + cpu_idle(); + return 0; } static void __init smp_create_idle(unsigned int cpu) @@ -470,56 +532,13 @@ static void __init smp_create_idle(unsigned int cpu) current_set[cpu] = p; } -/* Reserving and releasing of CPUs */ - -static DEFINE_SPINLOCK(smp_reserve_lock); -static int smp_cpu_reserved[NR_CPUS]; - -int -smp_get_cpu(cpumask_t cpu_mask) -{ - unsigned long flags; - int cpu; - - spin_lock_irqsave(&smp_reserve_lock, flags); - /* Try to find an already reserved cpu. */ - for_each_cpu_mask(cpu, cpu_mask) { - if (smp_cpu_reserved[cpu] != 0) { - smp_cpu_reserved[cpu]++; - /* Found one. */ - goto out; - } - } - /* Reserve a new cpu from cpu_mask. */ - for_each_cpu_mask(cpu, cpu_mask) { - if (cpu_online(cpu)) { - smp_cpu_reserved[cpu]++; - goto out; - } - } - cpu = -ENODEV; -out: - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return cpu; -} - -void -smp_put_cpu(int cpu) -{ - unsigned long flags; - - spin_lock_irqsave(&smp_reserve_lock, flags); - smp_cpu_reserved[cpu]--; - spin_unlock_irqrestore(&smp_reserve_lock, flags); -} - -static int -cpu_stopped(int cpu) +static int cpu_stopped(int cpu) { __u32 status; /* Check for stopped state */ - if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) { + if (signal_processor_ps(&status, 0, cpu, sigp_sense) == + sigp_status_stored) { if (status & 0x40) return 1; } @@ -528,14 +547,13 @@ cpu_stopped(int cpu) /* Upping and downing of CPUs */ -int -__cpu_up(unsigned int cpu) +int __cpu_up(unsigned int cpu) { struct task_struct *idle; - struct _lowcore *cpu_lowcore; + struct _lowcore *cpu_lowcore; struct stack_frame *sf; - sigp_ccode ccode; - int curr_cpu; + sigp_ccode ccode; + int curr_cpu; for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { __cpu_logical_map[cpu] = (__u16) curr_cpu; @@ -548,7 +566,7 @@ __cpu_up(unsigned int cpu) ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), cpu, sigp_set_prefix); - if (ccode){ + if (ccode) { printk("sigp_set_prefix failed for cpu %d " "with condition code %d\n", (int) cpu, (int) ccode); @@ -556,9 +574,9 @@ __cpu_up(unsigned int cpu) } idle = current_set[cpu]; - cpu_lowcore = lowcore_ptr[cpu]; + cpu_lowcore = lowcore_ptr[cpu]; cpu_lowcore->kernel_stack = (unsigned long) - task_stack_page(idle) + (THREAD_SIZE); + task_stack_page(idle) + THREAD_SIZE; sf = (struct stack_frame *) (cpu_lowcore->kernel_stack - sizeof(struct pt_regs) - sizeof(struct stack_frame)); @@ -570,11 +588,11 @@ __cpu_up(unsigned int cpu) " stam 0,15,0(%0)" : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; - cpu_lowcore->current_task = (unsigned long) idle; - cpu_lowcore->cpu_data.cpu_nr = cpu; + cpu_lowcore->current_task = (unsigned long) idle; + cpu_lowcore->cpu_data.cpu_nr = cpu; eieio(); - while (signal_processor(cpu,sigp_restart) == sigp_busy) + while (signal_processor(cpu, 
sigp_restart) == sigp_busy) udelay(10); while (!cpu_online(cpu)) @@ -589,6 +607,7 @@ void __init smp_setup_cpu_possible_map(void) { unsigned int phy_cpus, pos_cpus, cpu; + smp_get_save_areas(); phy_cpus = smp_count_cpus(); pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); @@ -620,18 +639,11 @@ static int __init setup_possible_cpus(char *s) } early_param("possible_cpus", setup_possible_cpus); -int -__cpu_disable(void) +int __cpu_disable(void) { - unsigned long flags; struct ec_creg_mask_parms cr_parms; int cpu = smp_processor_id(); - spin_lock_irqsave(&smp_reserve_lock, flags); - if (smp_cpu_reserved[cpu] != 0) { - spin_unlock_irqrestore(&smp_reserve_lock, flags); - return -EBUSY; - } cpu_clear(cpu, cpu_online_map); /* Disable pfault pseudo page faults on this cpu. */ @@ -642,24 +654,23 @@ __cpu_disable(void) /* disable all external interrupts */ cr_parms.orvals[0] = 0; - cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 | - 1<<11 | 1<<10 | 1<< 6 | 1<< 4); + cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | + 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); /* disable all I/O interrupts */ cr_parms.orvals[6] = 0; - cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 | - 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | + 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); /* disable most machine checks */ cr_parms.orvals[14] = 0; - cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24); + cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | + 1 << 25 | 1 << 24); smp_ctl_bit_callback(&cr_parms); - spin_unlock_irqrestore(&smp_reserve_lock, flags); return 0; } -void -__cpu_die(unsigned int cpu) +void __cpu_die(unsigned int cpu) { /* Wait until target cpu is down */ while (!smp_cpu_not_running(cpu)) @@ -667,13 +678,12 @@ __cpu_die(unsigned int cpu) printk("Processor %d spun down\n", cpu); } -void -cpu_die(void) +void cpu_die(void) { idle_task_exit(); signal_processor(smp_processor_id(), sigp_stop); BUG(); - for(;;); + for (;;); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -686,36 +696,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned long stack; unsigned int cpu; - int i; - - /* request the 0x1201 emergency signal external interrupt */ - if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) - panic("Couldn't request external interrupt 0x1201"); - memset(lowcore_ptr,0,sizeof(lowcore_ptr)); - /* - * Initialize prefix pages and stacks for all possible cpus - */ + int i; + + /* request the 0x1201 emergency signal external interrupt */ + if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) + panic("Couldn't request external interrupt 0x1201"); + memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); + /* + * Initialize prefix pages and stacks for all possible cpus + */ print_cpu_info(&S390_lowcore.cpu_data); - for_each_possible_cpu(i) { + for_each_possible_cpu(i) { lowcore_ptr[i] = (struct _lowcore *) - __get_free_pages(GFP_KERNEL|GFP_DMA, - sizeof(void*) == 8 ? 1 : 0); - stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER); - if (lowcore_ptr[i] == NULL || stack == 0ULL) + __get_free_pages(GFP_KERNEL | GFP_DMA, + sizeof(void*) == 8 ? 
1 : 0); + stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); + if (!lowcore_ptr[i] || !stack) panic("smp_boot_cpus failed to allocate memory\n"); *(lowcore_ptr[i]) = S390_lowcore; - lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE); - stack = __get_free_pages(GFP_KERNEL,0); - if (stack == 0ULL) + lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE; + stack = __get_free_pages(GFP_KERNEL, 0); + if (!stack) panic("smp_boot_cpus failed to allocate memory\n"); - lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); + lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE; #ifndef CONFIG_64BIT if (MACHINE_HAS_IEEE) { lowcore_ptr[i]->extended_save_area_addr = - (__u32) __get_free_pages(GFP_KERNEL,0); - if (lowcore_ptr[i]->extended_save_area_addr == 0) + (__u32) __get_free_pages(GFP_KERNEL, 0); + if (!lowcore_ptr[i]->extended_save_area_addr) panic("smp_boot_cpus failed to " "allocate memory\n"); } @@ -754,34 +764,63 @@ void smp_cpus_done(unsigned int max_cpus) */ int setup_profiling_timer(unsigned int multiplier) { - return 0; + return 0; } static DEFINE_PER_CPU(struct cpu, cpu_devices); +static ssize_t show_capability(struct sys_device *dev, char *buf) +{ + unsigned int capability; + int rc; + + rc = get_cpu_capability(&capability); + if (rc) + return rc; + return sprintf(buf, "%u\n", capability); +} +static SYSDEV_ATTR(capability, 0444, show_capability, NULL); + +static int __cpuinit smp_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; + struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; + + switch (action) { + case CPU_ONLINE: + if (sysdev_create_file(s, &attr_capability)) + return NOTIFY_BAD; + break; + case CPU_DEAD: + sysdev_remove_file(s, &attr_capability); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata smp_cpu_nb = { + .notifier_call = smp_cpu_notify, +}; + static int __init topology_init(void) { int cpu; - int ret; + + register_cpu_notifier(&smp_cpu_nb); for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; c->hotpluggable = 1; - ret = register_cpu(c, cpu); - if (ret) - printk(KERN_WARNING "topology_init: register_cpu %d " - "failed (%d)\n", cpu, ret); + register_cpu(c, cpu); + if (!cpu_online(cpu)) + continue; + s = &c->sysdev; + sysdev_create_file(s, &attr_capability); } return 0; } - subsys_initcall(topology_init); - -EXPORT_SYMBOL(cpu_online_map); -EXPORT_SYMBOL(cpu_possible_map); -EXPORT_SYMBOL(lowcore_ptr); -EXPORT_SYMBOL(smp_ctl_set_bit); -EXPORT_SYMBOL(smp_ctl_clear_bit); -EXPORT_SYMBOL(smp_get_cpu); -EXPORT_SYMBOL(smp_put_cpu); diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 584ed95f338..3a77c22cda7 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -266,23 +266,3 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args) return -EFAULT; return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); } - -/* - * Do a system call from kernel instead of calling sys_execve so we - * end up with proper pt_regs. 
- */ -int kernel_execve(const char *filename, char *const argv[], char *const envp[]) -{ - register const char *__arg1 asm("2") = filename; - register char *const*__arg2 asm("3") = argv; - register char *const*__arg3 asm("4") = envp; - register long __svcres asm("2"); - asm volatile( - "svc %b1" - : "=d" (__svcres) - : "i" (__NR_execve), - "0" (__arg1), - "d" (__arg2), - "d" (__arg3) : "memory"); - return __svcres; -} diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index c774f1069e1..cd8d321cd0c 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -10,7 +10,7 @@ NI_SYSCALL /* 0 */ SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper) -SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue) +SYSCALL(sys_fork,sys_fork,sys_fork) SYSCALL(sys_read,sys_read,sys32_read_wrapper) SYSCALL(sys_write,sys_write,sys32_write_wrapper) SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */ @@ -19,7 +19,7 @@ SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall) SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper) SYSCALL(sys_link,sys_link,sys32_link_wrapper) SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */ -SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue) +SYSCALL(sys_execve,sys_execve,sys32_execve) SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper) SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */ SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper) @@ -127,8 +127,8 @@ SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */ SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper) SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper) SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper) -SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue) -SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */ +SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn) +SYSCALL(sys_clone,sys_clone,sys32_clone) /* 120 */ SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) NI_SYSCALL /* modify_ldt for i386 */ @@ -181,7 +181,7 @@ SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper) SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */ SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */ SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper) -SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue) +SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn) SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper) SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */ SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper) @@ -194,11 +194,11 @@ SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper) SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper) SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */ -SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue) +SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack) SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper) NI_SYSCALL /* streams1 */ NI_SYSCALL /* streams2 */ -SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue) /* 190 */ +SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */ SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper) SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper) 
SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index e1ad464b6f2..711dae8da7a 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -280,7 +280,6 @@ static void clock_comparator_interrupt(__u16 code) } static void etr_reset(void); -static void etr_init(void); static void etr_ext_handler(__u16); /* @@ -355,7 +354,6 @@ void __init time_init(void) #ifdef CONFIG_VIRT_TIMER vtime_init(); #endif - etr_init(); } /* @@ -426,11 +424,11 @@ static struct etr_aib etr_port1; static int etr_port1_uptodate; static unsigned long etr_events; static struct timer_list etr_timer; -static struct tasklet_struct etr_tasklet; static DEFINE_PER_CPU(atomic_t, etr_sync_word); static void etr_timeout(unsigned long dummy); -static void etr_tasklet_fn(unsigned long dummy); +static void etr_work_fn(struct work_struct *work); +static DECLARE_WORK(etr_work, etr_work_fn); /* * The etr get_clock function. It will write the current clock value @@ -507,29 +505,31 @@ static void etr_reset(void) } } -static void etr_init(void) +static int __init etr_init(void) { struct etr_aib aib; if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) - return; + return 0; /* Check if this machine has the steai instruction. */ if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) set_bit(ETR_FLAG_STEAI, &etr_flags); setup_timer(&etr_timer, etr_timeout, 0UL); - tasklet_init(&etr_tasklet, etr_tasklet_fn, 0); if (!etr_port0_online && !etr_port1_online) set_bit(ETR_FLAG_EACCES, &etr_flags); if (etr_port0_online) { set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } if (etr_port1_online) { set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } + return 0; } +arch_initcall(etr_init); + /* * Two sorts of ETR machine checks. The architecture reads: * "When a machine-check niterruption occurs and if a switch-to-local or @@ -549,7 +549,7 @@ void etr_switch_to_local(void) return; etr_disable_sync_clock(NULL); set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } /* @@ -564,7 +564,7 @@ void etr_sync_check(void) return; etr_disable_sync_clock(NULL); set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } /* @@ -591,13 +591,13 @@ static void etr_ext_handler(__u16 code) * Both ports are not up-to-date now. */ set_bit(ETR_EVENT_PORT_ALERT, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } static void etr_timeout(unsigned long dummy) { set_bit(ETR_EVENT_UPDATE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } /* @@ -927,7 +927,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib, if (!eacr.e0 && !eacr.e1) return eacr; - /* Update port0 or port1 with aib stored in etr_tasklet_fn. */ + /* Update port0 or port1 with aib stored in etr_work_fn. */ if (aib->esw.q == 0) { /* Information for port 0 stored. */ if (eacr.p0 && !etr_port0_uptodate) { @@ -1007,7 +1007,7 @@ static void etr_update_eacr(struct etr_eacr eacr) * particular this is the only function that calls etr_update_eacr(), * it "controls" the etr control register. */ -static void etr_tasklet_fn(unsigned long dummy) +static void etr_work_fn(struct work_struct *work) { unsigned long long now; struct etr_eacr eacr; @@ -1220,13 +1220,13 @@ static ssize_t etr_online_store(struct sys_device *dev, return count; /* Nothing to do. 
*/ etr_port0_online = value; set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } else { if (etr_port1_online == value) return count; /* Nothing to do. */ etr_port1_online = value; set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); - tasklet_hi_schedule(&etr_tasklet); + schedule_work(&etr_work); } return count; } diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index f0e5a320e2e..49dec830373 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -30,7 +30,7 @@ #include <linux/kallsyms.h> #include <linux/reboot.h> #include <linux/kprobes.h> - +#include <linux/bug.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -188,18 +188,31 @@ void dump_stack(void) EXPORT_SYMBOL(dump_stack); +static inline int mask_bits(struct pt_regs *regs, unsigned long bits) +{ + return (regs->psw.mask & bits) / ((~bits + 1) & bits); +} + void show_registers(struct pt_regs *regs) { - mm_segment_t old_fs; char *mode; - int i; mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl"; printk("%s PSW : %p %p", mode, (void *) regs->psw.mask, (void *) regs->psw.addr); print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN); - printk("%s GPRS: " FOURLONG, mode, + printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " + "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER), + mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO), + mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY), + mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), + mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), + mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); +#ifdef CONFIG_64BIT + printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS)); +#endif + printk("\n%s GPRS: " FOURLONG, mode, regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); printk(" " FOURLONG, regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); @@ -208,41 +221,7 @@ void show_registers(struct pt_regs *regs) printk(" " FOURLONG, regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); -#if 0 - /* FIXME: this isn't needed any more but it changes the ksymoops - * input. To remove or not to remove ... */ - save_access_regs(regs->acrs); - printk("%s ACRS: %08x %08x %08x %08x\n", mode, - regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]); - printk(" %08x %08x %08x %08x\n", - regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]); -#endif - - /* - * Print the first 20 byte of the instruction stream at the - * time of the fault. 
- */ - old_fs = get_fs(); - if (regs->psw.mask & PSW_MASK_PSTATE) - set_fs(USER_DS); - else - set_fs(KERNEL_DS); - printk("%s Code: ", mode); - for (i = 0; i < 20; i++) { - unsigned char c; - if (__get_user(c, (char __user *)(regs->psw.addr + i))) { - printk(" Bad PSW."); - break; - } - printk("%02x ", c); - } - set_fs(old_fs); - - printk("\n"); + show_code(regs); } /* This is called from fs/proc/array.c */ @@ -318,6 +297,11 @@ report_user_fault(long interruption_code, struct pt_regs *regs) #endif } +int is_valid_bugaddr(unsigned long addr) +{ + return 1; +} + static void __kprobes inline do_trap(long interruption_code, int signr, char *str, struct pt_regs *regs, siginfo_t *info) @@ -344,8 +328,14 @@ static void __kprobes inline do_trap(long interruption_code, int signr, fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; - else - die(str, regs, interruption_code); + else { + enum bug_trap_type btt; + + btt = report_bug(regs->psw.addr & PSW_ADDR_INSN); + if (btt == BUG_TRAP_TYPE_WARN) + return; + die(str, regs, interruption_code); + } } } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index c30716ae130..418f6426a94 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -45,6 +45,8 @@ SECTIONS __ex_table : { *(__ex_table) } __stop___ex_table = .; + BUG_TABLE + .data : { /* Data */ *(.data) CONSTRUCTORS @@ -77,6 +79,12 @@ SECTIONS *(.init.text) _einittext = .; } + /* + * .exit.text is discarded at runtime, not link time, + * to deal with references from __bug_table + */ + .exit.text : { *(.exit.text) } + .init.data : { *(.init.data) } . = ALIGN(256); __setup_start = .; @@ -116,7 +124,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) *(.exit.data) *(.exitcall.exit) + *(.exit.data) *(.exitcall.exit) } /* Stabs debugging sections. 
*/ diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 9d5b02801b4..1e1a6ee2cac 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -128,7 +128,7 @@ static inline void set_vtimer(__u64 expires) S390_lowcore.last_update_timer = expires; /* store expire time for this CPU timer */ - per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; + __get_cpu_var(virt_cpu_timer).to_expire = expires; } #else static inline void set_vtimer(__u64 expires) @@ -137,7 +137,7 @@ static inline void set_vtimer(__u64 expires) asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); /* store expire time for this CPU timer */ - per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; + __get_cpu_var(virt_cpu_timer).to_expire = expires; } #endif @@ -145,7 +145,7 @@ static void start_cpu_timer(void) { struct vtimer_queue *vt_list; - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); /* CPU timer interrupt is pending, don't reprogramm it */ if (vt_list->idle & 1LL<<63) @@ -159,7 +159,7 @@ static void stop_cpu_timer(void) { struct vtimer_queue *vt_list; - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); /* nothing to do */ if (list_empty(&vt_list->list)) { @@ -219,7 +219,7 @@ static void do_callbacks(struct list_head *cb_list) if (list_empty(cb_list)) return; - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); list_for_each_entry_safe(event, tmp, cb_list, entry) { fn = event->function; @@ -244,7 +244,6 @@ static void do_callbacks(struct list_head *cb_list) */ static void do_cpu_timer_interrupt(__u16 error_code) { - int cpu; __u64 next, delta; struct vtimer_queue *vt_list; struct vtimer_list *event, *tmp; @@ -253,8 +252,7 @@ static void do_cpu_timer_interrupt(__u16 error_code) struct list_head cb_list; INIT_LIST_HEAD(&cb_list); - cpu = smp_processor_id(); - vt_list = &per_cpu(virt_cpu_timer, cpu); + vt_list = &__get_cpu_var(virt_cpu_timer); /* walk timer list, fire all expired events */ spin_lock(&vt_list->lock); @@ -534,7 +532,7 @@ void init_cpu_vtimer(void) /* enable cpu timer interrupts */ __ctl_set_bit(0,10); - vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + vt_list = &__get_cpu_var(virt_cpu_timer); INIT_LIST_HEAD(&vt_list->list); spin_lock_init(&vt_list->lock); vt_list->to_expire = 0; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 7462aebd3eb..2b76a879a7b 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -26,9 +26,9 @@ #include <linux/module.h> #include <linux/hardirq.h> #include <linux/kprobes.h> +#include <linux/uaccess.h> #include <asm/system.h> -#include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/kdebug.h> #include <asm/s390_ext.h> @@ -63,21 +63,25 @@ int unregister_page_fault_notifier(struct notifier_block *nb) return atomic_notifier_chain_unregister(¬ify_page_fault_chain, nb); } -static inline int notify_page_fault(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) +static int __kprobes __notify_page_fault(struct pt_regs *regs, long err) { - struct die_args args = { - .regs = regs, - .str = str, - .err = err, - .trapnr = trap, - .signr = sig - }; - return atomic_notifier_call_chain(¬ify_page_fault_chain, val, &args); + struct die_args args = { .str = "page fault", + .trapnr = 14, + .signr = SIGSEGV }; + args.regs = regs; + args.err = err; + return atomic_notifier_call_chain(¬ify_page_fault_chain, + 
DIE_PAGE_FAULT, &args); +} + +static inline int notify_page_fault(struct pt_regs *regs, long err) +{ + if (unlikely(kprobe_running())) + return __notify_page_fault(regs, err); + return NOTIFY_DONE; } #else -static inline int notify_page_fault(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) +static inline int notify_page_fault(struct pt_regs *regs, long err) { return NOTIFY_DONE; } @@ -170,74 +174,127 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, force_sig_info(SIGSEGV, &si, current); } +static void do_no_context(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + const struct exception_table_entry *fixup; + + /* Are we prepared to handle this kernel fault? */ + fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); + if (fixup) { + regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; + return; + } + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + if (check_space(current) == 0) + printk(KERN_ALERT "Unable to handle kernel pointer dereference" + " at virtual kernel address %p\n", (void *)address); + else + printk(KERN_ALERT "Unable to handle kernel paging request" + " at virtual user address %p\n", (void *)address); + + die("Oops", regs, error_code); + do_exit(SIGKILL); +} + +static void do_low_address(struct pt_regs *regs, unsigned long error_code) +{ + /* Low-address protection hit in kernel mode means + NULL pointer write access in kernel mode. */ + if (regs->psw.mask & PSW_MASK_PSTATE) { + /* Low-address protection hit in user mode 'cannot happen'. */ + die ("Low-address protection", regs, error_code); + do_exit(SIGKILL); + } + + do_no_context(regs, error_code, 0); +} + +/* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ +static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + up_read(&mm->mmap_sem); + if (is_init(tsk)) { + yield(); + down_read(&mm->mmap_sem); + return 1; + } + printk("VM: killing process %s\n", tsk->comm); + if (regs->psw.mask & PSW_MASK_PSTATE) + do_exit(SIGKILL); + do_no_context(regs, error_code, address); + return 0; +} + +static void do_sigbus(struct pt_regs *regs, unsigned long error_code, + unsigned long address) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + up_read(&mm->mmap_sem); + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + tsk->thread.prot_addr = address; + tsk->thread.trap_no = error_code; + force_sig(SIGBUS, tsk); + + /* Kernel mode? 
Handle exceptions or die */ + if (!(regs->psw.mask & PSW_MASK_PSTATE)) + do_no_context(regs, error_code, address); +} + #ifdef CONFIG_S390_EXEC_PROTECT extern long sys_sigreturn(struct pt_regs *regs); extern long sys_rt_sigreturn(struct pt_regs *regs); extern long sys32_sigreturn(struct pt_regs *regs); extern long sys32_rt_sigreturn(struct pt_regs *regs); -static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs, - int rt) +static int signal_return(struct mm_struct *mm, struct pt_regs *regs, + unsigned long address, unsigned long error_code) { + u16 instruction; + int rc, compat; + + pagefault_disable(); + rc = __get_user(instruction, (u16 __user *) regs->psw.addr); + pagefault_enable(); + if (rc) + return -EFAULT; + up_read(&mm->mmap_sem); clear_tsk_thread_flag(current, TIF_SINGLE_STEP); #ifdef CONFIG_COMPAT - if (test_tsk_thread_flag(current, TIF_31BIT)) { - if (rt) - sys32_rt_sigreturn(regs); - else - sys32_sigreturn(regs); - return; - } -#endif /* CONFIG_COMPAT */ - if (rt) - sys_rt_sigreturn(regs); + compat = test_tsk_thread_flag(current, TIF_31BIT); + if (compat && instruction == 0x0a77) + sys32_sigreturn(regs); + else if (compat && instruction == 0x0aad) + sys32_rt_sigreturn(regs); else +#endif + if (instruction == 0x0a77) sys_sigreturn(regs); - return; -} - -static int signal_return(struct mm_struct *mm, struct pt_regs *regs, - unsigned long address, unsigned long error_code) -{ - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - u16 *instruction; - unsigned long pfn, uaddr = regs->psw.addr; - - spin_lock(&mm->page_table_lock); - pgd = pgd_offset(mm, uaddr); - if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - goto out_fault; - pmd = pmd_offset(pgd, uaddr); - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - goto out_fault; - pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr); - if (!pte || !pte_present(*pte)) - goto out_fault; - pfn = pte_pfn(*pte); - if (!pfn_valid(pfn)) - goto out_fault; - spin_unlock(&mm->page_table_lock); - - instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1))); - if (*instruction == 0x0a77) - do_sigreturn(mm, regs, 0); - else if (*instruction == 0x0aad) - do_sigreturn(mm, regs, 1); + else if (instruction == 0x0aad) + sys_rt_sigreturn(regs); else { - printk("- XXX - do_exception: task = %s, primary, NO EXEC " - "-> SIGSEGV\n", current->comm); - up_read(&mm->mmap_sem); current->thread.prot_addr = address; current->thread.trap_no = error_code; do_sigsegv(regs, error_code, SEGV_MAPERR, address); } return 0; -out_fault: - spin_unlock(&mm->page_table_lock); - return -EFAULT; } #endif /* CONFIG_S390_EXEC_PROTECT */ @@ -253,49 +310,23 @@ out_fault: * 3b Region third trans. -> Not present (nullification) */ static inline void -do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) +do_exception(struct pt_regs *regs, unsigned long error_code, int write) { - struct task_struct *tsk; - struct mm_struct *mm; - struct vm_area_struct * vma; - unsigned long address; - const struct exception_table_entry *fixup; - int si_code; + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct *vma; + unsigned long address; int space; + int si_code; - tsk = current; - mm = tsk->mm; - - if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, - SIGSEGV) == NOTIFY_STOP) + if (notify_page_fault(regs, error_code) == NOTIFY_STOP) return; - /* - * Check for low-address protection. 
This needs to be treated - * as a special case because the translation exception code - * field is not guaranteed to contain valid data in this case. - */ - if (is_protection && !(S390_lowcore.trans_exc_code & 4)) { - - /* Low-address protection hit in kernel mode means - NULL pointer write access in kernel mode. */ - if (!(regs->psw.mask & PSW_MASK_PSTATE)) { - address = 0; - space = 0; - goto no_context; - } - - /* Low-address protection hit in user mode 'cannot happen'. */ - die ("Low-address protection", regs, error_code); - do_exit(SIGKILL); - } + tsk = current; + mm = tsk->mm; - /* - * get the failing address - * more specific the segment and page table portion of - * the address - */ - address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; + /* get the failing address and the affected space */ + address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; space = check_space(tsk); /* @@ -313,7 +344,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) */ local_irq_enable(); - down_read(&mm->mmap_sem); + down_read(&mm->mmap_sem); si_code = SEGV_MAPERR; vma = find_vma(mm, address); @@ -330,19 +361,19 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) return; #endif - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; + if (vma->vm_start <= address) + goto good_area; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto bad_area; + if (expand_stack(vma, address)) + goto bad_area; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ good_area: si_code = SEGV_ACCERR; - if (!is_protection) { + if (!write) { /* page not present, check vm flags */ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) goto bad_area; @@ -357,7 +388,7 @@ survive: * make sure we exit gracefully rather than endlessly redo * the fault. */ - switch (handle_mm_fault(mm, vma, address, is_protection)) { + switch (handle_mm_fault(mm, vma, address, write)) { case VM_FAULT_MINOR: tsk->min_flt++; break; @@ -365,9 +396,12 @@ survive: tsk->maj_flt++; break; case VM_FAULT_SIGBUS: - goto do_sigbus; + do_sigbus(regs, error_code, address); + return; case VM_FAULT_OOM: - goto out_of_memory; + if (do_out_of_memory(regs, error_code, address)) + goto survive; + return; default: BUG(); } @@ -385,75 +419,34 @@ survive: * Fix it, but check if it's kernel or user first.. */ bad_area: - up_read(&mm->mmap_sem); + up_read(&mm->mmap_sem); - /* User mode accesses just cause a SIGSEGV */ - if (regs->psw.mask & PSW_MASK_PSTATE) { - tsk->thread.prot_addr = address; - tsk->thread.trap_no = error_code; + /* User mode accesses just cause a SIGSEGV */ + if (regs->psw.mask & PSW_MASK_PSTATE) { + tsk->thread.prot_addr = address; + tsk->thread.trap_no = error_code; do_sigsegv(regs, error_code, si_code, address); - return; + return; } no_context: - /* Are we prepared to handle this kernel fault? */ - fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK); - if (fixup) { - regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; - return; - } - -/* - * Oops. The kernel tried to access some bad page. We'll have to - * terminate things with extreme prejudice. 
- */ - if (space == 0) - printk(KERN_ALERT "Unable to handle kernel pointer dereference" - " at virtual kernel address %p\n", (void *)address); - else - printk(KERN_ALERT "Unable to handle kernel paging request" - " at virtual user address %p\n", (void *)address); - - die("Oops", regs, error_code); - do_exit(SIGKILL); - - -/* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. -*/ -out_of_memory: - up_read(&mm->mmap_sem); - if (is_init(tsk)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk("VM: killing process %s\n", tsk->comm); - if (regs->psw.mask & PSW_MASK_PSTATE) - do_exit(SIGKILL); - goto no_context; - -do_sigbus: - up_read(&mm->mmap_sem); - - /* - * Send a sigbus, regardless of whether we were in kernel - * or user mode. - */ - tsk->thread.prot_addr = address; - tsk->thread.trap_no = error_code; - force_sig(SIGBUS, tsk); - - /* Kernel mode? Handle exceptions or die */ - if (!(regs->psw.mask & PSW_MASK_PSTATE)) - goto no_context; + do_no_context(regs, error_code, address); } void __kprobes do_protection_exception(struct pt_regs *regs, unsigned long error_code) { + /* Protection exception is supressing, decrement psw address. */ regs->psw.addr -= (error_code >> 16); + /* + * Check for low-address protection. This needs to be treated + * as a special case because the translation exception code + * field is not guaranteed to contain valid data in this case. + */ + if (unlikely(!(S390_lowcore.trans_exc_code & 4))) { + do_low_address(regs, error_code); + return; + } do_exception(regs, 4, 1); } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index eb5dc62f0d9..e71929db8b0 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -398,6 +398,9 @@ dasd_change_state(struct dasd_device *device) if (device->state == device->target) wake_up(&dasd_init_waitq); + + /* let user-space know that the device status changed */ + kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); } /* diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index ed70852cc91..6a89cefe99b 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -19,6 +19,7 @@ #include <asm/debug.h> #include <asm/uaccess.h> +#include <asm/ipl.h> /* This is ugly... */ #define PRINTK_HEADER "dasd_devmap:" @@ -133,6 +134,8 @@ dasd_call_setup(char *str) __setup ("dasd=", dasd_call_setup); #endif /* #ifndef MODULE */ +#define DASD_IPLDEV "ipldev" + /* * Read a device busid/devno from a string. 
*/ @@ -141,6 +144,20 @@ dasd_busid(char **str, int *id0, int *id1, int *devno) { int val, old_style; + /* Interpret ipldev busid */ + if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) { + if (ipl_info.type != IPL_TYPE_CCW) { + MESSAGE(KERN_ERR, "%s", "ipl device is not a ccw " + "device"); + return -EINVAL; + } + *id0 = 0; + *id1 = ipl_info.data.ccw.dev_id.ssid; + *devno = ipl_info.data.ccw.dev_id.devno; + *str += strlen(DASD_IPLDEV); + + return 0; + } /* check for leading '0x' */ old_style = 0; if ((*str)[0] == '0' && (*str)[1] == 'x') { @@ -829,6 +846,46 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); static ssize_t +dasd_device_status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dasd_device *device; + ssize_t len; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (!IS_ERR(device)) { + switch (device->state) { + case DASD_STATE_NEW: + len = snprintf(buf, PAGE_SIZE, "new\n"); + break; + case DASD_STATE_KNOWN: + len = snprintf(buf, PAGE_SIZE, "detected\n"); + break; + case DASD_STATE_BASIC: + len = snprintf(buf, PAGE_SIZE, "basic\n"); + break; + case DASD_STATE_UNFMT: + len = snprintf(buf, PAGE_SIZE, "unformatted\n"); + break; + case DASD_STATE_READY: + len = snprintf(buf, PAGE_SIZE, "ready\n"); + break; + case DASD_STATE_ONLINE: + len = snprintf(buf, PAGE_SIZE, "online\n"); + break; + default: + len = snprintf(buf, PAGE_SIZE, "no stat\n"); + break; + } + dasd_put_device(device); + } else + len = snprintf(buf, PAGE_SIZE, "unknown\n"); + return len; +} + +static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL); + +static ssize_t dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dasd_devmap *devmap; @@ -939,6 +996,7 @@ static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); static struct attribute * dasd_attrs[] = { &dev_attr_readonly.attr, &dev_attr_discipline.attr, + &dev_attr_status.attr, &dev_attr_alias.attr, &dev_attr_vendor.attr, &dev_attr_uid.attr, diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 293e667b50f..c210784bdf4 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -3,7 +3,7 @@ # obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ - sclp_info.o + sclp_info.o sclp_config.o sclp_chp.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o @@ -29,3 +29,6 @@ obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o obj-$(CONFIG_MONREADER) += monreader.o obj-$(CONFIG_MONWRITER) += monwriter.o + +zcore_mod-objs := sclp_sdias.o zcore.o +obj-$(CONFIG_ZFCPDUMP) += zcore_mod.o diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 9a328f14a64..6000bdee408 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -813,12 +813,6 @@ con3215_unblank(void) spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } -static int __init -con3215_consetup(struct console *co, char *options) -{ - return 0; -} - /* * The console structure for the 3215 console */ @@ -827,7 +821,6 @@ static struct console con3215 = { .write = con3215_write, .device = con3215_device, .unblank = con3215_unblank, - .setup = con3215_consetup, .flags = CON_PRINTBUFFER, }; diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 8e7f2d7633d..fd3479119eb 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -555,12 
+555,6 @@ con3270_unblank(void) spin_unlock_irqrestore(&cp->view.lock, flags); } -static int __init -con3270_consetup(struct console *co, char *options) -{ - return 0; -} - /* * The console structure for the 3270 console */ @@ -569,7 +563,6 @@ static struct console con3270 = { .write = con3270_write, .device = con3270_device, .unblank = con3270_unblank, - .setup = con3270_consetup, .flags = CON_PRINTBUFFER, }; diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f171de3b0b1..fa62e694405 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -15,6 +15,7 @@ #include <linux/timer.h> #include <linux/reboot.h> #include <linux/jiffies.h> +#include <linux/init.h> #include <asm/types.h> #include <asm/s390_ext.h> @@ -510,7 +511,7 @@ sclp_state_change_cb(struct evbuf_header *evbuf) } static struct sclp_register sclp_state_change_event = { - .receive_mask = EvTyp_StateChange_Mask, + .receive_mask = EVTYP_STATECHANGE_MASK, .receiver_fn = sclp_state_change_cb }; @@ -930,3 +931,10 @@ sclp_init(void) sclp_init_mask(1); return 0; } + +static __init int sclp_initcall(void) +{ + return sclp_init(); +} + +arch_initcall(sclp_initcall); diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 7d29ab45a6e..87ac4a3ad49 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -19,33 +19,37 @@ #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) #define MAX_CONSOLE_PAGES 4 -#define EvTyp_OpCmd 0x01 -#define EvTyp_Msg 0x02 -#define EvTyp_StateChange 0x08 -#define EvTyp_PMsgCmd 0x09 -#define EvTyp_CntlProgOpCmd 0x20 -#define EvTyp_CntlProgIdent 0x0B -#define EvTyp_SigQuiesce 0x1D -#define EvTyp_VT220Msg 0x1A - -#define EvTyp_OpCmd_Mask 0x80000000 -#define EvTyp_Msg_Mask 0x40000000 -#define EvTyp_StateChange_Mask 0x01000000 -#define EvTyp_PMsgCmd_Mask 0x00800000 -#define EvTyp_CtlProgOpCmd_Mask 0x00000001 -#define EvTyp_CtlProgIdent_Mask 0x00200000 -#define EvTyp_SigQuiesce_Mask 0x00000008 -#define EvTyp_VT220Msg_Mask 0x00000040 - -#define GnrlMsgFlgs_DOM 0x8000 -#define GnrlMsgFlgs_SndAlrm 0x4000 -#define GnrlMsgFlgs_HoldMsg 0x2000 - -#define LnTpFlgs_CntlText 0x8000 -#define LnTpFlgs_LabelText 0x4000 -#define LnTpFlgs_DataText 0x2000 -#define LnTpFlgs_EndText 0x1000 -#define LnTpFlgs_PromptText 0x0800 +#define EVTYP_OPCMD 0x01 +#define EVTYP_MSG 0x02 +#define EVTYP_STATECHANGE 0x08 +#define EVTYP_PMSGCMD 0x09 +#define EVTYP_CNTLPROGOPCMD 0x20 +#define EVTYP_CNTLPROGIDENT 0x0B +#define EVTYP_SIGQUIESCE 0x1D +#define EVTYP_VT220MSG 0x1A +#define EVTYP_CONFMGMDATA 0x04 +#define EVTYP_SDIAS 0x1C + +#define EVTYP_OPCMD_MASK 0x80000000 +#define EVTYP_MSG_MASK 0x40000000 +#define EVTYP_STATECHANGE_MASK 0x01000000 +#define EVTYP_PMSGCMD_MASK 0x00800000 +#define EVTYP_CTLPROGOPCMD_MASK 0x00000001 +#define EVTYP_CTLPROGIDENT_MASK 0x00200000 +#define EVTYP_SIGQUIESCE_MASK 0x00000008 +#define EVTYP_VT220MSG_MASK 0x00000040 +#define EVTYP_CONFMGMDATA_MASK 0x10000000 +#define EVTYP_SDIAS_MASK 0x00000010 + +#define GNRLMSGFLGS_DOM 0x8000 +#define GNRLMSGFLGS_SNDALRM 0x4000 +#define GNRLMSGFLGS_HOLDMSG 0x2000 + +#define LNTPFLGS_CNTLTEXT 0x8000 +#define LNTPFLGS_LABELTEXT 0x4000 +#define LNTPFLGS_DATATEXT 0x2000 +#define LNTPFLGS_ENDTEXT 0x1000 +#define LNTPFLGS_PROMPTTEXT 0x0800 typedef unsigned int sclp_cmdw_t; @@ -56,15 +60,15 @@ typedef unsigned int sclp_cmdw_t; #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 #define GDS_ID_MDSMU 0x1310 -#define GDS_ID_MDSRouteInfo 0x1311 -#define GDS_ID_AgUnWrkCorr 0x1549 -#define GDS_ID_SNACondReport 0x1532 +#define 
GDS_ID_MDSROUTEINFO 0x1311 +#define GDS_ID_AGUNWRKCORR 0x1549 +#define GDS_ID_SNACONDREPORT 0x1532 #define GDS_ID_CPMSU 0x1212 -#define GDS_ID_RoutTargInstr 0x154D -#define GDS_ID_OpReq 0x8070 -#define GDS_ID_TextCmd 0x1320 +#define GDS_ID_ROUTTARGINSTR 0x154D +#define GDS_ID_OPREQ 0x8070 +#define GDS_ID_TEXTCMD 0x1320 -#define GDS_KEY_SelfDefTextMsg 0x31 +#define GDS_KEY_SELFDEFTEXTMSG 0x31 typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ diff --git a/drivers/s390/char/sclp_chp.c b/drivers/s390/char/sclp_chp.c new file mode 100644 index 00000000000..a66b914519b --- /dev/null +++ b/drivers/s390/char/sclp_chp.c @@ -0,0 +1,196 @@ +/* + * drivers/s390/char/sclp_chp.c + * + * Copyright IBM Corp. 2007 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/types.h> +#include <linux/gfp.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <asm/sclp.h> +#include <asm/chpid.h> + +#include "sclp.h" + +#define TAG "sclp_chp: " + +#define SCLP_CMDW_CONFIGURE_CHANNEL_PATH 0x000f0001 +#define SCLP_CMDW_DECONFIGURE_CHANNEL_PATH 0x000e0001 +#define SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION 0x00030001 + +static inline sclp_cmdw_t get_configure_cmdw(struct chp_id chpid) +{ + return SCLP_CMDW_CONFIGURE_CHANNEL_PATH | chpid.id << 8; +} + +static inline sclp_cmdw_t get_deconfigure_cmdw(struct chp_id chpid) +{ + return SCLP_CMDW_DECONFIGURE_CHANNEL_PATH | chpid.id << 8; +} + +static void chp_callback(struct sclp_req *req, void *data) +{ + struct completion *completion = data; + + complete(completion); +} + +struct chp_cfg_sccb { + struct sccb_header header; + u8 ccm; + u8 reserved[6]; + u8 cssid; +} __attribute__((packed)); + +struct chp_cfg_data { + struct chp_cfg_sccb sccb; + struct sclp_req req; + struct completion completion; +} __attribute__((packed)); + +static int do_configure(sclp_cmdw_t cmd) +{ + struct chp_cfg_data *data; + int rc; + + /* Prepare sccb. */ + data = (struct chp_cfg_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!data) + return -ENOMEM; + data->sccb.header.length = sizeof(struct chp_cfg_sccb); + data->req.command = cmd; + data->req.sccb = &(data->sccb); + data->req.status = SCLP_REQ_FILLED; + data->req.callback = chp_callback; + data->req.callback_data = &(data->completion); + init_completion(&data->completion); + + /* Perform sclp request. */ + rc = sclp_add_request(&(data->req)); + if (rc) + goto out; + wait_for_completion(&data->completion); + + /* Check response .*/ + if (data->req.status != SCLP_REQ_DONE) { + printk(KERN_WARNING TAG "configure channel-path request failed " + "(status=0x%02x)\n", data->req.status); + rc = -EIO; + goto out; + } + switch (data->sccb.header.response_code) { + case 0x0020: + case 0x0120: + case 0x0440: + case 0x0450: + break; + default: + printk(KERN_WARNING TAG "configure channel-path failed " + "(cmd=0x%08x, response=0x%04x)\n", cmd, + data->sccb.header.response_code); + rc = -EIO; + break; + } +out: + free_page((unsigned long) data); + + return rc; +} + +/** + * sclp_chp_configure - perform configure channel-path sclp command + * @chpid: channel-path ID + * + * Perform configure channel-path command sclp command for specified chpid. + * Return 0 after command successfully finished, non-zero otherwise. 
+ */ +int sclp_chp_configure(struct chp_id chpid) +{ + return do_configure(get_configure_cmdw(chpid)); +} + +/** + * sclp_chp_deconfigure - perform deconfigure channel-path sclp command + * @chpid: channel-path ID + * + * Perform deconfigure channel-path command sclp command for specified chpid + * and wait for completion. On success return 0. Return non-zero otherwise. + */ +int sclp_chp_deconfigure(struct chp_id chpid) +{ + return do_configure(get_deconfigure_cmdw(chpid)); +} + +struct chp_info_sccb { + struct sccb_header header; + u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; + u8 standby[SCLP_CHP_INFO_MASK_SIZE]; + u8 configured[SCLP_CHP_INFO_MASK_SIZE]; + u8 ccm; + u8 reserved[6]; + u8 cssid; +} __attribute__((packed)); + +struct chp_info_data { + struct chp_info_sccb sccb; + struct sclp_req req; + struct completion completion; +} __attribute__((packed)); + +/** + * sclp_chp_read_info - perform read channel-path information sclp command + * @info: resulting channel-path information data + * + * Perform read channel-path information sclp command and wait for completion. + * On success, store channel-path information in @info and return 0. Return + * non-zero otherwise. + */ +int sclp_chp_read_info(struct sclp_chp_info *info) +{ + struct chp_info_data *data; + int rc; + + /* Prepare sccb. */ + data = (struct chp_info_data *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!data) + return -ENOMEM; + data->sccb.header.length = sizeof(struct chp_info_sccb); + data->req.command = SCLP_CMDW_READ_CHANNEL_PATH_INFORMATION; + data->req.sccb = &(data->sccb); + data->req.status = SCLP_REQ_FILLED; + data->req.callback = chp_callback; + data->req.callback_data = &(data->completion); + init_completion(&data->completion); + + /* Perform sclp request. */ + rc = sclp_add_request(&(data->req)); + if (rc) + goto out; + wait_for_completion(&data->completion); + + /* Check response .*/ + if (data->req.status != SCLP_REQ_DONE) { + printk(KERN_WARNING TAG "read channel-path info request failed " + "(status=0x%02x)\n", data->req.status); + rc = -EIO; + goto out; + } + if (data->sccb.header.response_code != 0x0010) { + printk(KERN_WARNING TAG "read channel-path info failed " + "(response=0x%04x)\n", data->sccb.header.response_code); + rc = -EIO; + goto out; + } + memcpy(info->recognized, data->sccb.recognized, + SCLP_CHP_INFO_MASK_SIZE); + memcpy(info->standby, data->sccb.standby, + SCLP_CHP_INFO_MASK_SIZE); + memcpy(info->configured, data->sccb.configured, + SCLP_CHP_INFO_MASK_SIZE); +out: + free_page((unsigned long) data); + + return rc; +} diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c new file mode 100644 index 00000000000..5322e5e54a9 --- /dev/null +++ b/drivers/s390/char/sclp_config.c @@ -0,0 +1,75 @@ +/* + * drivers/s390/char/sclp_config.c + * + * Copyright IBM Corp. 
2007 + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> + */ + +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/cpu.h> +#include <linux/sysdev.h> +#include <linux/workqueue.h> +#include "sclp.h" + +#define TAG "sclp_config: " + +struct conf_mgm_data { + u8 reserved; + u8 ev_qualifier; +} __attribute__((packed)); + +#define EV_QUAL_CAP_CHANGE 3 + +static struct work_struct sclp_cpu_capability_work; + +static void sclp_cpu_capability_notify(struct work_struct *work) +{ + int cpu; + struct sys_device *sysdev; + + printk(KERN_WARNING TAG "cpu capability changed.\n"); + lock_cpu_hotplug(); + for_each_online_cpu(cpu) { + sysdev = get_cpu_sysdev(cpu); + kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); + } + unlock_cpu_hotplug(); +} + +static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) +{ + struct conf_mgm_data *cdata; + + cdata = (struct conf_mgm_data *)(evbuf + 1); + if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE) + schedule_work(&sclp_cpu_capability_work); +} + +static struct sclp_register sclp_conf_register = +{ + .receive_mask = EVTYP_CONFMGMDATA_MASK, + .receiver_fn = sclp_conf_receiver_fn, +}; + +static int __init sclp_conf_init(void) +{ + int rc; + + INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); + + rc = sclp_register(&sclp_conf_register); + if (rc) { + printk(KERN_ERR TAG "failed to register (%d).\n", rc); + return rc; + } + + if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) { + printk(KERN_WARNING TAG "no configuration management.\n"); + sclp_unregister(&sclp_conf_register); + rc = -ENOSYS; + } + return rc; +} + +__initcall(sclp_conf_init); diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 65aa2c85737..29fe2a5ec2f 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c @@ -46,7 +46,7 @@ struct cpi_sccb { /* Event type structure for write message and write priority message */ static struct sclp_register sclp_cpi_event = { - .send_mask = EvTyp_CtlProgIdent_Mask + .send_mask = EVTYP_CTLPROGIDENT_MASK }; MODULE_LICENSE("GPL"); @@ -201,7 +201,7 @@ cpi_module_init(void) "console.\n"); return -EINVAL; } - if (!(sclp_cpi_event.sclp_send_mask & EvTyp_CtlProgIdent_Mask)) { + if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) { printk(KERN_WARNING "cpi: no control program identification " "support\n"); sclp_unregister(&sclp_cpi_event); diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c index baa8fe669ed..45ff25e787c 100644 --- a/drivers/s390/char/sclp_quiesce.c +++ b/drivers/s390/char/sclp_quiesce.c @@ -43,7 +43,7 @@ sclp_quiesce_handler(struct evbuf_header *evbuf) } static struct sclp_register sclp_quiesce_event = { - .receive_mask = EvTyp_SigQuiesce_Mask, + .receive_mask = EVTYP_SIGQUIESCE_MASK, .receiver_fn = sclp_quiesce_handler }; diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 2486783ea58..bbd5b8b66f4 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -30,7 +30,7 @@ /* Event type structure for write message and write priority message */ static struct sclp_register sclp_rw_event = { - .send_mask = EvTyp_Msg_Mask | EvTyp_PMsgCmd_Mask + .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK }; /* @@ -64,7 +64,7 @@ sclp_make_buffer(void *page, unsigned short columns, unsigned short htab) memset(sccb, 0, sizeof(struct write_sccb)); sccb->header.length = sizeof(struct write_sccb); sccb->msg_buf.header.length = sizeof(struct msg_buf); - sccb->msg_buf.header.type = EvTyp_Msg; + 
sccb->msg_buf.header.type = EVTYP_MSG; sccb->msg_buf.mdb.header.length = sizeof(struct mdb); sccb->msg_buf.mdb.header.type = 1; sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ @@ -114,7 +114,7 @@ sclp_initialize_mto(struct sclp_buffer *buffer, int max_len) memset(mto, 0, sizeof(struct mto)); mto->length = sizeof(struct mto); mto->type = 4; /* message text object */ - mto->line_type_flags = LnTpFlgs_EndText; /* end text */ + mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */ /* set pointer to first byte after struct mto. */ buffer->current_line = (char *) (mto + 1); @@ -215,7 +215,7 @@ sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count) case '\a': /* bell, one for several times */ /* set SCLP sound alarm bit in General Object */ buffer->sccb->msg_buf.mdb.go.general_msg_flags |= - GnrlMsgFlgs_SndAlrm; + GNRLMSGFLGS_SNDALRM; break; case '\t': /* horizontal tabulator */ /* check if new mto needs to be created */ @@ -452,12 +452,12 @@ sclp_emit_buffer(struct sclp_buffer *buffer, return -EIO; sccb = buffer->sccb; - if (sclp_rw_event.sclp_send_mask & EvTyp_Msg_Mask) + if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK) /* Use normal write message */ - sccb->msg_buf.header.type = EvTyp_Msg; - else if (sclp_rw_event.sclp_send_mask & EvTyp_PMsgCmd_Mask) + sccb->msg_buf.header.type = EVTYP_MSG; + else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK) /* Use write priority message */ - sccb->msg_buf.header.type = EvTyp_PMsgCmd; + sccb->msg_buf.header.type = EVTYP_PMSGCMD; else return -ENOSYS; buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c new file mode 100644 index 00000000000..52283daddae --- /dev/null +++ b/drivers/s390/char/sclp_sdias.c @@ -0,0 +1,255 @@ +/* + * Sclp "store data in absolut storage" + * + * Copyright IBM Corp. 2003,2007 + * Author(s): Michael Holzheu + */ + +#include <linux/sched.h> +#include <asm/sclp.h> +#include <asm/debug.h> +#include <asm/ipl.h> +#include "sclp.h" +#include "sclp_rw.h" + +#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) +#define ERROR_MSG(x...) 
printk ( KERN_ALERT "SDIAS: " x ) + +#define SDIAS_RETRIES 300 +#define SDIAS_SLEEP_TICKS 50 + +#define EQ_STORE_DATA 0x0 +#define EQ_SIZE 0x1 +#define DI_FCP_DUMP 0x0 +#define ASA_SIZE_32 0x0 +#define ASA_SIZE_64 0x1 +#define EVSTATE_ALL_STORED 0x0 +#define EVSTATE_NO_DATA 0x3 +#define EVSTATE_PART_STORED 0x10 + +static struct debug_info *sdias_dbf; + +static struct sclp_register sclp_sdias_register = { + .send_mask = EVTYP_SDIAS_MASK, +}; + +struct sdias_evbuf { + struct evbuf_header hdr; + u8 event_qual; + u8 data_id; + u64 reserved2; + u32 event_id; + u16 reserved3; + u8 asa_size; + u8 event_status; + u32 reserved4; + u32 blk_cnt; + u64 asa; + u32 reserved5; + u32 fbn; + u32 reserved6; + u32 lbn; + u16 reserved7; + u16 dbs; +} __attribute__((packed)); + +struct sdias_sccb { + struct sccb_header hdr; + struct sdias_evbuf evbuf; +} __attribute__((packed)); + +static struct sdias_sccb sccb __attribute__((aligned(4096))); + +static int sclp_req_done; +static wait_queue_head_t sdias_wq; +static DEFINE_MUTEX(sdias_mutex); + +static void sdias_callback(struct sclp_req *request, void *data) +{ + struct sdias_sccb *sccb; + + sccb = (struct sdias_sccb *) request->sccb; + sclp_req_done = 1; + wake_up(&sdias_wq); /* Inform caller, that request is complete */ + TRACE("callback done\n"); +} + +static int sdias_sclp_send(struct sclp_req *req) +{ + int retries; + int rc; + + for (retries = SDIAS_RETRIES; retries; retries--) { + sclp_req_done = 0; + TRACE("add request\n"); + rc = sclp_add_request(req); + if (rc) { + /* not initiated, wait some time and retry */ + set_current_state(TASK_INTERRUPTIBLE); + TRACE("add request failed: rc = %i\n",rc); + schedule_timeout(SDIAS_SLEEP_TICKS); + continue; + } + /* initiated, wait for completion of service call */ + wait_event(sdias_wq, (sclp_req_done == 1)); + if (req->status == SCLP_REQ_FAILED) { + TRACE("sclp request failed\n"); + rc = -EIO; + continue; + } + TRACE("request done\n"); + break; + } + return rc; +} + +/* + * Get number of blocks (4K) available in the HSA + */ +int sclp_sdias_blk_count(void) +{ + struct sclp_req request; + int rc; + + mutex_lock(&sdias_mutex); + + memset(&sccb, 0, sizeof(sccb)); + memset(&request, 0, sizeof(request)); + + sccb.hdr.length = sizeof(sccb); + sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb.evbuf.hdr.type = EVTYP_SDIAS; + sccb.evbuf.event_qual = EQ_SIZE; + sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_id = 4712; + sccb.evbuf.dbs = 1; + + request.sccb = &sccb; + request.command = SCLP_CMDW_WRITE_EVENT_DATA; + request.status = SCLP_REQ_FILLED; + request.callback = sdias_callback; + + rc = sdias_sclp_send(&request); + if (rc) { + ERROR_MSG("sclp_send failed for get_nr_blocks\n"); + goto out; + } + if (sccb.hdr.response_code != 0x0020) { + TRACE("send failed: %x\n", sccb.hdr.response_code); + rc = -EIO; + goto out; + } + + switch (sccb.evbuf.event_status) { + case 0: + rc = sccb.evbuf.blk_cnt; + break; + default: + ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status); + rc = -EIO; + goto out; + } + TRACE("%i blocks\n", rc); +out: + mutex_unlock(&sdias_mutex); + return rc; +} + +/* + * Copy from HSA to absolute storage (not reentrant): + * + * @dest : Address of buffer where data should be copied + * @start_blk: Start Block (beginning with 1) + * @nr_blks : Number of 4K blocks to copy + * + * Return Value: 0 : Requested 'number' of blocks of data copied + * <0: ERROR - negative event status + */ +int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) +{ + struct sclp_req request; + int rc; + + 
mutex_lock(&sdias_mutex); + + memset(&sccb, 0, sizeof(sccb)); + memset(&request, 0, sizeof(request)); + + sccb.hdr.length = sizeof(sccb); + sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb.evbuf.hdr.type = EVTYP_SDIAS; + sccb.evbuf.hdr.flags = 0; + sccb.evbuf.event_qual = EQ_STORE_DATA; + sccb.evbuf.data_id = DI_FCP_DUMP; + sccb.evbuf.event_id = 4712; +#ifdef __s390x__ + sccb.evbuf.asa_size = ASA_SIZE_64; +#else + sccb.evbuf.asa_size = ASA_SIZE_32; +#endif + sccb.evbuf.event_status = 0; + sccb.evbuf.blk_cnt = nr_blks; + sccb.evbuf.asa = (unsigned long)dest; + sccb.evbuf.fbn = start_blk; + sccb.evbuf.lbn = 0; + sccb.evbuf.dbs = 1; + + request.sccb = &sccb; + request.command = SCLP_CMDW_WRITE_EVENT_DATA; + request.status = SCLP_REQ_FILLED; + request.callback = sdias_callback; + + rc = sdias_sclp_send(&request); + if (rc) { + ERROR_MSG("sclp_send failed: %x\n", rc); + goto out; + } + if (sccb.hdr.response_code != 0x0020) { + TRACE("copy failed: %x\n", sccb.hdr.response_code); + rc = -EIO; + goto out; + } + + switch (sccb.evbuf.event_status) { + case EVSTATE_ALL_STORED: + TRACE("all stored\n"); + case EVSTATE_PART_STORED: + TRACE("part stored: %i\n", sccb.evbuf.blk_cnt); + break; + case EVSTATE_NO_DATA: + TRACE("no data\n"); + default: + ERROR_MSG("Error from SCLP while copying hsa. " + "Event status = %x\n", + sccb.evbuf.event_status); + rc = -EIO; + } +out: + mutex_unlock(&sdias_mutex); + return rc; +} + +int __init sdias_init(void) +{ + int rc; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return 0; + sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long)); + debug_register_view(sdias_dbf, &debug_sprintf_view); + debug_set_level(sdias_dbf, 6); + rc = sclp_register(&sclp_sdias_register); + if (rc) { + ERROR_MSG("sclp register failed\n"); + return rc; + } + init_waitqueue_head(&sdias_wq); + TRACE("init done\n"); + return 0; +} + +void __exit sdias_exit(void) +{ + debug_unregister(sdias_dbf); + sclp_unregister(&sclp_sdias_register); +} diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 076816b9d52..e3b3d390b4a 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -648,7 +648,7 @@ sclp_eval_textcmd(struct gds_subvector *start, subvec = start; while (subvec < end) { subvec = find_gds_subvector(subvec, end, - GDS_KEY_SelfDefTextMsg); + GDS_KEY_SELFDEFTEXTMSG); if (!subvec) break; sclp_eval_selfdeftextmsg((struct gds_subvector *)(subvec + 1), @@ -664,7 +664,7 @@ sclp_eval_cpmsu(struct gds_vector *start, struct gds_vector *end) vec = start; while (vec < end) { - vec = find_gds_vector(vec, end, GDS_ID_TextCmd); + vec = find_gds_vector(vec, end, GDS_ID_TEXTCMD); if (!vec) break; sclp_eval_textcmd((struct gds_subvector *)(vec + 1), @@ -703,7 +703,7 @@ sclp_tty_state_change(struct sclp_register *reg) static struct sclp_register sclp_input_event = { - .receive_mask = EvTyp_OpCmd_Mask | EvTyp_PMsgCmd_Mask, + .receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK, .state_change_fn = sclp_tty_state_change, .receiver_fn = sclp_tty_receiver }; diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index f77dc33b5f8..726334757bb 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -99,8 +99,8 @@ static void sclp_vt220_emit_current(void); /* Registration structure for our interest in SCLP event buffers */ static struct sclp_register sclp_vt220_register = { - .send_mask = EvTyp_VT220Msg_Mask, - .receive_mask = EvTyp_VT220Msg_Mask, + .send_mask = EVTYP_VT220MSG_MASK, + .receive_mask = 
EVTYP_VT220MSG_MASK, .state_change_fn = NULL, .receiver_fn = sclp_vt220_receiver_fn }; @@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data) static int __sclp_vt220_emit(struct sclp_vt220_request *request) { - if (!(sclp_vt220_register.sclp_send_mask & EvTyp_VT220Msg_Mask)) { + if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) { request->sclp_req.status = SCLP_REQ_FAILED; return -EIO; } @@ -284,7 +284,7 @@ sclp_vt220_initialize_page(void *page) sccb->header.length = sizeof(struct sclp_vt220_sccb); sccb->header.function_code = SCLP_NORMAL_WRITE; sccb->header.response_code = 0x0000; - sccb->evbuf.type = EvTyp_VT220Msg; + sccb->evbuf.type = EVTYP_VT220MSG; sccb->evbuf.length = sizeof(struct evbuf_header); return request; diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index b87d3b01993..a5a00e9ae4d 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -125,7 +125,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { .recording_name = "EREP", .minor_num = 0, .buffer_free = 1, - .priv_lock = SPIN_LOCK_UNLOCKED, + .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock), .autorecording = 1, .autopurge = 1, }, @@ -134,7 +134,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { .recording_name = "ACCOUNT", .minor_num = 1, .buffer_free = 1, - .priv_lock = SPIN_LOCK_UNLOCKED, + .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock), .autorecording = 1, .autopurge = 1, }, @@ -143,7 +143,7 @@ static struct vmlogrdr_priv_t sys_ser[] = { .recording_name = "SYMPTOM", .minor_num = 2, .buffer_free = 1, - .priv_lock = SPIN_LOCK_UNLOCKED, + .priv_lock = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock), .autorecording = 1, .autopurge = 1, } @@ -385,6 +385,9 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp) struct vmlogrdr_priv_t * logptr = filp->private_data; + iucv_path_sever(logptr->path, NULL); + kfree(logptr->path); + logptr->path = NULL; if (logptr->autorecording) { ret = vmlogrdr_recording(logptr,0,logptr->autopurge); if (ret) diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c new file mode 100644 index 00000000000..89d439316a5 --- /dev/null +++ b/drivers/s390/char/zcore.c @@ -0,0 +1,651 @@ +/* + * zcore module to export memory content and register sets for creating system + * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same + * dump format as s390 standalone dumps. + * + * For more information please refer to Documentation/s390/zfcpdump.txt + * + * Copyright IBM Corp. 2003,2007 + * Author(s): Michael Holzheu + */ + +#include <linux/init.h> +#include <linux/miscdevice.h> +#include <linux/utsname.h> +#include <linux/debugfs.h> +#include <asm/ipl.h> +#include <asm/sclp.h> +#include <asm/setup.h> +#include <asm/sigp.h> +#include <asm/uaccess.h> +#include <asm/debug.h> +#include <asm/processor.h> +#include <asm/irqflags.h> + +#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) +#define MSG(x...) printk( KERN_ALERT x ) +#define ERROR_MSG(x...) 
printk ( KERN_ALERT "DUMP: " x ) + +#define TO_USER 0 +#define TO_KERNEL 1 + +enum arch_id { + ARCH_S390 = 0, + ARCH_S390X = 1, +}; + +/* dump system info */ + +struct sys_info { + enum arch_id arch; + unsigned long sa_base; + u32 sa_size; + int cpu_map[NR_CPUS]; + unsigned long mem_size; + union save_area lc_mask; +}; + +static struct sys_info sys_info; +static struct debug_info *zcore_dbf; +static int hsa_available; +static struct dentry *zcore_dir; +static struct dentry *zcore_file; + +/* + * Copy memory from HSA to kernel or user memory (not reentrant): + * + * @dest: Kernel or user buffer where memory should be copied to + * @src: Start address within HSA where data should be copied + * @count: Size of buffer, which should be copied + * @mode: Either TO_KERNEL or TO_USER + */ +static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) +{ + int offs, blk_num; + static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); + + if (count == 0) + return 0; + + /* copy first block */ + offs = 0; + if ((src % PAGE_SIZE) != 0) { + blk_num = src / PAGE_SIZE + 2; + if (sclp_sdias_copy(buf, blk_num, 1)) { + TRACE("sclp_sdias_copy() failed\n"); + return -EIO; + } + offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count); + if (mode == TO_USER) { + if (copy_to_user((__force __user void*) dest, + buf + (src % PAGE_SIZE), offs)) + return -EFAULT; + } else + memcpy(dest, buf + (src % PAGE_SIZE), offs); + } + if (offs == count) + goto out; + + /* copy middle */ + for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) { + blk_num = (src + offs) / PAGE_SIZE + 2; + if (sclp_sdias_copy(buf, blk_num, 1)) { + TRACE("sclp_sdias_copy() failed\n"); + return -EIO; + } + if (mode == TO_USER) { + if (copy_to_user((__force __user void*) dest + offs, + buf, PAGE_SIZE)) + return -EFAULT; + } else + memcpy(dest + offs, buf, PAGE_SIZE); + } + if (offs == count) + goto out; + + /* copy last block */ + blk_num = (src + offs) / PAGE_SIZE + 2; + if (sclp_sdias_copy(buf, blk_num, 1)) { + TRACE("sclp_sdias_copy() failed\n"); + return -EIO; + } + if (mode == TO_USER) { + if (copy_to_user((__force __user void*) dest + offs, buf, + PAGE_SIZE)) + return -EFAULT; + } else + memcpy(dest + offs, buf, count - offs); +out: + return 0; +} + +static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count) +{ + return memcpy_hsa((void __force *) dest, src, count, TO_USER); +} + +static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count) +{ + return memcpy_hsa(dest, src, count, TO_KERNEL); +} + +static int memcpy_real(void *dest, unsigned long src, size_t count) +{ + unsigned long flags; + int rc = -EFAULT; + register unsigned long _dest asm("2") = (unsigned long) dest; + register unsigned long _len1 asm("3") = (unsigned long) count; + register unsigned long _src asm("4") = src; + register unsigned long _len2 asm("5") = (unsigned long) count; + + if (count == 0) + return 0; + flags = __raw_local_irq_stnsm(0xf8); /* switch to real mode */ + asm volatile ( + "0: mvcle %1,%2,0x0\n" + "1: jo 0b\n" + " lhi %0,0x0\n" + "2:\n" + EX_TABLE(1b,2b) + : "+d" (rc) + : "d" (_dest), "d" (_src), "d" (_len1), "d" (_len2) + : "cc", "memory"); + __raw_local_irq_ssm(flags); + + return rc; +} + +static int memcpy_real_user(__user void *dest, unsigned long src, size_t count) +{ + static char buf[4096]; + int offs = 0, size; + + while (offs < count) { + size = min(sizeof(buf), count - offs); + if (memcpy_real(buf, src + offs, size)) + return -EFAULT; + if (copy_to_user(dest + offs, buf, size)) + return 
-EFAULT; + offs += size; + } + return 0; +} + +#ifdef __s390x__ +/* + * Convert s390x (64 bit) cpu info to s390 (32 bit) cpu info + */ +static void __init s390x_to_s390_regs(union save_area *out, union save_area *in, + int cpu) +{ + int i; + + for (i = 0; i < 16; i++) { + out->s390.gp_regs[i] = in->s390x.gp_regs[i] & 0x00000000ffffffff; + out->s390.acc_regs[i] = in->s390x.acc_regs[i]; + out->s390.ctrl_regs[i] = + in->s390x.ctrl_regs[i] & 0x00000000ffffffff; + } + /* locore for 31 bit has only space for fpregs 0,2,4,6 */ + out->s390.fp_regs[0] = in->s390x.fp_regs[0]; + out->s390.fp_regs[1] = in->s390x.fp_regs[2]; + out->s390.fp_regs[2] = in->s390x.fp_regs[4]; + out->s390.fp_regs[3] = in->s390x.fp_regs[6]; + memcpy(&(out->s390.psw[0]), &(in->s390x.psw[0]), 4); + out->s390.psw[1] |= 0x8; /* set bit 12 */ + memcpy(&(out->s390.psw[4]),&(in->s390x.psw[12]), 4); + out->s390.psw[4] |= 0x80; /* set (31bit) addressing bit */ + out->s390.pref_reg = in->s390x.pref_reg; + out->s390.timer = in->s390x.timer; + out->s390.clk_cmp = in->s390x.clk_cmp; +} + +static void __init s390x_to_s390_save_areas(void) +{ + int i = 1; + static union save_area tmp; + + while (zfcpdump_save_areas[i]) { + s390x_to_s390_regs(&tmp, zfcpdump_save_areas[i], i); + memcpy(zfcpdump_save_areas[i], &tmp, sizeof(tmp)); + i++; + } +} + +#endif /* __s390x__ */ + +static int __init init_cpu_info(enum arch_id arch) +{ + union save_area *sa; + + /* get info for boot cpu from lowcore, stored in the HSA */ + + sa = kmalloc(sizeof(*sa), GFP_KERNEL); + if (!sa) { + ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__); + return -ENOMEM; + } + if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) { + ERROR_MSG("could not copy from HSA\n"); + kfree(sa); + return -EIO; + } + zfcpdump_save_areas[0] = sa; + +#ifdef __s390x__ + /* convert s390x regs to s390, if we are dumping an s390 Linux */ + + if (arch == ARCH_S390) + s390x_to_s390_save_areas(); +#endif + + return 0; +} + +static DEFINE_MUTEX(zcore_mutex); + +#define DUMP_VERSION 0x3 +#define DUMP_MAGIC 0xa8190173618f23fdULL +#define DUMP_ARCH_S390X 2 +#define DUMP_ARCH_S390 1 +#define HEADER_SIZE 4096 + +/* dump header dumped according to s390 crash dump format */ + +struct zcore_header { + u64 magic; + u32 version; + u32 header_size; + u32 dump_level; + u32 page_size; + u64 mem_size; + u64 mem_start; + u64 mem_end; + u32 num_pages; + u32 pad1; + u64 tod; + cpuid_t cpu_id; + u32 arch_id; + u32 build_arch; + char pad2[4016]; +} __attribute__((packed,__aligned__(16))); + +static struct zcore_header zcore_header = { + .magic = DUMP_MAGIC, + .version = DUMP_VERSION, + .header_size = 4096, + .dump_level = 0, + .page_size = PAGE_SIZE, + .mem_start = 0, +#ifdef __s390x__ + .build_arch = DUMP_ARCH_S390X, +#else + .build_arch = DUMP_ARCH_S390, +#endif +}; + +/* + * Copy lowcore info to buffer. Use map in order to copy only register parts. 
+ * + * @buf: User buffer + * @sa: Pointer to save area + * @sa_off: Offset in save area to copy + * @len: Number of bytes to copy + */ +static int copy_lc(void __user *buf, void *sa, int sa_off, int len) +{ + int i; + char *lc_mask = (char*)&sys_info.lc_mask; + + for (i = 0; i < len; i++) { + if (!lc_mask[i + sa_off]) + continue; + if (copy_to_user(buf + i, sa + sa_off + i, 1)) + return -EFAULT; + } + return 0; +} + +/* + * Copy lowcores info to memory, if necessary + * + * @buf: User buffer + * @addr: Start address of buffer in dump memory + * @count: Size of buffer + */ +static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) +{ + unsigned long end; + int i = 0; + + if (count == 0) + return 0; + + end = start + count; + while (zfcpdump_save_areas[i]) { + unsigned long cp_start, cp_end; /* copy range */ + unsigned long sa_start, sa_end; /* save area range */ + unsigned long prefix; + unsigned long sa_off, len, buf_off; + + if (sys_info.arch == ARCH_S390) + prefix = zfcpdump_save_areas[i]->s390.pref_reg; + else + prefix = zfcpdump_save_areas[i]->s390x.pref_reg; + + sa_start = prefix + sys_info.sa_base; + sa_end = prefix + sys_info.sa_base + sys_info.sa_size; + + if ((end < sa_start) || (start > sa_end)) + goto next; + cp_start = max(start, sa_start); + cp_end = min(end, sa_end); + + buf_off = cp_start - start; + sa_off = cp_start - sa_start; + len = cp_end - cp_start; + + TRACE("copy_lc for: %lx\n", start); + if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len)) + return -EFAULT; +next: + i++; + } + return 0; +} + +/* + * Read routine for zcore character device + * First 4K are dump header + * Next 32MB are HSA Memory + * Rest is read from absolute Memory + */ +static ssize_t zcore_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned long mem_start; /* Start address in memory */ + size_t mem_offs; /* Offset in dump memory */ + size_t hdr_count; /* Size of header part of output buffer */ + size_t size; + int rc; + + mutex_lock(&zcore_mutex); + + if (*ppos > (sys_info.mem_size + HEADER_SIZE)) { + rc = -EINVAL; + goto fail; + } + + count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos)); + + /* Copy dump header */ + if (*ppos < HEADER_SIZE) { + size = min(count, (size_t) (HEADER_SIZE - *ppos)); + if (copy_to_user(buf, &zcore_header + *ppos, size)) { + rc = -EFAULT; + goto fail; + } + hdr_count = size; + mem_start = 0; + } else { + hdr_count = 0; + mem_start = *ppos - HEADER_SIZE; + } + + mem_offs = 0; + + /* Copy from HSA data */ + if (*ppos < (ZFCPDUMP_HSA_SIZE + HEADER_SIZE)) { + size = min((count - hdr_count), (size_t) (ZFCPDUMP_HSA_SIZE + - mem_start)); + rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); + if (rc) + goto fail; + + mem_offs += size; + } + + /* Copy from real mem */ + size = count - mem_offs - hdr_count; + rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs, + size); + if (rc) + goto fail; + + /* + * Since s390 dump analysis tools like lcrash or crash + * expect register sets in the prefix pages of the cpus, + * we copy them into the read buffer, if necessary. + * buf + hdr_count: Start of memory part of output buffer + * mem_start: Start memory address to copy from + * count - hdr_count: Size of memory area to copy + */ + if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) { + rc = -EFAULT; + goto fail; + } + *ppos += count; +fail: + mutex_unlock(&zcore_mutex); + return (rc < 0) ? 
rc : count; +} + +static int zcore_open(struct inode *inode, struct file *filp) +{ + if (!hsa_available) + return -ENODATA; + else + return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; +} + +static int zcore_release(struct inode *inode, struct file *filep) +{ + diag308(DIAG308_REL_HSA, NULL); + hsa_available = 0; + return 0; +} + +static loff_t zcore_lseek(struct file *file, loff_t offset, int orig) +{ + loff_t rc; + + mutex_lock(&zcore_mutex); + switch (orig) { + case 0: + file->f_pos = offset; + rc = file->f_pos; + break; + case 1: + file->f_pos += offset; + rc = file->f_pos; + break; + default: + rc = -EINVAL; + } + mutex_unlock(&zcore_mutex); + return rc; +} + +static struct file_operations zcore_fops = { + .owner = THIS_MODULE, + .llseek = zcore_lseek, + .read = zcore_read, + .open = zcore_open, + .release = zcore_release, +}; + + +static void __init set_s390_lc_mask(union save_area *map) +{ + memset(&map->s390.ext_save, 0xff, sizeof(map->s390.ext_save)); + memset(&map->s390.timer, 0xff, sizeof(map->s390.timer)); + memset(&map->s390.clk_cmp, 0xff, sizeof(map->s390.clk_cmp)); + memset(&map->s390.psw, 0xff, sizeof(map->s390.psw)); + memset(&map->s390.pref_reg, 0xff, sizeof(map->s390.pref_reg)); + memset(&map->s390.acc_regs, 0xff, sizeof(map->s390.acc_regs)); + memset(&map->s390.fp_regs, 0xff, sizeof(map->s390.fp_regs)); + memset(&map->s390.gp_regs, 0xff, sizeof(map->s390.gp_regs)); + memset(&map->s390.ctrl_regs, 0xff, sizeof(map->s390.ctrl_regs)); +} + +static void __init set_s390x_lc_mask(union save_area *map) +{ + memset(&map->s390x.fp_regs, 0xff, sizeof(map->s390x.fp_regs)); + memset(&map->s390x.gp_regs, 0xff, sizeof(map->s390x.gp_regs)); + memset(&map->s390x.psw, 0xff, sizeof(map->s390x.psw)); + memset(&map->s390x.pref_reg, 0xff, sizeof(map->s390x.pref_reg)); + memset(&map->s390x.fp_ctrl_reg, 0xff, sizeof(map->s390x.fp_ctrl_reg)); + memset(&map->s390x.tod_reg, 0xff, sizeof(map->s390x.tod_reg)); + memset(&map->s390x.timer, 0xff, sizeof(map->s390x.timer)); + memset(&map->s390x.clk_cmp, 0xff, sizeof(map->s390x.clk_cmp)); + memset(&map->s390x.acc_regs, 0xff, sizeof(map->s390x.acc_regs)); + memset(&map->s390x.ctrl_regs, 0xff, sizeof(map->s390x.ctrl_regs)); +} + +/* + * Initialize dump globals for a given architecture + */ +static int __init sys_info_init(enum arch_id arch) +{ + switch (arch) { + case ARCH_S390X: + MSG("DETECTED 'S390X (64 bit) OS'\n"); + sys_info.sa_base = SAVE_AREA_BASE_S390X; + sys_info.sa_size = sizeof(struct save_area_s390x); + set_s390x_lc_mask(&sys_info.lc_mask); + break; + case ARCH_S390: + MSG("DETECTED 'S390 (32 bit) OS'\n"); + sys_info.sa_base = SAVE_AREA_BASE_S390; + sys_info.sa_size = sizeof(struct save_area_s390); + set_s390_lc_mask(&sys_info.lc_mask); + break; + default: + ERROR_MSG("unknown architecture 0x%x.\n",arch); + return -EINVAL; + } + sys_info.arch = arch; + if (init_cpu_info(arch)) { + ERROR_MSG("get cpu info failed\n"); + return -ENOMEM; + } + sys_info.mem_size = real_memory_size; + + return 0; +} + +static int __init check_sdias(void) +{ + int rc, act_hsa_size; + + rc = sclp_sdias_blk_count(); + if (rc < 0) { + ERROR_MSG("Could not determine HSA size\n"); + return rc; + } + act_hsa_size = (rc - 1) * PAGE_SIZE; + if (act_hsa_size < ZFCPDUMP_HSA_SIZE) { + ERROR_MSG("HSA size too small: %i\n", act_hsa_size); + return -EINVAL; + } + return 0; +} + +static void __init zcore_header_init(int arch, struct zcore_header *hdr) +{ + if (arch == ARCH_S390X) + hdr->arch_id = DUMP_ARCH_S390X; + else + hdr->arch_id = DUMP_ARCH_S390; + hdr->mem_size = sys_info.mem_size; 
+ hdr->mem_end = sys_info.mem_size; + hdr->num_pages = sys_info.mem_size / PAGE_SIZE; + hdr->tod = get_clock(); + get_cpu_id(&hdr->cpu_id); +} + +extern int sdias_init(void); + +static int __init zcore_init(void) +{ + unsigned char arch; + int rc; + + if (ipl_info.type != IPL_TYPE_FCP_DUMP) + return -ENODATA; + + zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); + debug_register_view(zcore_dbf, &debug_sprintf_view); + debug_set_level(zcore_dbf, 6); + + TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno); + TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); + TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); + + rc = sdias_init(); + if (rc) + goto fail; + + rc = check_sdias(); + if (rc) { + ERROR_MSG("Dump initialization failed\n"); + goto fail; + } + + rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); + if (rc) { + ERROR_MSG("sdial memcpy for arch id failed\n"); + goto fail; + } + +#ifndef __s390x__ + if (arch == ARCH_S390X) { + ERROR_MSG("32 bit dumper can't dump 64 bit system!\n"); + rc = -EINVAL; + goto fail; + } +#endif + + rc = sys_info_init(arch); + if (rc) { + ERROR_MSG("arch init failed\n"); + goto fail; + } + + zcore_header_init(arch, &zcore_header); + + zcore_dir = debugfs_create_dir("zcore" , NULL); + if (!zcore_dir) { + rc = -ENOMEM; + goto fail; + } + zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL, + &zcore_fops); + if (!zcore_file) { + debugfs_remove(zcore_dir); + rc = -ENOMEM; + goto fail; + } + hsa_available = 1; + return 0; + +fail: + diag308(DIAG308_REL_HSA, NULL); + return rc; +} + +extern void sdias_exit(void); + +static void __exit zcore_exit(void) +{ + debug_unregister(zcore_dbf); + sdias_exit(); + diag308(DIAG308_REL_HSA, NULL); +} + +MODULE_AUTHOR("Copyright IBM Corp. 2003,2007"); +MODULE_DESCRIPTION("zcore module for zfcpdump support"); +MODULE_LICENSE("GPL"); + +subsys_initcall(zcore_init); +module_exit(zcore_exit); diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index c490c2a1c2f..cfaf77b320f 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -2,7 +2,7 @@ # Makefile for the S/390 common i/o drivers # -obj-y += airq.o blacklist.o chsc.o cio.o css.o +obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o ccw_device-objs += device.o device_fsm.o device_ops.o ccw_device-objs += device_id.o device_pgid.o device_status.o obj-y += ccw_device.o cmf.o diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 5aeb68e732b..e5ccda63e88 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + mutex_lock(&gdev->reg_mutex); __ccwgroup_remove_symlinks(gdev); device_unregister(dev); + mutex_unlock(&gdev->reg_mutex); } static ssize_t @@ -173,7 +175,8 @@ ccwgroup_create(struct device *root, return -ENOMEM; atomic_set(&gdev->onoff, 0); - + mutex_init(&gdev->reg_mutex); + mutex_lock(&gdev->reg_mutex); for (i = 0; i < argc; i++) { gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); @@ -183,12 +186,12 @@ ccwgroup_create(struct device *root, || gdev->cdev[i]->id.driver_info != gdev->cdev[0]->id.driver_info) { rc = -EINVAL; - goto free_dev; + goto error; } /* Don't allow a device to belong to more than one group. 
*/ if (gdev->cdev[i]->dev.driver_data) { rc = -EINVAL; - goto free_dev; + goto error; } gdev->cdev[i]->dev.driver_data = gdev; } @@ -203,9 +206,8 @@ ccwgroup_create(struct device *root, gdev->cdev[0]->dev.bus_id); rc = device_register(&gdev->dev); - if (rc) - goto free_dev; + goto error; get_device(&gdev->dev); rc = device_create_file(&gdev->dev, &dev_attr_ungroup); @@ -216,6 +218,7 @@ ccwgroup_create(struct device *root, rc = __ccwgroup_create_symlinks(gdev); if (!rc) { + mutex_unlock(&gdev->reg_mutex); put_device(&gdev->dev); return 0; } @@ -224,19 +227,12 @@ ccwgroup_create(struct device *root, error: for (i = 0; i < argc; i++) if (gdev->cdev[i]) { - put_device(&gdev->cdev[i]->dev); - gdev->cdev[i]->dev.driver_data = NULL; - } - put_device(&gdev->dev); - return rc; -free_dev: - for (i = 0; i < argc; i++) - if (gdev->cdev[i]) { if (gdev->cdev[i]->dev.driver_data == gdev) gdev->cdev[i]->dev.driver_data = NULL; put_device(&gdev->cdev[i]->dev); } - kfree(gdev); + mutex_unlock(&gdev->reg_mutex); + put_device(&gdev->dev); return rc; } @@ -422,8 +418,12 @@ ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver) get_driver(&cdriver->driver); while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, __ccwgroup_match_all))) { - __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + + mutex_lock(&gdev->reg_mutex); + __ccwgroup_remove_symlinks(gdev); device_unregister(dev); + mutex_unlock(&gdev->reg_mutex); put_device(dev); } put_driver(&cdriver->driver); @@ -444,8 +444,10 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) if (cdev->dev.driver_data) { gdev = (struct ccwgroup_device *)cdev->dev.driver_data; if (get_device(&gdev->dev)) { + mutex_lock(&gdev->reg_mutex); if (device_is_registered(&gdev->dev)) return gdev; + mutex_unlock(&gdev->reg_mutex); put_device(&gdev->dev); } return NULL; @@ -465,6 +467,7 @@ ccwgroup_remove_ccwdev(struct ccw_device *cdev) if (gdev) { __ccwgroup_remove_symlinks(gdev); device_unregister(&gdev->dev); + mutex_unlock(&gdev->reg_mutex); put_device(&gdev->dev); } } diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c new file mode 100644 index 00000000000..ac289e6eadf --- /dev/null +++ b/drivers/s390/cio/chp.c @@ -0,0 +1,683 @@ +/* + * drivers/s390/cio/chp.c + * + * Copyright IBM Corp. 1999,2007 + * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) + * Arnd Bergmann (arndb@de.ibm.com) + * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/bug.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/wait.h> +#include <linux/mutex.h> +#include <asm/errno.h> +#include <asm/chpid.h> +#include <asm/sclp.h> + +#include "cio.h" +#include "css.h" +#include "ioasm.h" +#include "cio_debug.h" +#include "chp.h" + +#define to_channelpath(device) container_of(device, struct channel_path, dev) +#define CHP_INFO_UPDATE_INTERVAL 1*HZ + +enum cfg_task_t { + cfg_none, + cfg_configure, + cfg_deconfigure +}; + +/* Map for pending configure tasks. */ +static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1]; +static DEFINE_MUTEX(cfg_lock); +static int cfg_busy; + +/* Map for channel-path status. */ +static struct sclp_chp_info chp_info; +static DEFINE_MUTEX(info_lock); + +/* Time after which channel-path status may be outdated. */ +static unsigned long chp_info_expires; + +/* Workqueue to perform pending configure tasks. 
*/
+static struct workqueue_struct *chp_wq;
+static struct work_struct cfg_work;
+
+/* Wait queue for configure completion events. */
+static wait_queue_head_t cfg_wait_queue;
+
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+	return css[chpid.cssid]->chps[chpid.id];
+}
+
+/* Set vary state for given chpid. */
+static void set_chp_logically_online(struct chp_id chpid, int onoff)
+{
+	chpid_to_chp(chpid)->state = onoff;
+}
+
+/* On succes return 0 if channel-path is varied offline, 1 if it is varied
+ * online. Return -ENODEV if channel-path is not registered. */
+int chp_get_status(struct chp_id chpid)
+{
+	return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
+}
+
+/**
+ * chp_get_sch_opm - return opm for subchannel
+ * @sch: subchannel
+ *
+ * Calculate and return the operational path mask (opm) based on the chpids
+ * used by the subchannel and the status of the associated channel-paths.
+ */
+u8 chp_get_sch_opm(struct subchannel *sch)
+{
+	struct chp_id chpid;
+	int opm;
+	int i;
+
+	opm = 0;
+	chp_id_init(&chpid);
+	for (i=0; i < 8; i++) {
+		opm <<= 1;
+		chpid.id = sch->schib.pmcw.chpid[i];
+		if (chp_get_status(chpid) != 0)
+			opm |= 1;
+	}
+	return opm;
+}
+
+/**
+ * chp_is_registered - check if a channel-path is registered
+ * @chpid: channel-path ID
+ *
+ * Return non-zero if a channel-path with the given chpid is registered,
+ * zero otherwise.
+ */
+int chp_is_registered(struct chp_id chpid)
+{
+	return chpid_to_chp(chpid) != NULL;
+}
+
+/*
+ * Function: s390_vary_chpid
+ * Varies the specified chpid online or offline
+ */
+static int s390_vary_chpid(struct chp_id chpid, int on)
+{
+	char dbf_text[15];
+	int status;
+
+	sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
+		chpid.id);
+	CIO_TRACE_EVENT( 2, dbf_text);
+
+	status = chp_get_status(chpid);
+	if (status < 0) {
+		printk(KERN_ERR "Can't vary unknown chpid %x.%02x\n",
+		       chpid.cssid, chpid.id);
+		return -EINVAL;
+	}
+
+	if (!on && !status) {
+		printk(KERN_ERR "chpid %x.%02x is already offline\n",
+		       chpid.cssid, chpid.id);
+		return -EINVAL;
+	}
+
+	set_chp_logically_online(chpid, on);
+	chsc_chp_vary(chpid, on);
+	return 0;
+}
+
+/*
+ * Channel measurement related functions
+ */
+static ssize_t chp_measurement_chars_read(struct kobject *kobj, char *buf,
+					  loff_t off, size_t count)
+{
+	struct channel_path *chp;
+	unsigned int size;
+
+	chp = to_channelpath(container_of(kobj, struct device, kobj));
+	if (!chp->cmg_chars)
+		return 0;
+
+	size = sizeof(struct cmg_chars);
+
+	if (off > size)
+		return 0;
+	if (off + count > size)
+		count = size - off;
+	memcpy(buf, chp->cmg_chars + off, count);
+	return count;
+}
+
+static struct bin_attribute chp_measurement_chars_attr = {
+	.attr = {
+		.name = "measurement_chars",
+		.mode = S_IRUSR,
+		.owner = THIS_MODULE,
+	},
+	.size = sizeof(struct cmg_chars),
+	.read = chp_measurement_chars_read,
+};
+
+static void chp_measurement_copy_block(struct cmg_entry *buf,
+				       struct channel_subsystem *css,
+				       struct chp_id chpid)
+{
+	void *area;
+	struct cmg_entry *entry, reference_buf;
+	int idx;
+
+	if (chpid.id < 128) {
+		area = css->cub_addr1;
+		idx = chpid.id;
+	} else {
+		area = css->cub_addr2;
+		idx = chpid.id - 128;
+	}
+	entry = area + (idx * sizeof(struct cmg_entry));
+	do {
+		memcpy(buf, entry, sizeof(*entry));
+		memcpy(&reference_buf, entry, sizeof(*entry));
+	} while (reference_buf.values[0] != buf->values[0]);
+}
+
+static ssize_t chp_measurement_read(struct kobject *kobj, char 
*buf, + loff_t off, size_t count) +{ + struct channel_path *chp; + struct channel_subsystem *css; + unsigned int size; + + chp = to_channelpath(container_of(kobj, struct device, kobj)); + css = to_css(chp->dev.parent); + + size = sizeof(struct cmg_entry); + + /* Only allow single reads. */ + if (off || count < size) + return 0; + chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid); + count = size; + return count; +} + +static struct bin_attribute chp_measurement_attr = { + .attr = { + .name = "measurement", + .mode = S_IRUSR, + .owner = THIS_MODULE, + }, + .size = sizeof(struct cmg_entry), + .read = chp_measurement_read, +}; + +void chp_remove_cmg_attr(struct channel_path *chp) +{ + device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); + device_remove_bin_file(&chp->dev, &chp_measurement_attr); +} + +int chp_add_cmg_attr(struct channel_path *chp) +{ + int ret; + + ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); + if (ret) + return ret; + ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); + if (ret) + device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); + return ret; +} + +/* + * Files for the channel path entries. + */ +static ssize_t chp_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = container_of(dev, struct channel_path, dev); + + if (!chp) + return 0; + return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") : + sprintf(buf, "offline\n")); +} + +static ssize_t chp_status_write(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct channel_path *cp = container_of(dev, struct channel_path, dev); + char cmd[10]; + int num_args; + int error; + + num_args = sscanf(buf, "%5s", cmd); + if (!num_args) + return count; + + if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) + error = s390_vary_chpid(cp->chpid, 1); + else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) + error = s390_vary_chpid(cp->chpid, 0); + else + error = -EINVAL; + + return error < 0 ? 
error : count; + +} + +static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); + +static ssize_t chp_configure_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *cp; + int status; + + cp = container_of(dev, struct channel_path, dev); + status = chp_info_get_status(cp->chpid); + if (status < 0) + return status; + + return snprintf(buf, PAGE_SIZE, "%d\n", status); +} + +static int cfg_wait_idle(void); + +static ssize_t chp_configure_write(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct channel_path *cp; + int val; + char delim; + + if (sscanf(buf, "%d %c", &val, &delim) != 1) + return -EINVAL; + if (val != 0 && val != 1) + return -EINVAL; + cp = container_of(dev, struct channel_path, dev); + chp_cfg_schedule(cp->chpid, val); + cfg_wait_idle(); + + return count; +} + +static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write); + +static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_path *chp = container_of(dev, struct channel_path, dev); + + if (!chp) + return 0; + return sprintf(buf, "%x\n", chp->desc.desc); +} + +static DEVICE_ATTR(type, 0444, chp_type_show, NULL); + +static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + + if (!chp) + return 0; + if (chp->cmg == -1) /* channel measurements not available */ + return sprintf(buf, "unknown\n"); + return sprintf(buf, "%x\n", chp->cmg); +} + +static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); + +static ssize_t chp_shared_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + + if (!chp) + return 0; + if (chp->shared == -1) /* channel measurements not available */ + return sprintf(buf, "unknown\n"); + return sprintf(buf, "%x\n", chp->shared); +} + +static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); + +static struct attribute * chp_attrs[] = { + &dev_attr_status.attr, + &dev_attr_configure.attr, + &dev_attr_type.attr, + &dev_attr_cmg.attr, + &dev_attr_shared.attr, + NULL, +}; + +static struct attribute_group chp_attr_group = { + .attrs = chp_attrs, +}; + +static void chp_release(struct device *dev) +{ + struct channel_path *cp; + + cp = container_of(dev, struct channel_path, dev); + kfree(cp); +} + +/** + * chp_new - register a new channel-path + * @chpid - channel-path ID + * + * Create and register data structure representing new channel-path. Return + * zero on success, non-zero otherwise. + */ +int chp_new(struct chp_id chpid) +{ + struct channel_path *chp; + int ret; + + if (chp_is_registered(chpid)) + return 0; + chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); + if (!chp) + return -ENOMEM; + + /* fill in status, etc. */ + chp->chpid = chpid; + chp->state = 1; + chp->dev.parent = &css[chpid.cssid]->device; + chp->dev.release = chp_release; + snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp%x.%02x", chpid.cssid, + chpid.id); + + /* Obtain channel path description and fill it in. */ + ret = chsc_determine_channel_path_description(chpid, &chp->desc); + if (ret) + goto out_free; + if ((chp->desc.flags & 0x80) == 0) { + ret = -ENODEV; + goto out_free; + } + /* Get channel-measurement characteristics. 
*/ + if (css_characteristics_avail && css_chsc_characteristics.scmc + && css_chsc_characteristics.secm) { + ret = chsc_get_channel_measurement_chars(chp); + if (ret) + goto out_free; + } else { + static int msg_done; + + if (!msg_done) { + printk(KERN_WARNING "cio: Channel measurements not " + "available, continuing.\n"); + msg_done = 1; + } + chp->cmg = -1; + } + + /* make it known to the system */ + ret = device_register(&chp->dev); + if (ret) { + printk(KERN_WARNING "%s: could not register %x.%02x\n", + __func__, chpid.cssid, chpid.id); + goto out_free; + } + ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); + if (ret) { + device_unregister(&chp->dev); + goto out_free; + } + mutex_lock(&css[chpid.cssid]->mutex); + if (css[chpid.cssid]->cm_enabled) { + ret = chp_add_cmg_attr(chp); + if (ret) { + sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); + device_unregister(&chp->dev); + mutex_unlock(&css[chpid.cssid]->mutex); + goto out_free; + } + } + css[chpid.cssid]->chps[chpid.id] = chp; + mutex_unlock(&css[chpid.cssid]->mutex); + return ret; +out_free: + kfree(chp); + return ret; +} + +/** + * chp_get_chp_desc - return newly allocated channel-path description + * @chpid: channel-path ID + * + * On success return a newly allocated copy of the channel-path description + * data associated with the given channel-path ID. Return %NULL on error. + */ +void *chp_get_chp_desc(struct chp_id chpid) +{ + struct channel_path *chp; + struct channel_path_desc *desc; + + chp = chpid_to_chp(chpid); + if (!chp) + return NULL; + desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); + if (!desc) + return NULL; + memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); + return desc; +} + +/** + * chp_process_crw - process channel-path status change + * @id: channel-path ID number + * @status: non-zero if channel-path has become available, zero otherwise + * + * Handle channel-report-words indicating that the status of a channel-path + * has changed. + */ +void chp_process_crw(int id, int status) +{ + struct chp_id chpid; + + chp_id_init(&chpid); + chpid.id = id; + if (status) { + if (!chp_is_registered(chpid)) + chp_new(chpid); + chsc_chp_online(chpid); + } else + chsc_chp_offline(chpid); +} + +static inline int info_bit_num(struct chp_id id) +{ + return id.id + id.cssid * (__MAX_CHPID + 1); +} + +/* Force chp_info refresh on next call to info_validate(). */ +static void info_expire(void) +{ + mutex_lock(&info_lock); + chp_info_expires = jiffies - 1; + mutex_unlock(&info_lock); +} + +/* Ensure that chp_info is up-to-date. */ +static int info_update(void) +{ + int rc; + + mutex_lock(&info_lock); + rc = 0; + if (time_after(jiffies, chp_info_expires)) { + /* Data is too old, update. */ + rc = sclp_chp_read_info(&chp_info); + chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ; + } + mutex_unlock(&info_lock); + + return rc; +} + +/** + * chp_info_get_status - retrieve configure status of a channel-path + * @chpid: channel-path ID + * + * On success, return 0 for standby, 1 for configured, 2 for reserved, + * 3 for not recognized. Return negative error code on error. 
+ */ +int chp_info_get_status(struct chp_id chpid) +{ + int rc; + int bit; + + rc = info_update(); + if (rc) + return rc; + + bit = info_bit_num(chpid); + mutex_lock(&info_lock); + if (!chp_test_bit(chp_info.recognized, bit)) + rc = CHP_STATUS_NOT_RECOGNIZED; + else if (chp_test_bit(chp_info.configured, bit)) + rc = CHP_STATUS_CONFIGURED; + else if (chp_test_bit(chp_info.standby, bit)) + rc = CHP_STATUS_STANDBY; + else + rc = CHP_STATUS_RESERVED; + mutex_unlock(&info_lock); + + return rc; +} + +/* Return configure task for chpid. */ +static enum cfg_task_t cfg_get_task(struct chp_id chpid) +{ + return chp_cfg_task[chpid.cssid][chpid.id]; +} + +/* Set configure task for chpid. */ +static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg) +{ + chp_cfg_task[chpid.cssid][chpid.id] = cfg; +} + +/* Perform one configure/deconfigure request. Reschedule work function until + * last request. */ +static void cfg_func(struct work_struct *work) +{ + struct chp_id chpid; + enum cfg_task_t t; + + mutex_lock(&cfg_lock); + t = cfg_none; + chp_id_for_each(&chpid) { + t = cfg_get_task(chpid); + if (t != cfg_none) { + cfg_set_task(chpid, cfg_none); + break; + } + } + mutex_unlock(&cfg_lock); + + switch (t) { + case cfg_configure: + sclp_chp_configure(chpid); + info_expire(); + chsc_chp_online(chpid); + break; + case cfg_deconfigure: + sclp_chp_deconfigure(chpid); + info_expire(); + chsc_chp_offline(chpid); + break; + case cfg_none: + /* Get updated information after last change. */ + info_update(); + mutex_lock(&cfg_lock); + cfg_busy = 0; + mutex_unlock(&cfg_lock); + wake_up_interruptible(&cfg_wait_queue); + return; + } + queue_work(chp_wq, &cfg_work); +} + +/** + * chp_cfg_schedule - schedule chpid configuration request + * @chpid - channel-path ID + * @configure - Non-zero for configure, zero for deconfigure + * + * Schedule a channel-path configuration/deconfiguration request. + */ +void chp_cfg_schedule(struct chp_id chpid, int configure) +{ + CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id, + configure); + mutex_lock(&cfg_lock); + cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure); + cfg_busy = 1; + mutex_unlock(&cfg_lock); + queue_work(chp_wq, &cfg_work); +} + +/** + * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request + * @chpid - channel-path ID + * + * Cancel an active channel-path deconfiguration request if it has not yet + * been performed. + */ +void chp_cfg_cancel_deconfigure(struct chp_id chpid) +{ + CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id); + mutex_lock(&cfg_lock); + if (cfg_get_task(chpid) == cfg_deconfigure) + cfg_set_task(chpid, cfg_none); + mutex_unlock(&cfg_lock); +} + +static int cfg_wait_idle(void) +{ + if (wait_event_interruptible(cfg_wait_queue, !cfg_busy)) + return -ERESTARTSYS; + return 0; +} + +static int __init chp_init(void) +{ + struct chp_id chpid; + + chp_wq = create_singlethread_workqueue("cio_chp"); + if (!chp_wq) + return -ENOMEM; + INIT_WORK(&cfg_work, cfg_func); + init_waitqueue_head(&cfg_wait_queue); + if (info_update()) + return 0; + /* Register available channel-paths. */ + chp_id_for_each(&chpid) { + if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED) + chp_new(chpid); + } + + return 0; +} + +subsys_initcall(chp_init); diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h new file mode 100644 index 00000000000..65286563c59 --- /dev/null +++ b/drivers/s390/cio/chp.h @@ -0,0 +1,53 @@ +/* + * drivers/s390/cio/chp.h + * + * Copyright IBM Corp. 
2007 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#ifndef S390_CHP_H +#define S390_CHP_H S390_CHP_H + +#include <linux/types.h> +#include <linux/device.h> +#include <asm/chpid.h> +#include "chsc.h" + +#define CHP_STATUS_STANDBY 0 +#define CHP_STATUS_CONFIGURED 1 +#define CHP_STATUS_RESERVED 2 +#define CHP_STATUS_NOT_RECOGNIZED 3 + +static inline int chp_test_bit(u8 *bitmap, int num) +{ + int byte = num >> 3; + int mask = 128 >> (num & 7); + + return (bitmap[byte] & mask) ? 1 : 0; +} + + +struct channel_path { + struct chp_id chpid; + int state; + struct channel_path_desc desc; + /* Channel-measurement related stuff: */ + int cmg; + int shared; + void *cmg_chars; + struct device dev; +}; + +int chp_get_status(struct chp_id chpid); +u8 chp_get_sch_opm(struct subchannel *sch); +int chp_is_registered(struct chp_id chpid); +void *chp_get_chp_desc(struct chp_id chpid); +void chp_process_crw(int id, int available); +void chp_remove_cmg_attr(struct channel_path *chp); +int chp_add_cmg_attr(struct channel_path *chp); +int chp_new(struct chp_id chpid); +void chp_cfg_schedule(struct chp_id chpid, int configure); +void chp_cfg_cancel_deconfigure(struct chp_id chpid); +int chp_info_get_status(struct chp_id chpid); + +#endif /* S390_CHP_H */ diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 6f05a44e381..ea92ac4d657 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -15,202 +15,124 @@ #include <linux/device.h> #include <asm/cio.h> +#include <asm/chpid.h> #include "css.h" #include "cio.h" #include "cio_debug.h" #include "ioasm.h" +#include "chp.h" #include "chsc.h" static void *sei_page; -static int new_channel_path(int chpid); - -static inline void -set_chp_logically_online(int chp, int onoff) -{ - css[0]->chps[chp]->state = onoff; -} - -static int -get_chp_status(int chp) -{ - return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV); -} - -void -chsc_validate_chpids(struct subchannel *sch) -{ - int mask, chp; - - for (chp = 0; chp <= 7; chp++) { - mask = 0x80 >> chp; - if (!get_chp_status(sch->schib.pmcw.chpid[chp])) - /* disable using this path */ - sch->opm &= ~mask; - } -} - -void -chpid_is_actually_online(int chp) -{ - int state; - - state = get_chp_status(chp); - if (state < 0) { - need_rescan = 1; - queue_work(slow_path_wq, &slow_path_work); - } else - WARN_ON(!state); -} +struct chsc_ssd_area { + struct chsc_header request; + u16 :10; + u16 ssid:2; + u16 :4; + u16 f_sch; /* first subchannel */ + u16 :16; + u16 l_sch; /* last subchannel */ + u32 :32; + struct chsc_header response; + u32 :32; + u8 sch_valid : 1; + u8 dev_valid : 1; + u8 st : 3; /* subchannel type */ + u8 zeroes : 3; + u8 unit_addr; /* unit address */ + u16 devno; /* device number */ + u8 path_mask; + u8 fla_valid_mask; + u16 sch; /* subchannel */ + u8 chpid[8]; /* chpids 0-7 */ + u16 fla[8]; /* full link addresses 0-7 */ +} __attribute__ ((packed)); -/* FIXME: this is _always_ called for every subchannel. shouldn't we - * process more than one at a time? 
*/ -static int -chsc_get_sch_desc_irq(struct subchannel *sch, void *page) +int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) { - int ccode, j; - - struct { - struct chsc_header request; - u16 reserved1a:10; - u16 ssid:2; - u16 reserved1b:4; - u16 f_sch; /* first subchannel */ - u16 reserved2; - u16 l_sch; /* last subchannel */ - u32 reserved3; - struct chsc_header response; - u32 reserved4; - u8 sch_valid : 1; - u8 dev_valid : 1; - u8 st : 3; /* subchannel type */ - u8 zeroes : 3; - u8 unit_addr; /* unit address */ - u16 devno; /* device number */ - u8 path_mask; - u8 fla_valid_mask; - u16 sch; /* subchannel */ - u8 chpid[8]; /* chpids 0-7 */ - u16 fla[8]; /* full link addresses 0-7 */ - } __attribute__ ((packed)) *ssd_area; - - ssd_area = page; + unsigned long page; + struct chsc_ssd_area *ssd_area; + int ccode; + int ret; + int i; + int mask; + page = get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!page) + return -ENOMEM; + ssd_area = (struct chsc_ssd_area *) page; ssd_area->request.length = 0x0010; ssd_area->request.code = 0x0004; - - ssd_area->ssid = sch->schid.ssid; - ssd_area->f_sch = sch->schid.sch_no; - ssd_area->l_sch = sch->schid.sch_no; + ssd_area->ssid = schid.ssid; + ssd_area->f_sch = schid.sch_no; + ssd_area->l_sch = schid.sch_no; ccode = chsc(ssd_area); + /* Check response. */ if (ccode > 0) { - pr_debug("chsc returned with ccode = %d\n", ccode); - return (ccode == 3) ? -ENODEV : -EBUSY; + ret = (ccode == 3) ? -ENODEV : -EBUSY; + goto out_free; } - - switch (ssd_area->response.code) { - case 0x0001: /* everything ok */ - break; - case 0x0002: - CIO_CRW_EVENT(2, "Invalid command!\n"); - return -EINVAL; - case 0x0003: - CIO_CRW_EVENT(2, "Error in chsc request block!\n"); - return -EINVAL; - case 0x0004: - CIO_CRW_EVENT(2, "Model does not provide ssd\n"); - return -EOPNOTSUPP; - default: - CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", + if (ssd_area->response.code != 0x0001) { + CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", + schid.ssid, schid.sch_no, ssd_area->response.code); - return -EIO; + ret = -EIO; + goto out_free; } - - /* - * ssd_area->st stores the type of the detected - * subchannel, with the following definitions: - * - * 0: I/O subchannel: All fields have meaning - * 1: CHSC subchannel: Only sch_val, st and sch - * have meaning - * 2: Message subchannel: All fields except unit_addr - * have meaning - * 3: ADM subchannel: Only sch_val, st and sch - * have meaning - * - * Other types are currently undefined. - */ - if (ssd_area->st > 3) { /* uhm, that looks strange... 
*/ - CIO_CRW_EVENT(0, "Strange subchannel type %d" - " for sch 0.%x.%04x\n", ssd_area->st, - sch->schid.ssid, sch->schid.sch_no); - /* - * There may have been a new subchannel type defined in the - * time since this code was written; since we don't know which - * fields have meaning and what to do with it we just jump out - */ - return 0; - } else { - const char *type[4] = {"I/O", "chsc", "message", "ADM"}; - CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n", - sch->schid.ssid, sch->schid.sch_no, - type[ssd_area->st]); - - sch->ssd_info.valid = 1; - sch->ssd_info.type = ssd_area->st; + if (!ssd_area->sch_valid) { + ret = -ENODEV; + goto out_free; } - - if (ssd_area->st == 0 || ssd_area->st == 2) { - for (j = 0; j < 8; j++) { - if (!((0x80 >> j) & ssd_area->path_mask & - ssd_area->fla_valid_mask)) - continue; - sch->ssd_info.chpid[j] = ssd_area->chpid[j]; - sch->ssd_info.fla[j] = ssd_area->fla[j]; + /* Copy data */ + ret = 0; + memset(ssd, 0, sizeof(struct chsc_ssd_info)); + if ((ssd_area->st != 0) && (ssd_area->st != 2)) + goto out_free; + ssd->path_mask = ssd_area->path_mask; + ssd->fla_valid_mask = ssd_area->fla_valid_mask; + for (i = 0; i < 8; i++) { + mask = 0x80 >> i; + if (ssd_area->path_mask & mask) { + chp_id_init(&ssd->chpid[i]); + ssd->chpid[i].id = ssd_area->chpid[i]; } + if (ssd_area->fla_valid_mask & mask) + ssd->fla[i] = ssd_area->fla[i]; } - return 0; +out_free: + free_page(page); + return ret; } -int -css_get_ssd_info(struct subchannel *sch) +static int check_for_io_on_path(struct subchannel *sch, int mask) { - int ret; - void *page; + int cc; - page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!page) - return -ENOMEM; - spin_lock_irq(sch->lock); - ret = chsc_get_sch_desc_irq(sch, page); - if (ret) { - static int cio_chsc_err_msg; - - if (!cio_chsc_err_msg) { - printk(KERN_ERR - "chsc_get_sch_descriptions:" - " Error %d while doing chsc; " - "processing some machine checks may " - "not work\n", ret); - cio_chsc_err_msg = 1; - } - } - spin_unlock_irq(sch->lock); - free_page((unsigned long)page); - if (!ret) { - int j, chpid, mask; - /* Allocate channel path structures, if needed. */ - for (j = 0; j < 8; j++) { - mask = 0x80 >> j; - chpid = sch->ssd_info.chpid[j]; - if ((sch->schib.pmcw.pim & mask) && - (get_chp_status(chpid) < 0)) - new_channel_path(chpid); - } + cc = stsch(sch->schid, &sch->schib); + if (cc) + return 0; + if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask) + return 1; + return 0; +} + +static void terminate_internal_io(struct subchannel *sch) +{ + if (cio_clear(sch)) { + /* Recheck device in case clear failed. */ + sch->lpm = 0; + if (device_trigger_verify(sch) != 0) + css_schedule_eval(sch->schid); + return; } - return ret; + /* Request retry of internal operation. */ + device_set_intretry(sch); + /* Call handler. 
*/ + if (sch->driver && sch->driver->termination) + sch->driver->termination(&sch->dev); } static int @@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) int j; int mask; struct subchannel *sch; - struct channel_path *chpid; + struct chp_id *chpid; struct schib schib; sch = to_subchannel(dev); @@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) if (sch->schib.pmcw.pim == 0x80) goto out_unreg; - if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && - (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && - (sch->schib.pmcw.lpum == mask)) { - int cc; - - cc = cio_clear(sch); - if (cc == -ENODEV) + if (check_for_io_on_path(sch, mask)) { + if (device_is_online(sch)) + device_kill_io(sch); + else { + terminate_internal_io(sch); + /* Re-start path verification. */ + if (sch->driver && sch->driver->verify) + sch->driver->verify(&sch->dev); + } + } else { + /* trigger path verification. */ + if (sch->driver && sch->driver->verify) + sch->driver->verify(&sch->dev); + else if (sch->lpm == mask) goto out_unreg; - /* Request retry of internal operation. */ - device_set_intretry(sch); - /* Call handler. */ - if (sch->driver && sch->driver->termination) - sch->driver->termination(&sch->dev); - goto out_unlock; } - /* trigger path verification. */ - if (sch->driver && sch->driver->verify) - sch->driver->verify(&sch->dev); - else if (sch->lpm == mask) - goto out_unreg; -out_unlock: spin_unlock_irq(sch->lock); return 0; + out_unreg: - spin_unlock_irq(sch->lock); sch->lpm = 0; - if (css_enqueue_subchannel_slow(sch->schid)) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } + spin_unlock_irq(sch->lock); + css_schedule_eval(sch->schid); return 0; } -static void -s390_set_chpid_offline( __u8 chpid) +void chsc_chp_offline(struct chp_id chpid) { char dbf_txt[15]; - struct device *dev; - sprintf(dbf_txt, "chpr%x", chpid); + sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); CIO_TRACE_EVENT(2, dbf_txt); - if (get_chp_status(chpid) <= 0) + if (chp_get_status(chpid) <= 0) return; - dev = get_device(&css[0]->chps[chpid]->dev); - bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev), + bus_for_each_dev(&css_bus_type, NULL, &chpid, s390_subchannel_remove_chpid); - - if (need_rescan || css_slow_subchannels_exist()) - queue_work(slow_path_wq, &slow_path_work); - put_device(dev); -} - -struct res_acc_data { - struct channel_path *chp; - u32 fla_mask; - u16 fla; -}; - -static int -s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) -{ - int found; - int chp; - int ccode; - - found = 0; - for (chp = 0; chp <= 7; chp++) - /* - * check if chpid is in information updated by ssd - */ - if (sch->ssd_info.valid && - sch->ssd_info.chpid[chp] == res_data->chp->id && - (sch->ssd_info.fla[chp] & res_data->fla_mask) - == res_data->fla) { - found = 1; - break; - } - - if (found == 0) - return 0; - - /* - * Do a stsch to update our subchannel structure with the - * new path information and eventually check for logically - * offline chpids. - */ - ccode = stsch(sch->schid, &sch->schib); - if (ccode > 0) - return 0; - - return 0x80 >> chp; } static int s390_process_res_acc_new_sch(struct subchannel_id schid) { struct schib schib; - int ret; /* * We don't know the device yet, but since a path * may be available now to the device we'll have @@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid) */ if (stsch_err(schid, &schib)) /* We're through */ - return need_rescan ? 
-EAGAIN : -ENXIO; + return -ENXIO; /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(schid); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - return -EAGAIN; + css_schedule_eval(schid); + return 0; +} + +struct res_acc_data { + struct chp_id chpid; + u32 fla_mask; + u16 fla; +}; + +static int get_res_chpid_mask(struct chsc_ssd_info *ssd, + struct res_acc_data *data) +{ + int i; + int mask; + + for (i = 0; i < 8; i++) { + mask = 0x80 >> i; + if (!(ssd->path_mask & mask)) + continue; + if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid)) + continue; + if ((ssd->fla_valid_mask & mask) && + ((ssd->fla[i] & data->fla_mask) != data->fla)) + continue; + return mask; } return 0; } @@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) return s390_process_res_acc_new_sch(schid); spin_lock_irq(sch->lock); - - chp_mask = s390_process_res_acc_sch(res_data, sch); - - if (chp_mask == 0) { - spin_unlock_irq(sch->lock); - put_device(&sch->dev); - return 0; - } + chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); + if (chp_mask == 0) + goto out; + if (stsch(sch->schid, &sch->schib)) + goto out; old_lpm = sch->lpm; sch->lpm = ((sch->schib.pmcw.pim & sch->schib.pmcw.pam & @@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) device_trigger_reprobe(sch); else if (sch->driver && sch->driver->verify) sch->driver->verify(&sch->dev); - +out: spin_unlock_irq(sch->lock); put_device(&sch->dev); return 0; } - -static int -s390_process_res_acc (struct res_acc_data *res_data) +static void s390_process_res_acc (struct res_acc_data *res_data) { - int rc; char dbf_txt[15]; - sprintf(dbf_txt, "accpr%x", res_data->chp->id); + sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid, + res_data->chpid.id); CIO_TRACE_EVENT( 2, dbf_txt); if (res_data->fla != 0) { sprintf(dbf_txt, "fla%x", res_data->fla); @@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data) * The more information we have (info), the less scanning * will we have to do. 
*/ - rc = for_each_subchannel(__s390_process_res_acc, res_data); - if (css_slow_subchannels_exist()) - rc = -EAGAIN; - else if (rc != -EAGAIN) - rc = 0; - return rc; + for_each_subchannel(__s390_process_res_acc, res_data); } static int @@ -480,43 +357,45 @@ struct chsc_sei_area { /* ccdf has to be big enough for a link-incident record */ } __attribute__ ((packed)); -static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) +static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) { - int chpid; + struct chp_id chpid; + int id; CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", sei_area->rs, sei_area->rsid); if (sei_area->rs != 4) - return 0; - chpid = __get_chpid_from_lir(sei_area->ccdf); - if (chpid < 0) + return; + id = __get_chpid_from_lir(sei_area->ccdf); + if (id < 0) CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); - else - s390_set_chpid_offline(chpid); - - return 0; + else { + chp_id_init(&chpid); + chpid.id = id; + chsc_chp_offline(chpid); + } } -static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) +static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) { struct res_acc_data res_data; - struct device *dev; + struct chp_id chpid; int status; - int rc; CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); if (sei_area->rs != 4) - return 0; + return; + chp_id_init(&chpid); + chpid.id = sei_area->rsid; /* allocate a new channel path structure, if needed */ - status = get_chp_status(sei_area->rsid); + status = chp_get_status(chpid); if (status < 0) - new_channel_path(sei_area->rsid); + chp_new(chpid); else if (!status) - return 0; - dev = get_device(&css[0]->chps[sei_area->rsid]->dev); + return; memset(&res_data, 0, sizeof(struct res_acc_data)); - res_data.chp = to_channelpath(dev); + res_data.chpid = chpid; if ((sei_area->vf & 0xc0) != 0) { res_data.fla = sei_area->fla; if ((sei_area->vf & 0xc0) == 0xc0) @@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) /* link address */ res_data.fla_mask = 0xff00; } - rc = s390_process_res_acc(&res_data); - put_device(dev); - - return rc; + s390_process_res_acc(&res_data); } -static int chsc_process_sei(struct chsc_sei_area *sei_area) +struct chp_config_data { + u8 map[32]; + u8 op; + u8 pc; +}; + +static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) { - int rc; + struct chp_config_data *data; + struct chp_id chpid; + int num; + + CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); + if (sei_area->rs != 0) + return; + data = (struct chp_config_data *) &(sei_area->ccdf); + chp_id_init(&chpid); + for (num = 0; num <= __MAX_CHPID; num++) { + if (!chp_test_bit(data->map, num)) + continue; + chpid.id = num; + printk(KERN_WARNING "cio: processing configure event %d for " + "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id); + switch (data->op) { + case 0: + chp_cfg_schedule(chpid, 1); + break; + case 1: + chp_cfg_schedule(chpid, 0); + break; + case 2: + chp_cfg_cancel_deconfigure(chpid); + break; + } + } +} +static void chsc_process_sei(struct chsc_sei_area *sei_area) +{ /* Check if we might have lost some information. */ - if (sei_area->flags & 0x40) + if (sei_area->flags & 0x40) { CIO_CRW_EVENT(2, "chsc: event overflow\n"); + css_schedule_eval_all(); + } /* which kind of information was stored? 
*/ - rc = 0; switch (sei_area->cc) { case 1: /* link incident*/ - rc = chsc_process_sei_link_incident(sei_area); + chsc_process_sei_link_incident(sei_area); break; case 2: /* i/o resource accessibiliy */ - rc = chsc_process_sei_res_acc(sei_area); + chsc_process_sei_res_acc(sei_area); + break; + case 8: /* channel-path-configuration notification */ + chsc_process_sei_chp_config(sei_area); break; default: /* other stuff */ CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", sei_area->cc); break; } - - return rc; } -int chsc_process_crw(void) +void chsc_process_crw(void) { struct chsc_sei_area *sei_area; - int ret; - int rc; if (!sei_page) - return 0; + return; /* Access to sei_page is serialized through machine check handler * thread, so no need for locking. */ sei_area = sei_page; CIO_TRACE_EVENT( 2, "prcss"); - ret = 0; do { memset(sei_area, 0, sizeof(*sei_area)); sei_area->request.length = 0x0010; @@ -580,37 +490,26 @@ int chsc_process_crw(void) if (sei_area->response.code == 0x0001) { CIO_CRW_EVENT(4, "chsc: sei successful\n"); - rc = chsc_process_sei(sei_area); - if (rc) - ret = rc; + chsc_process_sei(sei_area); } else { CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", sei_area->response.code); - ret = 0; break; } } while (sei_area->flags & 0x80); - - return ret; } static int __chp_add_new_sch(struct subchannel_id schid) { struct schib schib; - int ret; if (stsch_err(schid, &schib)) /* We're through */ - return need_rescan ? -EAGAIN : -ENXIO; + return -ENXIO; /* Put it on the slow path. */ - ret = css_enqueue_subchannel_slow(schid); - if (ret) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - return -EAGAIN; - } + css_schedule_eval(schid); return 0; } @@ -619,10 +518,10 @@ static int __chp_add(struct subchannel_id schid, void *data) { int i, mask; - struct channel_path *chp; + struct chp_id *chpid; struct subchannel *sch; - chp = data; + chpid = data; sch = get_subchannel_by_schid(schid); if (!sch) /* Check if the subchannel is now available. */ @@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data) for (i=0; i<8; i++) { mask = 0x80 >> i; if ((sch->schib.pmcw.pim & mask) && - (sch->schib.pmcw.chpid[i] == chp->id)) { + (sch->schib.pmcw.chpid[i] == chpid->id)) { if (stsch(sch->schid, &sch->schib) != 0) { /* Endgame. */ spin_unlock_irq(sch->lock); @@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data) return 0; } -static int -chp_add(int chpid) +void chsc_chp_online(struct chp_id chpid) { - int rc; char dbf_txt[15]; - struct device *dev; - if (!get_chp_status(chpid)) - return 0; /* no need to do the rest */ - - sprintf(dbf_txt, "cadd%x", chpid); + sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); CIO_TRACE_EVENT(2, dbf_txt); - dev = get_device(&css[0]->chps[chpid]->dev); - rc = for_each_subchannel(__chp_add, to_channelpath(dev)); - if (css_slow_subchannels_exist()) - rc = -EAGAIN; - if (rc != -EAGAIN) - rc = 0; - put_device(dev); - return rc; + if (chp_get_status(chpid) != 0) + for_each_subchannel(__chp_add, &chpid); } -/* - * Handling of crw machine checks with channel path source. - */ -int -chp_process_crw(int chpid, int on) -{ - if (on == 0) { - /* Path has gone. We use the link incident routine.*/ - s390_set_chpid_offline(chpid); - return 0; /* De-register is async anyway. */ - } - /* - * Path has come. Allocate a new channel path structure, - * if needed. - */ - if (get_chp_status(chpid) < 0) - new_channel_path(chpid); - /* Avoid the extra overhead in process_rec_acc. 
*/ - return chp_add(chpid); -} - -static int check_for_io_on_path(struct subchannel *sch, int index) -{ - int cc; - - cc = stsch(sch->schid, &sch->schib); - if (cc) - return 0; - if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) - return 1; - return 0; -} - -static void terminate_internal_io(struct subchannel *sch) -{ - if (cio_clear(sch)) { - /* Recheck device in case clear failed. */ - sch->lpm = 0; - if (device_trigger_verify(sch) != 0) { - if(css_enqueue_subchannel_slow(sch->schid)) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - } - return; - } - /* Request retry of internal operation. */ - device_set_intretry(sch); - /* Call handler. */ - if (sch->driver && sch->driver->termination) - sch->driver->termination(&sch->dev); -} - -static void -__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) +static void __s390_subchannel_vary_chpid(struct subchannel *sch, + struct chp_id chpid, int on) { int chp, old_lpm; + int mask; unsigned long flags; - if (!sch->ssd_info.valid) - return; - spin_lock_irqsave(sch->lock, flags); old_lpm = sch->lpm; for (chp = 0; chp < 8; chp++) { - if (sch->ssd_info.chpid[chp] != chpid) + mask = 0x80 >> chp; + if (!(sch->ssd_info.path_mask & mask)) + continue; + if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid)) continue; if (on) { - sch->opm |= (0x80 >> chp); - sch->lpm |= (0x80 >> chp); + sch->opm |= mask; + sch->lpm |= mask; if (!old_lpm) device_trigger_reprobe(sch); else if (sch->driver && sch->driver->verify) sch->driver->verify(&sch->dev); break; } - sch->opm &= ~(0x80 >> chp); - sch->lpm &= ~(0x80 >> chp); - if (check_for_io_on_path(sch, chp)) { + sch->opm &= ~mask; + sch->lpm &= ~mask; + if (check_for_io_on_path(sch, mask)) { if (device_is_online(sch)) /* Path verification is done after killing. */ device_kill_io(sch); - else + else { /* Kill and retry internal I/O. */ terminate_internal_io(sch); - } else if (!sch->lpm) { - if (device_trigger_verify(sch) != 0) { - if (css_enqueue_subchannel_slow(sch->schid)) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } + /* Re-start path verification. */ + if (sch->driver && sch->driver->verify) + sch->driver->verify(&sch->dev); } + } else if (!sch->lpm) { + if (device_trigger_verify(sch) != 0) + css_schedule_eval(sch->schid); } else if (sch->driver && sch->driver->verify) sch->driver->verify(&sch->dev); break; @@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) spin_unlock_irqrestore(sch->lock, flags); } -static int -s390_subchannel_vary_chpid_off(struct device *dev, void *data) +static int s390_subchannel_vary_chpid_off(struct device *dev, void *data) { struct subchannel *sch; - __u8 *chpid; + struct chp_id *chpid; sch = to_subchannel(dev); chpid = data; @@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data) return 0; } -static int -s390_subchannel_vary_chpid_on(struct device *dev, void *data) +static int s390_subchannel_vary_chpid_on(struct device *dev, void *data) { struct subchannel *sch; - __u8 *chpid; + struct chp_id *chpid; sch = to_subchannel(dev); chpid = data; @@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data) /* We're through */ return -ENXIO; /* Put it on the slow path. 
*/ - if (css_enqueue_subchannel_slow(schid)) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - return -EAGAIN; - } + css_schedule_eval(schid); return 0; } -/* - * Function: s390_vary_chpid - * Varies the specified chpid online or offline +/** + * chsc_chp_vary - propagate channel-path vary operation to subchannels + * @chpid: channl-path ID + * @on: non-zero for vary online, zero for vary offline */ -static int -s390_vary_chpid( __u8 chpid, int on) +int chsc_chp_vary(struct chp_id chpid, int on) { - char dbf_text[15]; - int status; - - sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); - CIO_TRACE_EVENT( 2, dbf_text); - - status = get_chp_status(chpid); - if (status < 0) { - printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid); - return -EINVAL; - } - - if (!on && !status) { - printk(KERN_ERR "chpid %x is already offline\n", chpid); - return -EINVAL; - } - - set_chp_logically_online(chpid, on); - /* * Redo PathVerification on the devices the chpid connects to */ @@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on) if (on) /* Scan for new devices on varied on path. */ for_each_subchannel(__s390_vary_chpid_on, NULL); - if (need_rescan || css_slow_subchannels_exist()) - queue_work(slow_path_wq, &slow_path_work); return 0; } -/* - * Channel measurement related functions - */ -static ssize_t -chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off, - size_t count) -{ - struct channel_path *chp; - unsigned int size; - - chp = to_channelpath(container_of(kobj, struct device, kobj)); - if (!chp->cmg_chars) - return 0; - - size = sizeof(struct cmg_chars); - - if (off > size) - return 0; - if (off + count > size) - count = size - off; - memcpy(buf, chp->cmg_chars + off, count); - return count; -} - -static struct bin_attribute chp_measurement_chars_attr = { - .attr = { - .name = "measurement_chars", - .mode = S_IRUSR, - .owner = THIS_MODULE, - }, - .size = sizeof(struct cmg_chars), - .read = chp_measurement_chars_read, -}; - -static void -chp_measurement_copy_block(struct cmg_entry *buf, - struct channel_subsystem *css, int chpid) -{ - void *area; - struct cmg_entry *entry, reference_buf; - int idx; - - if (chpid < 128) { - area = css->cub_addr1; - idx = chpid; - } else { - area = css->cub_addr2; - idx = chpid - 128; - } - entry = area + (idx * sizeof(struct cmg_entry)); - do { - memcpy(buf, entry, sizeof(*entry)); - memcpy(&reference_buf, entry, sizeof(*entry)); - } while (reference_buf.values[0] != buf->values[0]); -} - -static ssize_t -chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count) -{ - struct channel_path *chp; - struct channel_subsystem *css; - unsigned int size; - - chp = to_channelpath(container_of(kobj, struct device, kobj)); - css = to_css(chp->dev.parent); - - size = sizeof(struct cmg_entry); - - /* Only allow single reads. 
*/ - if (off || count < size) - return 0; - chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id); - count = size; - return count; -} - -static struct bin_attribute chp_measurement_attr = { - .attr = { - .name = "measurement", - .mode = S_IRUSR, - .owner = THIS_MODULE, - }, - .size = sizeof(struct cmg_entry), - .read = chp_measurement_read, -}; - -static void -chsc_remove_chp_cmg_attr(struct channel_path *chp) -{ - device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); - device_remove_bin_file(&chp->dev, &chp_measurement_attr); -} - -static int -chsc_add_chp_cmg_attr(struct channel_path *chp) -{ - int ret; - - ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); - if (ret) - return ret; - ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); - if (ret) - device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); - return ret; -} - static void chsc_remove_cmg_attr(struct channel_subsystem *css) { @@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css) for (i = 0; i <= __MAX_CHPID; i++) { if (!css->chps[i]) continue; - chsc_remove_chp_cmg_attr(css->chps[i]); + chp_remove_cmg_attr(css->chps[i]); } } @@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css) for (i = 0; i <= __MAX_CHPID; i++) { if (!css->chps[i]) continue; - ret = chsc_add_chp_cmg_attr(css->chps[i]); + ret = chp_add_cmg_attr(css->chps[i]); if (ret) goto cleanup; } @@ -1007,12 +708,11 @@ cleanup: for (--i; i >= 0; i--) { if (!css->chps[i]) continue; - chsc_remove_chp_cmg_attr(css->chps[i]); + chp_remove_cmg_attr(css->chps[i]); } return ret; } - static int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) { @@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable) } else chsc_remove_cmg_attr(css); } - if (enable && !css->cm_enabled) { + if (!css->cm_enabled) { free_page((unsigned long)css->cub_addr1); free_page((unsigned long)css->cub_addr2); } @@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable) return ret; } -/* - * Files for the channel path entries. - */ -static ssize_t -chp_status_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct channel_path *chp = container_of(dev, struct channel_path, dev); - - if (!chp) - return 0; - return (get_chp_status(chp->id) ? sprintf(buf, "online\n") : - sprintf(buf, "offline\n")); -} - -static ssize_t -chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct channel_path *cp = container_of(dev, struct channel_path, dev); - char cmd[10]; - int num_args; - int error; - - num_args = sscanf(buf, "%5s", cmd); - if (!num_args) - return count; - - if (!strnicmp(cmd, "on", 2)) - error = s390_vary_chpid(cp->id, 1); - else if (!strnicmp(cmd, "off", 3)) - error = s390_vary_chpid(cp->id, 0); - else - error = -EINVAL; - - return error < 0 ? 
error : count; - -} - -static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); - -static ssize_t -chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct channel_path *chp = container_of(dev, struct channel_path, dev); - - if (!chp) - return 0; - return sprintf(buf, "%x\n", chp->desc.desc); -} - -static DEVICE_ATTR(type, 0444, chp_type_show, NULL); - -static ssize_t -chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct channel_path *chp = to_channelpath(dev); - - if (!chp) - return 0; - if (chp->cmg == -1) /* channel measurements not available */ - return sprintf(buf, "unknown\n"); - return sprintf(buf, "%x\n", chp->cmg); -} - -static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); - -static ssize_t -chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct channel_path *chp = to_channelpath(dev); - - if (!chp) - return 0; - if (chp->shared == -1) /* channel measurements not available */ - return sprintf(buf, "unknown\n"); - return sprintf(buf, "%x\n", chp->shared); -} - -static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); - -static struct attribute * chp_attrs[] = { - &dev_attr_status.attr, - &dev_attr_type.attr, - &dev_attr_cmg.attr, - &dev_attr_shared.attr, - NULL, -}; - -static struct attribute_group chp_attr_group = { - .attrs = chp_attrs, -}; - -static void -chp_release(struct device *dev) -{ - struct channel_path *cp; - - cp = container_of(dev, struct channel_path, dev); - kfree(cp); -} - -static int -chsc_determine_channel_path_description(int chpid, - struct channel_path_desc *desc) +int chsc_determine_channel_path_description(struct chp_id chpid, + struct channel_path_desc *desc) { int ccode, ret; @@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid, scpd_area->request.length = 0x0010; scpd_area->request.code = 0x0002; - scpd_area->first_chpid = chpid; - scpd_area->last_chpid = chpid; + scpd_area->first_chpid = chpid.id; + scpd_area->last_chpid = chpid.id; ccode = chsc(scpd_area); if (ccode > 0) { @@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, } } -static int -chsc_get_channel_measurement_chars(struct channel_path *chp) +int chsc_get_channel_measurement_chars(struct channel_path *chp) { int ccode, ret; @@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp) scmc_area->request.length = 0x0010; scmc_area->request.code = 0x0022; - scmc_area->first_chpid = chp->id; - scmc_area->last_chpid = chp->id; + scmc_area->first_chpid = chp->chpid.id; + scmc_area->last_chpid = chp->chpid.id; ccode = chsc(scmc_area); if (ccode > 0) { @@ -1392,94 +990,6 @@ out: return ret; } -/* - * Entries for chpids on the system bus. - * This replaces /proc/chpids. - */ -static int -new_channel_path(int chpid) -{ - struct channel_path *chp; - int ret; - - chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); - if (!chp) - return -ENOMEM; - - /* fill in status, etc. */ - chp->id = chpid; - chp->state = 1; - chp->dev.parent = &css[0]->device; - chp->dev.release = chp_release; - snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); - - /* Obtain channel path description and fill it in. */ - ret = chsc_determine_channel_path_description(chpid, &chp->desc); - if (ret) - goto out_free; - /* Get channel-measurement characteristics. 
*/ - if (css_characteristics_avail && css_chsc_characteristics.scmc - && css_chsc_characteristics.secm) { - ret = chsc_get_channel_measurement_chars(chp); - if (ret) - goto out_free; - } else { - static int msg_done; - - if (!msg_done) { - printk(KERN_WARNING "cio: Channel measurements not " - "available, continuing.\n"); - msg_done = 1; - } - chp->cmg = -1; - } - - /* make it known to the system */ - ret = device_register(&chp->dev); - if (ret) { - printk(KERN_WARNING "%s: could not register %02x\n", - __func__, chpid); - goto out_free; - } - ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); - if (ret) { - device_unregister(&chp->dev); - goto out_free; - } - mutex_lock(&css[0]->mutex); - if (css[0]->cm_enabled) { - ret = chsc_add_chp_cmg_attr(chp); - if (ret) { - sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); - device_unregister(&chp->dev); - mutex_unlock(&css[0]->mutex); - goto out_free; - } - } - css[0]->chps[chpid] = chp; - mutex_unlock(&css[0]->mutex); - return ret; -out_free: - kfree(chp); - return ret; -} - -void * -chsc_get_chp_desc(struct subchannel *sch, int chp_no) -{ - struct channel_path *chp; - struct channel_path_desc *desc; - - chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]]; - if (!chp) - return NULL; - desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); - if (!desc) - return NULL; - memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); - return desc; -} - static int __init chsc_alloc_sei_area(void) { diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 0fb2b024208..2ad81d11cf7 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -1,9 +1,10 @@ #ifndef S390_CHSC_H #define S390_CHSC_H -#define CHSC_SEI_ACC_CHPID 1 -#define CHSC_SEI_ACC_LINKADDR 2 -#define CHSC_SEI_ACC_FULLLINKADDR 3 +#include <linux/types.h> +#include <linux/device.h> +#include <asm/chpid.h> +#include "schid.h" #define CHSC_SDA_OC_MSS 0x2 @@ -33,23 +34,9 @@ struct channel_path_desc { u8 chpp; } __attribute__ ((packed)); -struct channel_path { - int id; - int state; - struct channel_path_desc desc; - /* Channel-measurement related stuff: */ - int cmg; - int shared; - void *cmg_chars; - struct device dev; -}; +struct channel_path; -extern void s390_process_css( void ); -extern void chsc_validate_chpids(struct subchannel *); -extern void chpid_is_actually_online(int); -extern int css_get_ssd_info(struct subchannel *); -extern int chsc_process_crw(void); -extern int chp_process_crw(int, int); +extern void chsc_process_crw(void); struct css_general_char { u64 : 41; @@ -82,15 +69,26 @@ struct css_chsc_char { extern struct css_general_char css_general_characteristics; extern struct css_chsc_char css_chsc_characteristics; +struct chsc_ssd_info { + u8 path_mask; + u8 fla_valid_mask; + struct chp_id chpid[8]; + u16 fla[8]; +}; +extern int chsc_get_ssd_info(struct subchannel_id schid, + struct chsc_ssd_info *ssd); extern int chsc_determine_css_characteristics(void); extern int css_characteristics_avail; -extern void *chsc_get_chp_desc(struct subchannel*, int); - extern int chsc_enable_facility(int); struct channel_subsystem; extern int chsc_secm(struct channel_subsystem *, int); -#define to_channelpath(device) container_of(device, struct channel_path, dev) +int chsc_chp_vary(struct chp_id chpid, int on); +int chsc_determine_channel_path_description(struct chp_id chpid, + struct channel_path_desc *desc); +void chsc_chp_online(struct chp_id chpid); +void chsc_chp_offline(struct chp_id chpid); +int chsc_get_channel_measurement_chars(struct channel_path 
*chp); #endif diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9cb129ab5be..ea1defba569 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -22,6 +22,7 @@ #include <asm/setup.h> #include <asm/reset.h> #include <asm/ipl.h> +#include <asm/chpid.h> #include "airq.h" #include "cio.h" #include "css.h" @@ -29,6 +30,7 @@ #include "ioasm.h" #include "blacklist.h" #include "cio_debug.h" +#include "chp.h" #include "../s390mach.h" debug_info_t *cio_debug_msg_id; @@ -592,9 +594,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) err = -ENODEV; goto out; } - sch->opm = 0xff; - if (!cio_is_console(sch->schid)) - chsc_validate_chpids(sch); + if (cio_is_console(sch->schid)) + sch->opm = 0xff; + else + sch->opm = chp_get_sch_opm(sch); sch->lpm = sch->schib.pmcw.pam & sch->opm; CIO_DEBUG(KERN_INFO, 0, @@ -954,6 +957,7 @@ static void css_reset(void) { int i, ret; unsigned long long timeout; + struct chp_id chpid; /* Reset subchannels. */ for_each_subchannel(__shutdown_subchannel_easy, NULL); @@ -963,8 +967,10 @@ static void css_reset(void) __ctl_set_bit(14, 28); /* Temporarily reenable machine checks. */ local_mcck_enable(); + chp_id_init(&chpid); for (i = 0; i <= __MAX_CHPID; i++) { - ret = rchp(i); + chpid.id = i; + ret = rchp(chpid); if ((ret == 0) || (ret == 2)) /* * rchp either succeeded, or another rchp is already @@ -1048,37 +1054,19 @@ void reipl_ccw_dev(struct ccw_dev_id *devid) do_reipl_asm(*((__u32*)&schid)); } -static struct schib __initdata ipl_schib; - -/* - * ipl_save_parameters gets called very early. It is not allowed to access - * anything in the bss section at all. The bss section is not cleared yet, - * but may contain some ipl parameters written by the firmware. - * These parameters (if present) are copied to 0x2000. - * To avoid corruption of the ipl parameters, all variables used by this - * function must reside on the stack or in the data section. 
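The same conversion applies to rchp(): the reset loop in css_reset() above builds a struct chp_id per iteration instead of passing the raw number. Reduced to a single channel path, the pattern is (example_* name illustrative):

static int example_reset_chpid(int nr)
{
	struct chp_id chpid;

	chp_id_init(&chpid);
	chpid.id = nr;
	/* rchp() returns the RCHP condition code; css_reset() treats
	 * 0 and 2 as success. */
	return rchp(chpid);
}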
- */ -void ipl_save_parameters(void) +int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) { struct subchannel_id schid; - unsigned int *ipl_ptr; - void *src, *dst; + struct schib schib; schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; if (!schid.one) - return; - if (stsch(schid, &ipl_schib)) - return; - if (!ipl_schib.pmcw.dnv) - return; - ipl_devno = ipl_schib.pmcw.dev; - ipl_flags |= IPL_DEVNO_VALID; - if (!ipl_schib.pmcw.qf) - return; - ipl_flags |= IPL_PARMBLOCK_VALID; - ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; - src = (void *)(unsigned long)*ipl_ptr; - dst = (void *)IPL_PARMBLOCK_ORIGIN; - memmove(dst, src, PAGE_SIZE); - *ipl_ptr = IPL_PARMBLOCK_ORIGIN; + return -ENODEV; + if (stsch(schid, &schib)) + return -ENODEV; + if (!schib.pmcw.dnv) + return -ENODEV; + iplinfo->devno = schib.pmcw.dev; + iplinfo->is_qdio = schib.pmcw.qf; + return 0; } diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 35154a21035..7446c39951a 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -1,18 +1,11 @@ #ifndef S390_CIO_H #define S390_CIO_H -#include "schid.h" #include <linux/mutex.h> - -/* - * where we put the ssd info - */ -struct ssd_info { - __u8 valid:1; - __u8 type:7; /* subchannel type */ - __u8 chpid[8]; /* chpids */ - __u16 fla[8]; /* full link addresses */ -} __attribute__ ((packed)); +#include <linux/device.h> +#include <asm/chpid.h> +#include "chsc.h" +#include "schid.h" /* * path management control word @@ -108,7 +101,7 @@ struct subchannel { struct schib schib; /* subchannel information block */ struct orb orb; /* operation request block */ struct ccw1 sense_ccw; /* static ccw for sense command */ - struct ssd_info ssd_info; /* subchannel description */ + struct chsc_ssd_info ssd_info; /* subchannel description */ struct device dev; /* entry in device tree */ struct css_driver *driver; } __attribute__ ((aligned(8))); diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 90b22faabbf..28abd697be1 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -476,7 +476,7 @@ struct cmb_area { }; static struct cmb_area cmb_area = { - .lock = SPIN_LOCK_UNLOCKED, + .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock), .list = LIST_HEAD_INIT(cmb_area.list), .num_channels = 1024, }; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index fe0ace7aece..27c6d9e55b2 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -20,8 +20,9 @@ #include "ioasm.h" #include "chsc.h" #include "device.h" +#include "idset.h" +#include "chp.h" -int need_rescan = 0; int css_init_done = 0; static int need_reprobe = 0; static int max_ssid = 0; @@ -125,8 +126,52 @@ void css_sch_device_unregister(struct subchannel *sch) mutex_unlock(&sch->reg_mutex); } -static int -css_register_subchannel(struct subchannel *sch) +static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) +{ + int i; + int mask; + + memset(ssd, 0, sizeof(struct chsc_ssd_info)); + ssd->path_mask = pmcw->pim; + for (i = 0; i < 8; i++) { + mask = 0x80 >> i; + if (pmcw->pim & mask) { + chp_id_init(&ssd->chpid[i]); + ssd->chpid[i].id = pmcw->chpid[i]; + } + } +} + +static void ssd_register_chpids(struct chsc_ssd_info *ssd) +{ + int i; + int mask; + + for (i = 0; i < 8; i++) { + mask = 0x80 >> i; + if (ssd->path_mask & mask) + if (!chp_is_registered(ssd->chpid[i])) + chp_new(ssd->chpid[i]); + } +} + +void css_update_ssd_info(struct subchannel *sch) +{ + int ret; + + if (cio_is_console(sch->schid)) { + /* Console is initialized too early for functions requiring + * memory 
allocation. */ + ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); + } else { + ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); + if (ret) + ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); + ssd_register_chpids(&sch->ssd_info); + } +} + +static int css_register_subchannel(struct subchannel *sch) { int ret; @@ -135,9 +180,7 @@ css_register_subchannel(struct subchannel *sch) sch->dev.bus = &css_bus_type; sch->dev.release = &css_subchannel_release; sch->dev.groups = subch_attr_groups; - - css_get_ssd_info(sch); - + css_update_ssd_info(sch); /* make it known to the system */ ret = css_sch_device_register(sch); if (ret) { @@ -306,7 +349,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) return css_probe_device(schid); } -static int css_evaluate_subchannel(struct subchannel_id schid, int slow) +static void css_evaluate_subchannel(struct subchannel_id schid, int slow) { struct subchannel *sch; int ret; @@ -317,53 +360,66 @@ static int css_evaluate_subchannel(struct subchannel_id schid, int slow) put_device(&sch->dev); } else ret = css_evaluate_new_subchannel(schid, slow); - - return ret; + if (ret == -EAGAIN) + css_schedule_eval(schid); } -static int -css_rescan_devices(struct subchannel_id schid, void *data) +static struct idset *slow_subchannel_set; +static spinlock_t slow_subchannel_lock; + +static int __init slow_subchannel_init(void) { - return css_evaluate_subchannel(schid, 1); + spin_lock_init(&slow_subchannel_lock); + slow_subchannel_set = idset_sch_new(); + if (!slow_subchannel_set) { + printk(KERN_WARNING "cio: could not allocate slow subchannel " + "set\n"); + return -ENOMEM; + } + return 0; } -struct slow_subchannel { - struct list_head slow_list; - struct subchannel_id schid; -}; - -static LIST_HEAD(slow_subchannels_head); -static DEFINE_SPINLOCK(slow_subchannel_lock); +subsys_initcall(slow_subchannel_init); -static void -css_trigger_slow_path(struct work_struct *unused) +static void css_slow_path_func(struct work_struct *unused) { - CIO_TRACE_EVENT(4, "slowpath"); - - if (need_rescan) { - need_rescan = 0; - for_each_subchannel(css_rescan_devices, NULL); - return; - } + struct subchannel_id schid; + CIO_TRACE_EVENT(4, "slowpath"); spin_lock_irq(&slow_subchannel_lock); - while (!list_empty(&slow_subchannels_head)) { - struct slow_subchannel *slow_sch = - list_entry(slow_subchannels_head.next, - struct slow_subchannel, slow_list); - - list_del_init(slow_subchannels_head.next); + init_subchannel_id(&schid); + while (idset_sch_get_first(slow_subchannel_set, &schid)) { + idset_sch_del(slow_subchannel_set, schid); spin_unlock_irq(&slow_subchannel_lock); - css_evaluate_subchannel(slow_sch->schid, 1); + css_evaluate_subchannel(schid, 1); spin_lock_irq(&slow_subchannel_lock); - kfree(slow_sch); } spin_unlock_irq(&slow_subchannel_lock); } -DECLARE_WORK(slow_path_work, css_trigger_slow_path); +static DECLARE_WORK(slow_path_work, css_slow_path_func); struct workqueue_struct *slow_path_wq; +void css_schedule_eval(struct subchannel_id schid) +{ + unsigned long flags; + + spin_lock_irqsave(&slow_subchannel_lock, flags); + idset_sch_add(slow_subchannel_set, schid); + queue_work(slow_path_wq, &slow_path_work); + spin_unlock_irqrestore(&slow_subchannel_lock, flags); +} + +void css_schedule_eval_all(void) +{ + unsigned long flags; + + spin_lock_irqsave(&slow_subchannel_lock, flags); + idset_fill(slow_subchannel_set); + queue_work(slow_path_wq, &slow_path_work); + spin_unlock_irqrestore(&slow_subchannel_lock, flags); +} + /* Reprobe subchannel if unregistered. 
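The slow-path rework replaces the ad-hoc slow_subchannels_head list with an idset bitmap, so requesting a re-evaluation from machine-check context reduces to filling in a subchannel_id and calling css_schedule_eval(), which takes the lock, sets the bit and kicks slow_path_wq itself. A sketch (example_* name illustrative):

static void example_request_eval(int ssid, int sch_no)
{
	struct subchannel_id schid;

	init_subchannel_id(&schid);
	schid.ssid = ssid;
	schid.sch_no = sch_no;
	css_schedule_eval(schid);
}

css_schedule_eval_all() is the bulk variant used on CRW overflow: it fills the whole bitmap via idset_fill() instead of setting a single bit.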
*/ static int reprobe_subchannel(struct subchannel_id schid, void *data) { @@ -426,33 +482,14 @@ void css_schedule_reprobe(void) EXPORT_SYMBOL_GPL(css_schedule_reprobe); /* - * Rescan for new devices. FIXME: This is slow. - * This function is called when we have lost CRWs due to overflows and we have - * to do subchannel housekeeping. - */ -void -css_reiterate_subchannels(void) -{ - css_clear_subchannel_slow_list(); - need_rescan = 1; -} - -/* * Called from the machine check handler for subchannel report words. */ -int -css_process_crw(int rsid1, int rsid2) +void css_process_crw(int rsid1, int rsid2) { - int ret; struct subchannel_id mchk_schid; CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", rsid1, rsid2); - - if (need_rescan) - /* We need to iterate all subchannels anyway. */ - return -EAGAIN; - init_subchannel_id(&mchk_schid); mchk_schid.sch_no = rsid1; if (rsid2 != 0) @@ -463,14 +500,7 @@ css_process_crw(int rsid1, int rsid2) * use stsch() to find out if the subchannel in question has come * or gone. */ - ret = css_evaluate_subchannel(mchk_schid, 0); - if (ret == -EAGAIN) { - if (css_enqueue_subchannel_slow(mchk_schid)) { - css_clear_subchannel_slow_list(); - need_rescan = 1; - } - } - return ret; + css_evaluate_subchannel(mchk_schid, 0); } static int __init @@ -745,47 +775,6 @@ struct bus_type css_bus_type = { subsys_initcall(init_channel_subsystem); -int -css_enqueue_subchannel_slow(struct subchannel_id schid) -{ - struct slow_subchannel *new_slow_sch; - unsigned long flags; - - new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); - if (!new_slow_sch) - return -ENOMEM; - new_slow_sch->schid = schid; - spin_lock_irqsave(&slow_subchannel_lock, flags); - list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); - spin_unlock_irqrestore(&slow_subchannel_lock, flags); - return 0; -} - -void -css_clear_subchannel_slow_list(void) -{ - unsigned long flags; - - spin_lock_irqsave(&slow_subchannel_lock, flags); - while (!list_empty(&slow_subchannels_head)) { - struct slow_subchannel *slow_sch = - list_entry(slow_subchannels_head.next, - struct slow_subchannel, slow_list); - - list_del_init(slow_subchannels_head.next); - kfree(slow_sch); - } - spin_unlock_irqrestore(&slow_subchannel_lock, flags); -} - - - -int -css_slow_subchannels_exist(void) -{ - return (!list_empty(&slow_subchannels_head)); -} - MODULE_LICENSE("GPL"); EXPORT_SYMBOL(css_bus_type); EXPORT_SYMBOL_GPL(css_characteristics_avail); diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index ca2bab932a8..71fcfdc4280 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -4,8 +4,11 @@ #include <linux/mutex.h> #include <linux/wait.h> #include <linux/workqueue.h> +#include <linux/device.h> +#include <linux/types.h> #include <asm/cio.h> +#include <asm/chpid.h> #include "schid.h" @@ -143,13 +146,12 @@ extern void css_sch_device_unregister(struct subchannel *); extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); -extern int css_process_crw(int, int); +extern void css_process_crw(int, int); extern void css_reiterate_subchannels(void); +void css_update_ssd_info(struct subchannel *sch); #define __MAX_SUBCHANNEL 65535 #define __MAX_SSID 3 -#define __MAX_CHPID 255 -#define __MAX_CSSID 0 struct channel_subsystem { u8 cssid; @@ -185,16 +187,12 @@ int device_trigger_verify(struct subchannel *sch); void device_kill_pending_timer(struct subchannel *); /* Helper 
functions to build lists for the slow path. */ -extern int css_enqueue_subchannel_slow(struct subchannel_id schid); -void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); -void css_clear_subchannel_slow_list(void); -int css_slow_subchannels_exist(void); -extern int need_rescan; +void css_schedule_eval(struct subchannel_id schid); +void css_schedule_eval_all(void); int sch_is_pseudo_sch(struct subchannel *); extern struct workqueue_struct *slow_path_wq; -extern struct work_struct slow_path_work; int subchannel_add_files (struct device *); extern struct attribute_group *subch_attr_groups[]; diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e322111fb36..03355902c58 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -56,13 +56,12 @@ ccw_bus_match (struct device * dev, struct device_driver * drv) /* Store modalias string delimited by prefix/suffix string into buffer with * specified size. Return length of resulting string (excluding trailing '\0') * even if string doesn't fit buffer (snprintf semantics). */ -static int snprint_alias(char *buf, size_t size, const char *prefix, +static int snprint_alias(char *buf, size_t size, struct ccw_device_id *id, const char *suffix) { int len; - len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, - id->cu_model); + len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model); if (len > size) return len; buf += len; @@ -85,53 +84,40 @@ static int ccw_uevent(struct device *dev, char **envp, int num_envp, struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device_id *id = &(cdev->id); int i = 0; - int len; + int len = 0; + int ret; + char modalias_buf[30]; /* CU_TYPE= */ - len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; - if (len > buffer_size || i >= num_envp) - return -ENOMEM; - envp[i++] = buffer; - buffer += len; - buffer_size -= len; + ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, + "CU_TYPE=%04X", id->cu_type); + if (ret) + return ret; /* CU_MODEL= */ - len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; - if (len > buffer_size || i >= num_envp) - return -ENOMEM; - envp[i++] = buffer; - buffer += len; - buffer_size -= len; + ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, + "CU_MODEL=%02X", id->cu_model); + if (ret) + return ret; /* The next two can be zero, that's ok for us */ /* DEV_TYPE= */ - len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; - if (len > buffer_size || i >= num_envp) - return -ENOMEM; - envp[i++] = buffer; - buffer += len; - buffer_size -= len; + ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, + "DEV_TYPE=%04X", id->dev_type); + if (ret) + return ret; /* DEV_MODEL= */ - len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", - (unsigned char) id->dev_model) + 1; - if (len > buffer_size || i >= num_envp) - return -ENOMEM; - envp[i++] = buffer; - buffer += len; - buffer_size -= len; + ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, + "DEV_MODEL=%02X", id->dev_model); + if (ret) + return ret; /* MODALIAS= */ - len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; - if (len > buffer_size || i >= num_envp) - return -ENOMEM; - envp[i++] = buffer; - buffer += len; - buffer_size -= len; - - envp[i] = NULL; - - return 0; + snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); + ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, + "MODALIAS=%s", modalias_buf); + return ret; } struct 
bus_type ccw_bus_type; @@ -230,12 +216,18 @@ static ssize_t chpids_show (struct device * dev, struct device_attribute *attr, char * buf) { struct subchannel *sch = to_subchannel(dev); - struct ssd_info *ssd = &sch->ssd_info; + struct chsc_ssd_info *ssd = &sch->ssd_info; ssize_t ret = 0; int chp; + int mask; - for (chp = 0; chp < 8; chp++) - ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); + for (chp = 0; chp < 8; chp++) { + mask = 0x80 >> chp; + if (ssd->path_mask & mask) + ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); + else + ret += sprintf(buf + ret, "00 "); + } ret += sprintf (buf+ret, "\n"); return min((ssize_t)PAGE_SIZE, ret); } @@ -280,7 +272,7 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf) struct ccw_device_id *id = &(cdev->id); int len; - len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; + len = snprint_alias(buf, PAGE_SIZE, id, "\n") + 1; return len > PAGE_SIZE ? PAGE_SIZE : len; } @@ -298,16 +290,10 @@ int ccw_device_is_orphan(struct ccw_device *cdev) return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); } -static void ccw_device_unregister(struct work_struct *work) +static void ccw_device_unregister(struct ccw_device *cdev) { - struct ccw_device_private *priv; - struct ccw_device *cdev; - - priv = container_of(work, struct ccw_device_private, kick_work); - cdev = priv->cdev; if (test_and_clear_bit(1, &cdev->private->registered)) - device_unregister(&cdev->dev); - put_device(&cdev->dev); + device_del(&cdev->dev); } static void @@ -324,11 +310,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev) spin_lock_irqsave(cdev->ccwlock, flags); cdev->private->state = DEV_STATE_NOT_OPER; spin_unlock_irqrestore(cdev->ccwlock, flags); - if (get_device(&cdev->dev)) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_unregister); - queue_work(ccw_device_work, &cdev->private->kick_work); - } + ccw_device_unregister(cdev); + put_device(&cdev->dev); return ; } sch = to_subchannel(cdev->dev.parent); @@ -413,11 +396,60 @@ ccw_device_set_online(struct ccw_device *cdev) return (ret == 0) ? -ENODEV : ret; } -static ssize_t -online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +static void online_store_handle_offline(struct ccw_device *cdev) +{ + if (cdev->private->state == DEV_STATE_DISCONNECTED) + ccw_device_remove_disconnected(cdev); + else if (cdev->drv && cdev->drv->set_offline) + ccw_device_set_offline(cdev); +} + +static int online_store_recog_and_online(struct ccw_device *cdev) +{ + int ret; + + /* Do device recognition, if needed. 
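The open-coded buffer arithmetic in ccw_uevent() gives way to add_uevent_var(), which tracks the environment index, buffer offset and remaining space itself and returns an error once the buffer is exhausted. Stripped of the ccw specifics, the converted shape is (example_* name and variable are made up):

static int example_uevent(struct device *dev, char **envp, int num_envp,
			  char *buffer, int buffer_size)
{
	int i = 0;
	int len = 0;
	int ret;

	/* one call per variable; no manual envp/buffer bookkeeping */
	ret = add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
			     "EXAMPLE_VAR=%s", "value");
	if (ret)
		return ret;
	return 0;
}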
*/ + if (cdev->id.cu_type == 0) { + ret = ccw_device_recognition(cdev); + if (ret) { + printk(KERN_WARNING"Couldn't start recognition " + "for device %s (ret=%d)\n", + cdev->dev.bus_id, ret); + return ret; + } + wait_event(cdev->private->wait_q, + cdev->private->flags.recog_done); + } + if (cdev->drv && cdev->drv->set_online) + ccw_device_set_online(cdev); + return 0; +} +static void online_store_handle_online(struct ccw_device *cdev, int force) +{ + int ret; + + ret = online_store_recog_and_online(cdev); + if (ret) + return; + if (force && cdev->private->state == DEV_STATE_BOXED) { + ret = ccw_device_stlck(cdev); + if (ret) { + printk(KERN_WARNING"ccw_device_stlck for device %s " + "returned %d!\n", cdev->dev.bus_id, ret); + return; + } + if (cdev->id.cu_type == 0) + cdev->private->state = DEV_STATE_NOT_OPER; + online_store_recog_and_online(cdev); + } + +} + +static ssize_t online_store (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct ccw_device *cdev = to_ccwdev(dev); - int i, force, ret; + int i, force; char *tmp; if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) @@ -434,51 +466,17 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf force = 0; i = simple_strtoul(buf, &tmp, 16); } - if (i == 1) { - /* Do device recognition, if needed. */ - if (cdev->id.cu_type == 0) { - ret = ccw_device_recognition(cdev); - if (ret) { - printk(KERN_WARNING"Couldn't start recognition " - "for device %s (ret=%d)\n", - cdev->dev.bus_id, ret); - goto out; - } - wait_event(cdev->private->wait_q, - cdev->private->flags.recog_done); - } - if (cdev->drv && cdev->drv->set_online) - ccw_device_set_online(cdev); - } else if (i == 0) { - if (cdev->private->state == DEV_STATE_DISCONNECTED) - ccw_device_remove_disconnected(cdev); - else if (cdev->drv && cdev->drv->set_offline) - ccw_device_set_offline(cdev); - } - if (force && cdev->private->state == DEV_STATE_BOXED) { - ret = ccw_device_stlck(cdev); - if (ret) { - printk(KERN_WARNING"ccw_device_stlck for device %s " - "returned %d!\n", cdev->dev.bus_id, ret); - goto out; - } - /* Do device recognition, if needed. 
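online_store() now just dispatches on the parsed value: 0 goes to online_store_handle_offline(), 1 to online_store_handle_online(), anything else yields -EINVAL. Both paths still end in the driver callbacks, so from a ccw driver's point of view nothing changes; a skeleton of the callbacks involved (names illustrative, other ccw_driver fields omitted):

static int example_set_online(struct ccw_device *cdev)
{
	/* bring the device up; 0 on success */
	return 0;
}

static int example_set_offline(struct ccw_device *cdev)
{
	/* quiesce the device; 0 on success */
	return 0;
}

static struct ccw_driver example_driver = {
	.owner		= THIS_MODULE,
	.set_online	= example_set_online,
	.set_offline	= example_set_offline,
};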
*/ - if (cdev->id.cu_type == 0) { - cdev->private->state = DEV_STATE_NOT_OPER; - ret = ccw_device_recognition(cdev); - if (ret) { - printk(KERN_WARNING"Couldn't start recognition " - "for device %s (ret=%d)\n", - cdev->dev.bus_id, ret); - goto out; - } - wait_event(cdev->private->wait_q, - cdev->private->flags.recog_done); - } - if (cdev->drv && cdev->drv->set_online) - ccw_device_set_online(cdev); + + switch (i) { + case 0: + online_store_handle_offline(cdev); + break; + case 1: + online_store_handle_online(cdev, force); + break; + default: + count = -EINVAL; } - out: if (cdev->drv) module_put(cdev->drv->owner); atomic_set(&cdev->private->onoff, 0); @@ -548,17 +546,10 @@ static struct attribute_group ccwdev_attr_group = { .attrs = ccwdev_attrs, }; -static int -device_add_files (struct device *dev) -{ - return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); -} - -static void -device_remove_files(struct device *dev) -{ - sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); -} +struct attribute_group *ccwdev_attr_groups[] = { + &ccwdev_attr_group, + NULL, +}; /* this is a simple abstraction for device_register that sets the * correct bus type and adds the bus specific files */ @@ -573,10 +564,6 @@ static int ccw_device_register(struct ccw_device *cdev) return ret; set_bit(1, &cdev->private->registered); - if ((ret = device_add_files(dev))) { - if (test_and_clear_bit(1, &cdev->private->registered)) - device_del(dev); - } return ret; } @@ -648,10 +635,6 @@ ccw_device_add_changed(struct work_struct *work) return; } set_bit(1, &cdev->private->registered); - if (device_add_files(&cdev->dev)) { - if (test_and_clear_bit(1, &cdev->private->registered)) - device_unregister(&cdev->dev); - } } void ccw_device_do_unreg_rereg(struct work_struct *work) @@ -664,9 +647,7 @@ void ccw_device_do_unreg_rereg(struct work_struct *work) cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); - device_remove_files(&cdev->dev); - if (test_and_clear_bit(1, &cdev->private->registered)) - device_del(&cdev->dev); + ccw_device_unregister(cdev); PREPARE_WORK(&cdev->private->kick_work, ccw_device_add_changed); queue_work(ccw_device_work, &cdev->private->kick_work); @@ -705,6 +686,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, cdev->dev.parent = &sch->dev; cdev->dev.release = ccw_device_release; INIT_LIST_HEAD(&cdev->private->kick_work.entry); + cdev->dev.groups = ccwdev_attr_groups; /* Do first half of device_register. */ device_initialize(&cdev->dev); if (!get_device(&sch->dev)) { @@ -736,6 +718,7 @@ static int io_subchannel_recog(struct ccw_device *, struct subchannel *); static void sch_attach_device(struct subchannel *sch, struct ccw_device *cdev) { + css_update_ssd_info(sch); spin_lock_irq(sch->lock); sch->dev.driver_data = cdev; cdev->private->schid = sch->schid; @@ -871,7 +854,7 @@ io_subchannel_register(struct work_struct *work) priv = container_of(work, struct ccw_device_private, kick_work); cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); - + css_update_ssd_info(sch); /* * io_subchannel_register() will also be called after device * recognition has been done for a boxed device (which will already @@ -1133,15 +1116,8 @@ io_subchannel_remove (struct subchannel *sch) sch->dev.driver_data = NULL; cdev->private->state = DEV_STATE_NOT_OPER; spin_unlock_irqrestore(cdev->ccwlock, flags); - /* - * Put unregistration on workqueue to avoid livelocks on the css bus - * semaphore. 
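A related simplification on the sysfs side: the ccw device attributes are no longer created by hand with sysfs_create_group() after registration. The group array is assigned to dev.groups before the device is registered (see ccwdev_attr_groups in io_subchannel_initialize_dev() above) and the driver core creates and removes the files automatically. With made-up attribute names the pattern is:

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
static struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};
static struct attribute_group *example_attr_groups[] = {
	&example_attr_group,
	NULL,
};

/* assigned before device registration: dev->groups = example_attr_groups; */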
- */ - if (get_device(&cdev->dev)) { - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_unregister); - queue_work(ccw_device_work, &cdev->private->kick_work); - } + ccw_device_unregister(cdev); + put_device(&cdev->dev); return 0; } diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 089a3ddd626..898ec3b2beb 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -15,6 +15,7 @@ #include <asm/ccwdev.h> #include <asm/cio.h> +#include <asm/chpid.h> #include "cio.h" #include "cio_debug.h" @@ -22,6 +23,7 @@ #include "device.h" #include "chsc.h" #include "ioasm.h" +#include "chp.h" int device_is_online(struct subchannel *sch) @@ -210,14 +212,18 @@ static void __recover_lost_chpids(struct subchannel *sch, int old_lpm) { int mask, i; + struct chp_id chpid; + chp_id_init(&chpid); for (i = 0; i<8; i++) { mask = 0x80 >> i; if (!(sch->lpm & mask)) continue; if (old_lpm & mask) continue; - chpid_is_actually_online(sch->schib.pmcw.chpid[i]); + chpid.id = sch->schib.pmcw.chpid[i]; + if (!chp_is_registered(chpid)) + css_schedule_eval_all(); } } diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 7c7775aae38..16f59fcb66b 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -16,12 +16,14 @@ #include <asm/ccwdev.h> #include <asm/idals.h> +#include <asm/chpid.h> #include "cio.h" #include "cio_debug.h" #include "css.h" #include "chsc.h" #include "device.h" +#include "chp.h" int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags) { @@ -606,9 +608,12 @@ void * ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) { struct subchannel *sch; + struct chp_id chpid; sch = to_subchannel(cdev->dev.parent); - return chsc_get_chp_desc(sch, chp_no); + chp_id_init(&chpid); + chpid.id = sch->schib.pmcw.chpid[chp_no]; + return chp_get_chp_desc(chpid); } // FIXME: these have to go: diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c new file mode 100644 index 00000000000..16ea828e99f --- /dev/null +++ b/drivers/s390/cio/idset.c @@ -0,0 +1,112 @@ +/* + * drivers/s390/cio/idset.c + * + * Copyright IBM Corp. 
2007 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/slab.h> +#include <asm/bitops.h> +#include "idset.h" +#include "css.h" + +struct idset { + int num_ssid; + int num_id; + unsigned long bitmap[0]; +}; + +static inline unsigned long bitmap_size(int num_ssid, int num_id) +{ + return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long); +} + +static struct idset *idset_new(int num_ssid, int num_id) +{ + struct idset *set; + + set = kzalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id), + GFP_KERNEL); + if (set) { + set->num_ssid = num_ssid; + set->num_id = num_id; + } + return set; +} + +void idset_free(struct idset *set) +{ + kfree(set); +} + +void idset_clear(struct idset *set) +{ + memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id)); +} + +void idset_fill(struct idset *set) +{ + memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); +} + +static inline void idset_add(struct idset *set, int ssid, int id) +{ + set_bit(ssid * set->num_id + id, set->bitmap); +} + +static inline void idset_del(struct idset *set, int ssid, int id) +{ + clear_bit(ssid * set->num_id + id, set->bitmap); +} + +static inline int idset_contains(struct idset *set, int ssid, int id) +{ + return test_bit(ssid * set->num_id + id, set->bitmap); +} + +static inline int idset_get_first(struct idset *set, int *ssid, int *id) +{ + int bitnum; + + bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); + if (bitnum >= set->num_ssid * set->num_id) + return 0; + *ssid = bitnum / set->num_id; + *id = bitnum % set->num_id; + return 1; +} + +struct idset *idset_sch_new(void) +{ + return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1); +} + +void idset_sch_add(struct idset *set, struct subchannel_id schid) +{ + idset_add(set, schid.ssid, schid.sch_no); +} + +void idset_sch_del(struct idset *set, struct subchannel_id schid) +{ + idset_del(set, schid.ssid, schid.sch_no); +} + +int idset_sch_contains(struct idset *set, struct subchannel_id schid) +{ + return idset_contains(set, schid.ssid, schid.sch_no); +} + +int idset_sch_get_first(struct idset *set, struct subchannel_id *schid) +{ + int ssid = 0; + int id = 0; + int rc; + + rc = idset_get_first(set, &ssid, &id); + if (rc) { + init_subchannel_id(schid); + schid->ssid = ssid; + schid->sch_no = id; + } + return rc; +} diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h new file mode 100644 index 00000000000..144466ab8c1 --- /dev/null +++ b/drivers/s390/cio/idset.h @@ -0,0 +1,25 @@ +/* + * drivers/s390/cio/idset.h + * + * Copyright IBM Corp. 
2007 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#ifndef S390_IDSET_H +#define S390_IDSET_H S390_IDSET_H + +#include "schid.h" + +struct idset; + +void idset_free(struct idset *set); +void idset_clear(struct idset *set); +void idset_fill(struct idset *set); + +struct idset *idset_sch_new(void); +void idset_sch_add(struct idset *set, struct subchannel_id id); +void idset_sch_del(struct idset *set, struct subchannel_id id); +int idset_sch_contains(struct idset *set, struct subchannel_id id); +int idset_sch_get_first(struct idset *set, struct subchannel_id *id); + +#endif /* S390_IDSET_H */ diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index ad6d8294006..7153dd95908 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -1,6 +1,7 @@ #ifndef S390_CIO_IOASM_H #define S390_CIO_IOASM_H +#include <asm/chpid.h> #include "schid.h" /* @@ -189,9 +190,9 @@ static inline int chsc(void *chsc_area) return cc; } -static inline int rchp(int chpid) +static inline int rchp(struct chp_id chpid) { - register unsigned int reg1 asm ("1") = chpid; + register struct chp_id reg1 asm ("1") = chpid; int ccode; asm volatile( diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 0d6d5fcc128..570a960bfb5 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -1638,21 +1638,19 @@ add_channel(struct ccw_device *cdev, enum channel_types type) struct channel *ch; DBF_TEXT(trace, 2, __FUNCTION__); - if ((ch = - (struct channel *) kmalloc(sizeof (struct channel), - GFP_KERNEL)) == NULL) { + ch = kzalloc(sizeof(struct channel), GFP_KERNEL); + if (!ch) { ctc_pr_warn("ctc: Out of memory in add_channel\n"); return -1; } - memset(ch, 0, sizeof (struct channel)); - if ((ch->ccw = kmalloc(8*sizeof(struct ccw1), - GFP_KERNEL | GFP_DMA)) == NULL) { + /* assure all flags and counters are reset */ + ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + if (!ch->ccw) { kfree(ch); ctc_pr_warn("ctc: Out of memory in add_channel\n"); return -1; } - memset(ch->ccw, 0, 8*sizeof(struct ccw1)); // assure all flags and counters are reset /** * "static" ccws are used in the following way: @@ -1692,15 +1690,14 @@ add_channel(struct ccw_device *cdev, enum channel_types type) return -1; } fsm_newstate(ch->fsm, CH_STATE_IDLE); - if ((ch->irb = kmalloc(sizeof (struct irb), - GFP_KERNEL)) == NULL) { + ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL); + if (!ch->irb) { ctc_pr_warn("ctc: Out of memory in add_channel\n"); kfree_fsm(ch->fsm); kfree(ch->ccw); kfree(ch); return -1; } - memset(ch->irb, 0, sizeof (struct irb)); while (*c && less_than((*c)->id, ch->id)) c = &(*c)->next; if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) { @@ -2745,14 +2742,13 @@ ctc_probe_device(struct ccwgroup_device *cgdev) if (!get_device(&cgdev->dev)) return -ENODEV; - priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL); + priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL); if (!priv) { ctc_pr_err("%s: Out of memory\n", __func__); put_device(&cgdev->dev); return -ENOMEM; } - memset(priv, 0, sizeof (struct ctc_priv)); rc = ctc_add_files(&cgdev->dev); if (rc) { kfree(priv); @@ -2793,10 +2789,9 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device, DBF_TEXT(setup, 3, __FUNCTION__); if (alloc_device) { - dev = kmalloc(sizeof (struct net_device), GFP_KERNEL); + dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); if (!dev) return NULL; - memset(dev, 0, sizeof (struct net_device)); } dev->priv = privptr; diff --git a/drivers/s390/s390mach.c 
b/drivers/s390/s390mach.c index 806bb1a921e..644a06eba82 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c @@ -21,6 +21,7 @@ #include "cio/cio.h" #include "cio/chsc.h" #include "cio/css.h" +#include "cio/chp.h" #include "s390mach.h" static struct semaphore m_sem; @@ -44,14 +45,13 @@ static int s390_collect_crw_info(void *param) { struct crw crw[2]; - int ccode, ret, slow; + int ccode; struct semaphore *sem; unsigned int chain; sem = (struct semaphore *)param; repeat: down_interruptible(sem); - slow = 0; chain = 0; while (1) { if (unlikely(chain > 1)) { @@ -84,9 +84,8 @@ repeat: /* Check for overflows. */ if (crw[chain].oflw) { pr_debug("%s: crw overflow detected!\n", __FUNCTION__); - css_reiterate_subchannels(); + css_schedule_eval_all(); chain = 0; - slow = 1; continue; } switch (crw[chain].rsc) { @@ -94,10 +93,7 @@ repeat: if (crw[0].chn && !chain) break; pr_debug("source is subchannel %04X\n", crw[0].rsid); - ret = css_process_crw (crw[0].rsid, - chain ? crw[1].rsid : 0); - if (ret == -EAGAIN) - slow = 1; + css_process_crw(crw[0].rsid, chain ? crw[1].rsid : 0); break; case CRW_RSC_MONITOR: pr_debug("source is monitoring facility\n"); @@ -116,28 +112,23 @@ repeat: } switch (crw[0].erc) { case CRW_ERC_IPARM: /* Path has come. */ - ret = chp_process_crw(crw[0].rsid, 1); + chp_process_crw(crw[0].rsid, 1); break; case CRW_ERC_PERRI: /* Path has gone. */ case CRW_ERC_PERRN: - ret = chp_process_crw(crw[0].rsid, 0); + chp_process_crw(crw[0].rsid, 0); break; default: pr_debug("Don't know how to handle erc=%x\n", crw[0].erc); - ret = 0; } - if (ret == -EAGAIN) - slow = 1; break; case CRW_RSC_CONFIG: pr_debug("source is configuration-alert facility\n"); break; case CRW_RSC_CSS: pr_debug("source is channel subsystem\n"); - ret = chsc_process_crw(); - if (ret == -EAGAIN) - slow = 1; + chsc_process_crw(); break; default: pr_debug("unknown source\n"); @@ -146,8 +137,6 @@ repeat: /* chain is always 0 or 1 here. */ chain = crw[chain].chn ? chain + 1 : 0; } - if (slow) - queue_work(slow_path_wq, &slow_path_work); goto repeat; return 0; } diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 090743d2f91..19343f9675c 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c @@ -357,6 +357,24 @@ static __init int create_proc_sysinfo(void) __initcall(create_proc_sysinfo); +int get_cpu_capability(unsigned int *capability) +{ + struct sysinfo_1_2_2 *info; + int rc; + + info = (void *) get_zeroed_page(GFP_KERNEL); + if (!info) + return -ENOMEM; + rc = stsi(info, 1, 2, 2); + if (rc == -ENOSYS) + goto out; + rc = 0; + *capability = info->capability; +out: + free_page((unsigned long) info); + return rc; +} + /* * CPU capability might have changed. Therefore recalculate loops_per_jiffy. 
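The new get_cpu_capability() keeps the page allocation and stsi() handling internal, so a caller only supplies an unsigned int to fill (name illustrative):

static void example_report_capability(void)
{
	unsigned int cap;

	if (!get_cpu_capability(&cap))
		printk(KERN_INFO "cpu capability: %u\n", cap);
}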
*/ diff --git a/include/asm-avr32/arch-at32ap/io.h b/include/asm-avr32/arch-at32ap/io.h new file mode 100644 index 00000000000..ee59e401f04 --- /dev/null +++ b/include/asm-avr32/arch-at32ap/io.h @@ -0,0 +1,39 @@ +#ifndef __ASM_AVR32_ARCH_AT32AP_IO_H +#define __ASM_AVR32_ARCH_AT32AP_IO_H + +/* For "bizarre" halfword swapping */ +#include <linux/byteorder/swabb.h> + +#if defined(CONFIG_AP7000_32_BIT_SMC) +# define __swizzle_addr_b(addr) (addr ^ 3UL) +# define __swizzle_addr_w(addr) (addr ^ 2UL) +# define __swizzle_addr_l(addr) (addr) +# define ioswabb(a, x) (x) +# define ioswabw(a, x) (x) +# define ioswabl(a, x) (x) +# define __mem_ioswabb(a, x) (x) +# define __mem_ioswabw(a, x) swab16(x) +# define __mem_ioswabl(a, x) swab32(x) +#elif defined(CONFIG_AP7000_16_BIT_SMC) +# define __swizzle_addr_b(addr) (addr ^ 1UL) +# define __swizzle_addr_w(addr) (addr) +# define __swizzle_addr_l(addr) (addr) +# define ioswabb(a, x) (x) +# define ioswabw(a, x) (x) +# define ioswabl(a, x) swahw32(x) +# define __mem_ioswabb(a, x) (x) +# define __mem_ioswabw(a, x) swab16(x) +# define __mem_ioswabl(a, x) swahb32(x) +#else +# define __swizzle_addr_b(addr) (addr) +# define __swizzle_addr_w(addr) (addr) +# define __swizzle_addr_l(addr) (addr) +# define ioswabb(a, x) (x) +# define ioswabw(a, x) swab16(x) +# define ioswabl(a, x) swab32(x) +# define __mem_ioswabb(a, x) (x) +# define __mem_ioswabw(a, x) (x) +# define __mem_ioswabl(a, x) (x) +#endif + +#endif /* __ASM_AVR32_ARCH_AT32AP_IO_H */ diff --git a/include/asm-avr32/arch-at32ap/smc.h b/include/asm-avr32/arch-at32ap/smc.h index 3732b328303..07152b7fd9c 100644 --- a/include/asm-avr32/arch-at32ap/smc.h +++ b/include/asm-avr32/arch-at32ap/smc.h @@ -48,10 +48,32 @@ struct smc_config { unsigned int nwe_controlled:1; /* + * 0: NWAIT is disabled + * 1: Reserved + * 2: NWAIT is frozen mode + * 3: NWAIT in ready mode + */ + unsigned int nwait_mode:2; + + /* * 0: Byte select access type * 1: Byte write access type */ unsigned int byte_write:1; + + /* + * Number of clock cycles before data is released after + * the rising edge of the read controlling signal + * + * Total cycles from SMC is tdf_cycles + 1 + */ + unsigned int tdf_cycles:4; + + /* + * 0: TDF optimization disabled + * 1: TDF optimization enabled + */ + unsigned int tdf_mode:1; }; extern int smc_set_configuration(int cs, const struct smc_config *config); diff --git a/include/asm-avr32/arch-at32ap/time.h b/include/asm-avr32/arch-at32ap/time.h new file mode 100644 index 00000000000..cc8a43418a4 --- /dev/null +++ b/include/asm-avr32/arch-at32ap/time.h @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
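The new smc_config fields slot into the existing smc_set_configuration() interface. A fragment exercising only the added fields might look like this (values are illustrative, and a real configuration still has to fill in the setup/pulse/cycle timings that were already part of struct smc_config):

static int example_configure_cs0(void)
{
	struct smc_config config = {
		.nwait_mode	= 3,	/* NWAIT in ready mode */
		.tdf_cycles	= 2,	/* data released tdf_cycles + 1 = 3 cycles
					 * after the read edge */
		.tdf_mode	= 1,	/* enable TDF optimization */
	};

	return smc_set_configuration(0, &config);	/* chip select 0 */
}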
+ */ + +#ifndef _ASM_AVR32_ARCH_AT32AP_TIME_H +#define _ASM_AVR32_ARCH_AT32AP_TIME_H + +#include <linux/platform_device.h> + +extern struct irqaction timer_irqaction; +extern struct platform_device at32_systc0_device; +extern void local_timer_interrupt(int irq, void *dev_id); + +#define TIMER_BCR 0x000000c0 +#define TIMER_BCR_SYNC 0 +#define TIMER_BMR 0x000000c4 +#define TIMER_BMR_TC0XC0S 0 +#define TIMER_BMR_TC1XC1S 2 +#define TIMER_BMR_TC2XC2S 4 +#define TIMER_CCR 0x00000000 +#define TIMER_CCR_CLKDIS 1 +#define TIMER_CCR_CLKEN 0 +#define TIMER_CCR_SWTRG 2 +#define TIMER_CMR 0x00000004 +#define TIMER_CMR_ABETRG 10 +#define TIMER_CMR_ACPA 16 +#define TIMER_CMR_ACPC 18 +#define TIMER_CMR_AEEVT 20 +#define TIMER_CMR_ASWTRG 22 +#define TIMER_CMR_BCPB 24 +#define TIMER_CMR_BCPC 26 +#define TIMER_CMR_BEEVT 28 +#define TIMER_CMR_BSWTRG 30 +#define TIMER_CMR_BURST 4 +#define TIMER_CMR_CLKI 3 +#define TIMER_CMR_CPCDIS 7 +#define TIMER_CMR_CPCSTOP 6 +#define TIMER_CMR_CPCTRG 14 +#define TIMER_CMR_EEVT 10 +#define TIMER_CMR_EEVTEDG 8 +#define TIMER_CMR_ENETRG 12 +#define TIMER_CMR_ETRGEDG 8 +#define TIMER_CMR_LDBDIS 7 +#define TIMER_CMR_LDBSTOP 6 +#define TIMER_CMR_LDRA 16 +#define TIMER_CMR_LDRB 18 +#define TIMER_CMR_TCCLKS 0 +#define TIMER_CMR_WAVE 15 +#define TIMER_CMR_WAVSEL 13 +#define TIMER_CV 0x00000010 +#define TIMER_CV_CV 0 +#define TIMER_IDR 0x00000028 +#define TIMER_IDR_COVFS 0 +#define TIMER_IDR_CPAS 2 +#define TIMER_IDR_CPBS 3 +#define TIMER_IDR_CPCS 4 +#define TIMER_IDR_ETRGS 7 +#define TIMER_IDR_LDRAS 5 +#define TIMER_IDR_LDRBS 6 +#define TIMER_IDR_LOVRS 1 +#define TIMER_IER 0x00000024 +#define TIMER_IER_COVFS 0 +#define TIMER_IER_CPAS 2 +#define TIMER_IER_CPBS 3 +#define TIMER_IER_CPCS 4 +#define TIMER_IER_ETRGS 7 +#define TIMER_IER_LDRAS 5 +#define TIMER_IER_LDRBS 6 +#define TIMER_IER_LOVRS 1 +#define TIMER_IMR 0x0000002c +#define TIMER_IMR_COVFS 0 +#define TIMER_IMR_CPAS 2 +#define TIMER_IMR_CPBS 3 +#define TIMER_IMR_CPCS 4 +#define TIMER_IMR_ETRGS 7 +#define TIMER_IMR_LDRAS 5 +#define TIMER_IMR_LDRBS 6 +#define TIMER_IMR_LOVRS 1 +#define TIMER_RA 0x00000014 +#define TIMER_RA_RA 0 +#define TIMER_RB 0x00000018 +#define TIMER_RB_RB 0 +#define TIMER_RC 0x0000001c +#define TIMER_RC_RC 0 +#define TIMER_SR 0x00000020 +#define TIMER_SR_CLKSTA 16 +#define TIMER_SR_COVFS 0 +#define TIMER_SR_CPAS 2 +#define TIMER_SR_CPBS 3 +#define TIMER_SR_CPCS 4 +#define TIMER_SR_ETRGS 7 +#define TIMER_SR_LDRAS 5 +#define TIMER_SR_LDRBS 6 +#define TIMER_SR_LOVRS 1 +#define TIMER_SR_MTIOA 17 +#define TIMER_SR_MTIOB 18 + +/* Bit manipulation macros */ +#define TIMER_BIT(name) (1 << TIMER_##name) +#define TIMER_BF(name,value) ((value) << TIMER_##name) + +/* Register access macros */ +#define timer_read(port,instance,reg) \ + __raw_readl(port + (0x40 * instance) + TIMER_##reg) +#define timer_write(port,instance,reg,value) \ + __raw_writel((value), port + (0x40 * instance) + TIMER_##reg) + +#endif /* _ASM_AVR32_ARCH_AT32AP_TIME_H */ diff --git a/include/asm-avr32/atomic.h b/include/asm-avr32/atomic.h index c40b6032c48..b9c2548a52f 100644 --- a/include/asm-avr32/atomic.h +++ b/include/asm-avr32/atomic.h @@ -173,7 +173,7 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v) } #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) +#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_sub(i, v) (void)atomic_sub_return(i, v) #define atomic_add(i, v) (void)atomic_add_return(i, v) diff --git 
a/include/asm-avr32/bug.h b/include/asm-avr32/bug.h index 521766bc936..afdcd79a296 100644 --- a/include/asm-avr32/bug.h +++ b/include/asm-avr32/bug.h @@ -18,27 +18,53 @@ #ifdef CONFIG_DEBUG_BUGVERBOSE -#define BUG() \ - do { \ - asm volatile(".hword %0\n\t" \ - ".hword %1\n\t" \ - ".long %2" \ - : \ - : "n"(AVR32_BUG_OPCODE), \ - "i"(__LINE__), "X"(__FILE__)); \ - } while (0) +#define _BUG_OR_WARN(flags) \ + asm volatile( \ + "1: .hword %0\n" \ + " .section __bug_table,\"a\",@progbits\n" \ + "2: .long 1b\n" \ + " .long %1\n" \ + " .short %2\n" \ + " .short %3\n" \ + " .org 2b + %4\n" \ + " .previous" \ + : \ + : "i"(AVR32_BUG_OPCODE), "i"(__FILE__), \ + "i"(__LINE__), "i"(flags), \ + "i"(sizeof(struct bug_entry))) #else +#define _BUG_OR_WARN(flags) \ + asm volatile( \ + "1: .hword %0\n" \ + " .section __bug_table,\"a\",@progbits\n" \ + "2: .long 1b\n" \ + " .short %1\n" \ + " .org 2b + %2\n" \ + " .previous" \ + : \ + : "i"(AVR32_BUG_OPCODE), "i"(flags), \ + "i"(sizeof(struct bug_entry))) + +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + #define BUG() \ do { \ - asm volatile(".hword %0\n\t" \ - : : "n"(AVR32_BUG_OPCODE)); \ + _BUG_OR_WARN(0); \ + for (;;); \ } while (0) -#endif /* CONFIG_DEBUG_BUGVERBOSE */ +#define WARN_ON(condition) \ + ({ \ + typeof(condition) __ret_warn_on = (condition); \ + if (unlikely(__ret_warn_on)) \ + _BUG_OR_WARN(BUGFLAG_WARNING); \ + unlikely(__ret_warn_on); \ + }) #define HAVE_ARCH_BUG +#define HAVE_ARCH_WARN_ON #endif /* CONFIG_BUG */ diff --git a/include/asm-avr32/io.h b/include/asm-avr32/io.h index c08e8104839..e30d4b3bd83 100644 --- a/include/asm-avr32/io.h +++ b/include/asm-avr32/io.h @@ -1,13 +1,15 @@ #ifndef __ASM_AVR32_IO_H #define __ASM_AVR32_IO_H +#include <linux/kernel.h> #include <linux/string.h> - -#ifdef __KERNEL__ +#include <linux/types.h> #include <asm/addrspace.h> #include <asm/byteorder.h> +#include <asm/arch/io.h> + /* virt_to_phys will only work when address is in P1 or P2 */ static __inline__ unsigned long virt_to_phys(volatile void *address) { @@ -36,104 +38,215 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen); extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen); extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); -static inline void writeb(unsigned char b, volatile void __iomem *addr) +static inline void __raw_writeb(u8 v, volatile void __iomem *addr) { - *(volatile unsigned char __force *)addr = b; + *(volatile u8 __force *)addr = v; } -static inline void writew(unsigned short b, volatile void __iomem *addr) +static inline void __raw_writew(u16 v, volatile void __iomem *addr) { - *(volatile unsigned short __force *)addr = b; + *(volatile u16 __force *)addr = v; } -static inline void writel(unsigned int b, volatile void __iomem *addr) +static inline void __raw_writel(u32 v, volatile void __iomem *addr) { - *(volatile unsigned int __force *)addr = b; + *(volatile u32 __force *)addr = v; } -#define __raw_writeb writeb -#define __raw_writew writew -#define __raw_writel writel -static inline unsigned char readb(const volatile void __iomem *addr) +static inline u8 __raw_readb(const volatile void __iomem *addr) { - return *(const volatile unsigned char __force *)addr; + return *(const volatile u8 __force *)addr; } -static inline unsigned short readw(const volatile void __iomem *addr) +static inline u16 __raw_readw(const volatile void __iomem *addr) { - return *(const volatile unsigned short __force *)addr; + return *(const volatile u16 __force *)addr; } -static inline 
unsigned int readl(const volatile void __iomem *addr) +static inline u32 __raw_readl(const volatile void __iomem *addr) { - return *(const volatile unsigned int __force *)addr; + return *(const volatile u32 __force *)addr; +} + +/* Convert I/O port address to virtual address */ +#ifndef __io +# define __io(p) ((void *)phys_to_uncached(p)) +#endif + +/* + * Not really sure about the best way to slow down I/O on + * AVR32. Defining it as a no-op until we have an actual test case. + */ +#define SLOW_DOWN_IO do { } while (0) + +#define __BUILD_MEMORY_SINGLE(pfx, bwl, type) \ +static inline void \ +pfx##write##bwl(type val, volatile void __iomem *addr) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr)); \ + __val = pfx##ioswab##bwl(__addr, val); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + *__addr = __val; \ +} \ + \ +static inline type pfx##read##bwl(const volatile void __iomem *addr) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr)); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + __val = *__addr; \ + return pfx##ioswab##bwl(__addr, __val); \ +} + +#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow) \ +static inline void pfx##out##bwl##p(type val, unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = __io(__swizzle_addr_##bwl(port)); \ + __val = pfx##ioswab##bwl(__addr, val); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + *__addr = __val; \ + slow; \ +} \ + \ +static inline type pfx##in##bwl##p(unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = __io(__swizzle_addr_##bwl(port)); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + __val = *__addr; \ + slow; \ + \ + return pfx##ioswab##bwl(__addr, __val); \ +} + +#define __BUILD_MEMORY_PFX(bus, bwl, type) \ + __BUILD_MEMORY_SINGLE(bus, bwl, type) + +#define BUILDIO_MEM(bwl, type) \ + __BUILD_MEMORY_PFX(, bwl, type) \ + __BUILD_MEMORY_PFX(__mem_, bwl, type) + +#define __BUILD_IOPORT_PFX(bus, bwl, type) \ + __BUILD_IOPORT_SINGLE(bus, bwl, type, ,) \ + __BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO) + +#define BUILDIO_IOPORT(bwl, type) \ + __BUILD_IOPORT_PFX(, bwl, type) \ + __BUILD_IOPORT_PFX(__mem_, bwl, type) + +BUILDIO_MEM(b, u8) +BUILDIO_MEM(w, u16) +BUILDIO_MEM(l, u32) + +BUILDIO_IOPORT(b, u8) +BUILDIO_IOPORT(w, u16) +BUILDIO_IOPORT(l, u32) + +#define readb_relaxed readb +#define readw_relaxed readw +#define readl_relaxed readl + +#define __BUILD_MEMORY_STRING(bwl, type) \ +static inline void writes##bwl(volatile void __iomem *addr, \ + const void *data, unsigned int count) \ +{ \ + const type *__data = data; \ + \ + while (count--) \ + __mem_write##bwl(*__data++, addr); \ +} \ + \ +static inline void reads##bwl(const volatile void __iomem *addr, \ + void *data, unsigned int count) \ +{ \ + type *__data = data; \ + \ + while (count--) \ + *__data++ = __mem_read##bwl(addr); \ } -#define __raw_readb readb -#define __raw_readw readw -#define __raw_readl readl -#define writesb(p, d, l) __raw_writesb((unsigned int)p, d, l) -#define writesw(p, d, l) __raw_writesw((unsigned int)p, d, l) -#define writesl(p, d, l) __raw_writesl((unsigned int)p, d, l) +#define __BUILD_IOPORT_STRING(bwl, type) \ +static inline void outs##bwl(unsigned long port, const void *data, \ + unsigned int count) \ +{ \ + const type *__data = data; \ + \ + while (count--) \ + 
__mem_out##bwl(*__data++, port); \ +} \ + \ +static inline void ins##bwl(unsigned long port, void *data, \ + unsigned int count) \ +{ \ + type *__data = data; \ + \ + while (count--) \ + *__data++ = __mem_in##bwl(port); \ +} -#define readsb(p, d, l) __raw_readsb((unsigned int)p, d, l) -#define readsw(p, d, l) __raw_readsw((unsigned int)p, d, l) -#define readsl(p, d, l) __raw_readsl((unsigned int)p, d, l) +#define BUILDSTRING(bwl, type) \ + __BUILD_MEMORY_STRING(bwl, type) \ + __BUILD_IOPORT_STRING(bwl, type) +BUILDSTRING(b, u8) +BUILDSTRING(w, u16) +BUILDSTRING(l, u32) /* * io{read,write}{8,16,32} macros in both le (for PCI style consumers) and native be */ #ifndef ioread8 -#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __v; }) +#define ioread8(p) ((unsigned int)readb(p)) -#define ioread16(p) ({ unsigned int __v = le16_to_cpu(__raw_readw(p)); __v; }) -#define ioread16be(p) ({ unsigned int __v = be16_to_cpu(__raw_readw(p)); __v; }) +#define ioread16(p) ((unsigned int)readw(p)) +#define ioread16be(p) ((unsigned int)__raw_readw(p)) -#define ioread32(p) ({ unsigned int __v = le32_to_cpu(__raw_readl(p)); __v; }) -#define ioread32be(p) ({ unsigned int __v = be32_to_cpu(__raw_readl(p)); __v; }) +#define ioread32(p) ((unsigned int)readl(p)) +#define ioread32be(p) ((unsigned int)__raw_readl(p)) -#define iowrite8(v,p) __raw_writeb(v, p) +#define iowrite8(v,p) writeb(v, p) -#define iowrite16(v,p) __raw_writew(cpu_to_le16(v), p) -#define iowrite16be(v,p) __raw_writew(cpu_to_be16(v), p) +#define iowrite16(v,p) writew(v, p) +#define iowrite16be(v,p) __raw_writew(v, p) -#define iowrite32(v,p) __raw_writel(cpu_to_le32(v), p) -#define iowrite32be(v,p) __raw_writel(cpu_to_be32(v), p) +#define iowrite32(v,p) writel(v, p) +#define iowrite32be(v,p) __raw_writel(v, p) -#define ioread8_rep(p,d,c) __raw_readsb(p,d,c) -#define ioread16_rep(p,d,c) __raw_readsw(p,d,c) -#define ioread32_rep(p,d,c) __raw_readsl(p,d,c) +#define ioread8_rep(p,d,c) readsb(p,d,c) +#define ioread16_rep(p,d,c) readsw(p,d,c) +#define ioread32_rep(p,d,c) readsl(p,d,c) -#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c) -#define iowrite16_rep(p,s,c) __raw_writesw(p,s,c) -#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c) +#define iowrite8_rep(p,s,c) writesb(p,s,c) +#define iowrite16_rep(p,s,c) writesw(p,s,c) +#define iowrite32_rep(p,s,c) writesl(p,s,c) #endif - -/* - * These two are only here because ALSA _thinks_ it needs them... - */ static inline void memcpy_fromio(void * to, const volatile void __iomem *from, unsigned long count) { - char *p = to; - while (count) { - count--; - *p = readb(from); - p++; - from++; - } + memcpy(to, (const void __force *)from, count); } static inline void memcpy_toio(volatile void __iomem *to, const void * from, unsigned long count) { - const char *p = from; - while (count) { - count--; - writeb(*p, to); - p++; - to++; - } + memcpy((void __force *)to, from, count); } static inline void memset_io(volatile void __iomem *addr, unsigned char val, @@ -142,99 +255,8 @@ static inline void memset_io(volatile void __iomem *addr, unsigned char val, memset((void __force *)addr, val, count); } -/* - * Bad read/write accesses... - */ -extern void __readwrite_bug(const char *fn); - #define IO_SPACE_LIMIT 0xffffffff -/* Convert I/O port address to virtual address */ -#define __io(p) ((void __iomem *)phys_to_uncached(p)) - -/* - * IO port access primitives - * ------------------------- - * - * The AVR32 doesn't have special IO access instructions; all IO is memory - * mapped. 
Note that these are defined to perform little endian accesses - * only. Their primary purpose is to access PCI and ISA peripherals. - * - * Note that for a big endian machine, this implies that the following - * big endian mode connectivity is in place. - * - * The machine specific io.h include defines __io to translate an "IO" - * address to a memory address. - * - * Note that we prevent GCC re-ordering or caching values in expressions - * by introducing sequence points into the in*() definitions. Note that - * __raw_* do not guarantee this behaviour. - * - * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space. - */ -#define outb(v, p) __raw_writeb(v, __io(p)) -#define outw(v, p) __raw_writew(cpu_to_le16(v), __io(p)) -#define outl(v, p) __raw_writel(cpu_to_le32(v), __io(p)) - -#define inb(p) __raw_readb(__io(p)) -#define inw(p) le16_to_cpu(__raw_readw(__io(p))) -#define inl(p) le32_to_cpu(__raw_readl(__io(p))) - -static inline void __outsb(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - outb(*(u8 *)addr, port); - addr++; - } -} - -static inline void __insb(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - *(u8 *)addr = inb(port); - addr++; - } -} - -static inline void __outsw(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - outw(*(u16 *)addr, port); - addr += 2; - } -} - -static inline void __insw(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - *(u16 *)addr = inw(port); - addr += 2; - } -} - -static inline void __outsl(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - outl(*(u32 *)addr, port); - addr += 4; - } -} - -static inline void __insl(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - *(u32 *)addr = inl(port); - addr += 4; - } -} - -#define outsb(port, addr, count) __outsb(port, addr, count) -#define insb(port, addr, count) __insb(port, addr, count) -#define outsw(port, addr, count) __outsw(port, addr, count) -#define insw(port, addr, count) __insw(port, addr, count) -#define outsl(port, addr, count) __outsl(port, addr, count) -#define insl(port, addr, count) __insl(port, addr, count) - extern void __iomem *__ioremap(unsigned long offset, size_t size, unsigned long flags); extern void __iounmap(void __iomem *addr); @@ -292,6 +314,4 @@ extern void __iounmap(void __iomem *addr); */ #define xlate_dev_kmem_ptr(p) p -#endif /* __KERNEL__ */ - #endif /* __ASM_AVR32_IO_H */ diff --git a/include/asm-avr32/processor.h b/include/asm-avr32/processor.h index f6913778a45..6a64833756a 100644 --- a/include/asm-avr32/processor.h +++ b/include/asm-avr32/processor.h @@ -40,6 +40,14 @@ enum tlb_config { TLB_INVALID }; +#define AVR32_FEATURE_RMW (1 << 0) +#define AVR32_FEATURE_DSP (1 << 1) +#define AVR32_FEATURE_SIMD (1 << 2) +#define AVR32_FEATURE_OCD (1 << 3) +#define AVR32_FEATURE_PCTR (1 << 4) +#define AVR32_FEATURE_JAVA (1 << 5) +#define AVR32_FEATURE_FPU (1 << 6) + struct avr32_cpuinfo { struct clk *clk; unsigned long loops_per_jiffy; @@ -48,6 +56,7 @@ struct avr32_cpuinfo { unsigned short arch_revision; unsigned short cpu_revision; enum tlb_config tlb_config; + unsigned long features; struct cache_info icache; struct cache_info dcache; @@ -125,10 +134,10 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); #define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc) struct pt_regs; -void show_trace(struct task_struct *task, unsigned long *stack, - struct pt_regs *regs); - extern unsigned 
long get_wchan(struct task_struct *p); +extern void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl); +extern void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp, + struct pt_regs *regs, const char *log_lvl); #define KSTK_EIP(tsk) ((tsk)->thread.cpu_context.pc) #define KSTK_ESP(tsk) ((tsk)->thread.cpu_context.ksp) diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h index 0a5224245e4..1ff1a217015 100644 --- a/include/asm-avr32/setup.h +++ b/include/asm-avr32/setup.h @@ -124,19 +124,12 @@ struct tagtable { #define for_each_tag(t,base) \ for (t = base; t->hdr.size; t = tag_next(t)) -extern struct tag_mem_range *mem_phys; -extern struct tag_mem_range *mem_reserved; -extern struct tag_mem_range *mem_ramdisk; - extern struct tag *bootloader_tags; -extern void setup_bootmem(void); -extern void setup_processor(void); -extern void board_setup_fbmem(unsigned long fbmem_start, - unsigned long fbmem_size); +extern resource_size_t fbmem_start; +extern resource_size_t fbmem_size; -/* Chip-specific hook to enable the use of SDRAM */ -void chip_enable_sdram(void); +void setup_processor(void); #endif /* !__ASSEMBLY__ */ diff --git a/include/asm-avr32/sysreg.h b/include/asm-avr32/sysreg.h index f91975f330f..c02bc8304b1 100644 --- a/include/asm-avr32/sysreg.h +++ b/include/asm-avr32/sysreg.h @@ -7,326 +7,281 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#ifndef __ASM_AVR32_SYSREG_H__ -#define __ASM_AVR32_SYSREG_H__ +#ifndef __ASM_AVR32_SYSREG_H +#define __ASM_AVR32_SYSREG_H /* sysreg register offsets */ -#define SYSREG_SR 0x0000 -#define SYSREG_EVBA 0x0004 -#define SYSREG_ACBA 0x0008 -#define SYSREG_CPUCR 0x000c -#define SYSREG_ECR 0x0010 -#define SYSREG_RSR_SUP 0x0014 -#define SYSREG_RSR_INT0 0x0018 -#define SYSREG_RSR_INT1 0x001c -#define SYSREG_RSR_INT2 0x0020 -#define SYSREG_RSR_INT3 0x0024 -#define SYSREG_RSR_EX 0x0028 -#define SYSREG_RSR_NMI 0x002c -#define SYSREG_RSR_DBG 0x0030 -#define SYSREG_RAR_SUP 0x0034 -#define SYSREG_RAR_INT0 0x0038 -#define SYSREG_RAR_INT1 0x003c -#define SYSREG_RAR_INT2 0x0040 -#define SYSREG_RAR_INT3 0x0044 -#define SYSREG_RAR_EX 0x0048 -#define SYSREG_RAR_NMI 0x004c -#define SYSREG_RAR_DBG 0x0050 -#define SYSREG_JECR 0x0054 -#define SYSREG_JOSP 0x0058 -#define SYSREG_JAVA_LV0 0x005c -#define SYSREG_JAVA_LV1 0x0060 -#define SYSREG_JAVA_LV2 0x0064 -#define SYSREG_JAVA_LV3 0x0068 -#define SYSREG_JAVA_LV4 0x006c -#define SYSREG_JAVA_LV5 0x0070 -#define SYSREG_JAVA_LV6 0x0074 -#define SYSREG_JAVA_LV7 0x0078 -#define SYSREG_JTBA 0x007c -#define SYSREG_JBCR 0x0080 -#define SYSREG_CONFIG0 0x0100 -#define SYSREG_CONFIG1 0x0104 -#define SYSREG_COUNT 0x0108 -#define SYSREG_COMPARE 0x010c -#define SYSREG_TLBEHI 0x0110 -#define SYSREG_TLBELO 0x0114 -#define SYSREG_PTBR 0x0118 -#define SYSREG_TLBEAR 0x011c -#define SYSREG_MMUCR 0x0120 -#define SYSREG_TLBARLO 0x0124 -#define SYSREG_TLBARHI 0x0128 -#define SYSREG_PCCNT 0x012c -#define SYSREG_PCNT0 0x0130 -#define SYSREG_PCNT1 0x0134 -#define SYSREG_PCCR 0x0138 -#define SYSREG_BEAR 0x013c +#define SYSREG_SR 0x0000 +#define SYSREG_EVBA 0x0004 +#define SYSREG_ACBA 0x0008 +#define SYSREG_CPUCR 0x000c +#define SYSREG_ECR 0x0010 +#define SYSREG_RSR_SUP 0x0014 +#define SYSREG_RSR_INT0 0x0018 +#define SYSREG_RSR_INT1 0x001c +#define SYSREG_RSR_INT2 0x0020 +#define SYSREG_RSR_INT3 0x0024 +#define SYSREG_RSR_EX 0x0028 +#define SYSREG_RSR_NMI 0x002c +#define SYSREG_RSR_DBG 0x0030 +#define SYSREG_RAR_SUP 0x0034 +#define SYSREG_RAR_INT0 
0x0038 +#define SYSREG_RAR_INT1 0x003c +#define SYSREG_RAR_INT2 0x0040 +#define SYSREG_RAR_INT3 0x0044 +#define SYSREG_RAR_EX 0x0048 +#define SYSREG_RAR_NMI 0x004c +#define SYSREG_RAR_DBG 0x0050 +#define SYSREG_JECR 0x0054 +#define SYSREG_JOSP 0x0058 +#define SYSREG_JAVA_LV0 0x005c +#define SYSREG_JAVA_LV1 0x0060 +#define SYSREG_JAVA_LV2 0x0064 +#define SYSREG_JAVA_LV3 0x0068 +#define SYSREG_JAVA_LV4 0x006c +#define SYSREG_JAVA_LV5 0x0070 +#define SYSREG_JAVA_LV6 0x0074 +#define SYSREG_JAVA_LV7 0x0078 +#define SYSREG_JTBA 0x007c +#define SYSREG_JBCR 0x0080 +#define SYSREG_CONFIG0 0x0100 +#define SYSREG_CONFIG1 0x0104 +#define SYSREG_COUNT 0x0108 +#define SYSREG_COMPARE 0x010c +#define SYSREG_TLBEHI 0x0110 +#define SYSREG_TLBELO 0x0114 +#define SYSREG_PTBR 0x0118 +#define SYSREG_TLBEAR 0x011c +#define SYSREG_MMUCR 0x0120 +#define SYSREG_TLBARLO 0x0124 +#define SYSREG_TLBARHI 0x0128 +#define SYSREG_PCCNT 0x012c +#define SYSREG_PCNT0 0x0130 +#define SYSREG_PCNT1 0x0134 +#define SYSREG_PCCR 0x0138 +#define SYSREG_BEAR 0x013c +#define SYSREG_SABAL 0x0300 +#define SYSREG_SABAH 0x0304 +#define SYSREG_SABD 0x0308 /* Bitfields in SR */ -#define SYSREG_SR_C_OFFSET 0 -#define SYSREG_SR_C_SIZE 1 -#define SYSREG_Z_OFFSET 1 -#define SYSREG_Z_SIZE 1 -#define SYSREG_SR_N_OFFSET 2 -#define SYSREG_SR_N_SIZE 1 -#define SYSREG_SR_V_OFFSET 3 -#define SYSREG_SR_V_SIZE 1 -#define SYSREG_Q_OFFSET 4 -#define SYSREG_Q_SIZE 1 -#define SYSREG_GM_OFFSET 16 -#define SYSREG_GM_SIZE 1 -#define SYSREG_I0M_OFFSET 17 -#define SYSREG_I0M_SIZE 1 -#define SYSREG_I1M_OFFSET 18 -#define SYSREG_I1M_SIZE 1 -#define SYSREG_I2M_OFFSET 19 -#define SYSREG_I2M_SIZE 1 -#define SYSREG_I3M_OFFSET 20 -#define SYSREG_I3M_SIZE 1 -#define SYSREG_EM_OFFSET 21 -#define SYSREG_EM_SIZE 1 -#define SYSREG_M0_OFFSET 22 -#define SYSREG_M0_SIZE 1 -#define SYSREG_M1_OFFSET 23 -#define SYSREG_M1_SIZE 1 -#define SYSREG_M2_OFFSET 24 -#define SYSREG_M2_SIZE 1 -#define SYSREG_SR_D_OFFSET 26 -#define SYSREG_SR_D_SIZE 1 -#define SYSREG_DM_OFFSET 27 -#define SYSREG_DM_SIZE 1 -#define SYSREG_SR_J_OFFSET 28 -#define SYSREG_SR_J_SIZE 1 -#define SYSREG_R_OFFSET 29 -#define SYSREG_R_SIZE 1 -#define SYSREG_H_OFFSET 30 -#define SYSREG_H_SIZE 1 - -/* Bitfields in EVBA */ - -/* Bitfields in ACBA */ +#define SYSREG_SR_C_OFFSET 0 +#define SYSREG_SR_C_SIZE 1 +#define SYSREG_Z_OFFSET 1 +#define SYSREG_Z_SIZE 1 +#define SYSREG_SR_N_OFFSET 2 +#define SYSREG_SR_N_SIZE 1 +#define SYSREG_SR_V_OFFSET 3 +#define SYSREG_SR_V_SIZE 1 +#define SYSREG_Q_OFFSET 4 +#define SYSREG_Q_SIZE 1 +#define SYSREG_L_OFFSET 5 +#define SYSREG_L_SIZE 1 +#define SYSREG_T_OFFSET 14 +#define SYSREG_T_SIZE 1 +#define SYSREG_SR_R_OFFSET 15 +#define SYSREG_SR_R_SIZE 1 +#define SYSREG_GM_OFFSET 16 +#define SYSREG_GM_SIZE 1 +#define SYSREG_I0M_OFFSET 17 +#define SYSREG_I0M_SIZE 1 +#define SYSREG_I1M_OFFSET 18 +#define SYSREG_I1M_SIZE 1 +#define SYSREG_I2M_OFFSET 19 +#define SYSREG_I2M_SIZE 1 +#define SYSREG_I3M_OFFSET 20 +#define SYSREG_I3M_SIZE 1 +#define SYSREG_EM_OFFSET 21 +#define SYSREG_EM_SIZE 1 +#define SYSREG_M0_OFFSET 22 +#define SYSREG_M0_SIZE 1 +#define SYSREG_M1_OFFSET 23 +#define SYSREG_M1_SIZE 1 +#define SYSREG_M2_OFFSET 24 +#define SYSREG_M2_SIZE 1 +#define SYSREG_SR_D_OFFSET 26 +#define SYSREG_SR_D_SIZE 1 +#define SYSREG_DM_OFFSET 27 +#define SYSREG_DM_SIZE 1 +#define SYSREG_SR_J_OFFSET 28 +#define SYSREG_SR_J_SIZE 1 +#define SYSREG_H_OFFSET 29 +#define SYSREG_H_SIZE 1 /* Bitfields in CPUCR */ -#define SYSREG_BI_OFFSET 0 -#define SYSREG_BI_SIZE 1 -#define SYSREG_BE_OFFSET 1 -#define 
SYSREG_BE_SIZE 1 -#define SYSREG_FE_OFFSET 2 -#define SYSREG_FE_SIZE 1 -#define SYSREG_RE_OFFSET 3 -#define SYSREG_RE_SIZE 1 -#define SYSREG_IBE_OFFSET 4 -#define SYSREG_IBE_SIZE 1 -#define SYSREG_IEE_OFFSET 5 -#define SYSREG_IEE_SIZE 1 - -/* Bitfields in ECR */ -#define SYSREG_ECR_OFFSET 0 -#define SYSREG_ECR_SIZE 32 - -/* Bitfields in RSR_SUP */ - -/* Bitfields in RSR_INT0 */ - -/* Bitfields in RSR_INT1 */ - -/* Bitfields in RSR_INT2 */ - -/* Bitfields in RSR_INT3 */ - -/* Bitfields in RSR_EX */ - -/* Bitfields in RSR_NMI */ - -/* Bitfields in RSR_DBG */ - -/* Bitfields in RAR_SUP */ - -/* Bitfields in RAR_INT0 */ - -/* Bitfields in RAR_INT1 */ - -/* Bitfields in RAR_INT2 */ - -/* Bitfields in RAR_INT3 */ - -/* Bitfields in RAR_EX */ - -/* Bitfields in RAR_NMI */ - -/* Bitfields in RAR_DBG */ - -/* Bitfields in JECR */ - -/* Bitfields in JOSP */ - -/* Bitfields in JAVA_LV0 */ - -/* Bitfields in JAVA_LV1 */ - -/* Bitfields in JAVA_LV2 */ - -/* Bitfields in JAVA_LV3 */ - -/* Bitfields in JAVA_LV4 */ - -/* Bitfields in JAVA_LV5 */ - -/* Bitfields in JAVA_LV6 */ - -/* Bitfields in JAVA_LV7 */ - -/* Bitfields in JTBA */ - -/* Bitfields in JBCR */ +#define SYSREG_BI_OFFSET 0 +#define SYSREG_BI_SIZE 1 +#define SYSREG_BE_OFFSET 1 +#define SYSREG_BE_SIZE 1 +#define SYSREG_FE_OFFSET 2 +#define SYSREG_FE_SIZE 1 +#define SYSREG_RE_OFFSET 3 +#define SYSREG_RE_SIZE 1 +#define SYSREG_IBE_OFFSET 4 +#define SYSREG_IBE_SIZE 1 +#define SYSREG_IEE_OFFSET 5 +#define SYSREG_IEE_SIZE 1 /* Bitfields in CONFIG0 */ -#define SYSREG_CONFIG0_D_OFFSET 1 -#define SYSREG_CONFIG0_D_SIZE 1 -#define SYSREG_CONFIG0_S_OFFSET 2 -#define SYSREG_CONFIG0_S_SIZE 1 -#define SYSREG_O_OFFSET 3 -#define SYSREG_O_SIZE 1 -#define SYSREG_P_OFFSET 4 -#define SYSREG_P_SIZE 1 -#define SYSREG_CONFIG0_J_OFFSET 5 -#define SYSREG_CONFIG0_J_SIZE 1 -#define SYSREG_F_OFFSET 6 -#define SYSREG_F_SIZE 1 -#define SYSREG_MMUT_OFFSET 7 -#define SYSREG_MMUT_SIZE 3 -#define SYSREG_AR_OFFSET 10 -#define SYSREG_AR_SIZE 3 -#define SYSREG_AT_OFFSET 13 -#define SYSREG_AT_SIZE 3 -#define SYSREG_PROCESSORREVISION_OFFSET 16 -#define SYSREG_PROCESSORREVISION_SIZE 8 -#define SYSREG_PROCESSORID_OFFSET 24 -#define SYSREG_PROCESSORID_SIZE 8 +#define SYSREG_CONFIG0_R_OFFSET 0 +#define SYSREG_CONFIG0_R_SIZE 1 +#define SYSREG_CONFIG0_D_OFFSET 1 +#define SYSREG_CONFIG0_D_SIZE 1 +#define SYSREG_CONFIG0_S_OFFSET 2 +#define SYSREG_CONFIG0_S_SIZE 1 +#define SYSREG_CONFIG0_O_OFFSET 3 +#define SYSREG_CONFIG0_O_SIZE 1 +#define SYSREG_CONFIG0_P_OFFSET 4 +#define SYSREG_CONFIG0_P_SIZE 1 +#define SYSREG_CONFIG0_J_OFFSET 5 +#define SYSREG_CONFIG0_J_SIZE 1 +#define SYSREG_CONFIG0_F_OFFSET 6 +#define SYSREG_CONFIG0_F_SIZE 1 +#define SYSREG_MMUT_OFFSET 7 +#define SYSREG_MMUT_SIZE 3 +#define SYSREG_AR_OFFSET 10 +#define SYSREG_AR_SIZE 3 +#define SYSREG_AT_OFFSET 13 +#define SYSREG_AT_SIZE 3 +#define SYSREG_PROCESSORREVISION_OFFSET 16 +#define SYSREG_PROCESSORREVISION_SIZE 8 +#define SYSREG_PROCESSORID_OFFSET 24 +#define SYSREG_PROCESSORID_SIZE 8 /* Bitfields in CONFIG1 */ -#define SYSREG_DASS_OFFSET 0 -#define SYSREG_DASS_SIZE 3 -#define SYSREG_DLSZ_OFFSET 3 -#define SYSREG_DLSZ_SIZE 3 -#define SYSREG_DSET_OFFSET 6 -#define SYSREG_DSET_SIZE 4 -#define SYSREG_IASS_OFFSET 10 -#define SYSREG_IASS_SIZE 2 -#define SYSREG_ILSZ_OFFSET 13 -#define SYSREG_ILSZ_SIZE 3 -#define SYSREG_ISET_OFFSET 16 -#define SYSREG_ISET_SIZE 4 -#define SYSREG_DMMUSZ_OFFSET 20 -#define SYSREG_DMMUSZ_SIZE 6 -#define SYSREG_IMMUSZ_OFFSET 26 -#define SYSREG_IMMUSZ_SIZE 6 - -/* Bitfields in COUNT */ - -/* Bitfields 
in COMPARE */ +#define SYSREG_DASS_OFFSET 0 +#define SYSREG_DASS_SIZE 3 +#define SYSREG_DLSZ_OFFSET 3 +#define SYSREG_DLSZ_SIZE 3 +#define SYSREG_DSET_OFFSET 6 +#define SYSREG_DSET_SIZE 4 +#define SYSREG_IASS_OFFSET 10 +#define SYSREG_IASS_SIZE 3 +#define SYSREG_ILSZ_OFFSET 13 +#define SYSREG_ILSZ_SIZE 3 +#define SYSREG_ISET_OFFSET 16 +#define SYSREG_ISET_SIZE 4 +#define SYSREG_DMMUSZ_OFFSET 20 +#define SYSREG_DMMUSZ_SIZE 6 +#define SYSREG_IMMUSZ_OFFSET 26 +#define SYSREG_IMMUSZ_SIZE 6 /* Bitfields in TLBEHI */ -#define SYSREG_ASID_OFFSET 0 -#define SYSREG_ASID_SIZE 8 -#define SYSREG_TLBEHI_I_OFFSET 8 -#define SYSREG_TLBEHI_I_SIZE 1 -#define SYSREG_TLBEHI_V_OFFSET 9 -#define SYSREG_TLBEHI_V_SIZE 1 -#define SYSREG_VPN_OFFSET 10 -#define SYSREG_VPN_SIZE 22 +#define SYSREG_ASID_OFFSET 0 +#define SYSREG_ASID_SIZE 8 +#define SYSREG_TLBEHI_I_OFFSET 8 +#define SYSREG_TLBEHI_I_SIZE 1 +#define SYSREG_TLBEHI_V_OFFSET 9 +#define SYSREG_TLBEHI_V_SIZE 1 +#define SYSREG_VPN_OFFSET 10 +#define SYSREG_VPN_SIZE 22 /* Bitfields in TLBELO */ -#define SYSREG_W_OFFSET 0 -#define SYSREG_W_SIZE 1 -#define SYSREG_TLBELO_D_OFFSET 1 -#define SYSREG_TLBELO_D_SIZE 1 -#define SYSREG_SZ_OFFSET 2 -#define SYSREG_SZ_SIZE 2 -#define SYSREG_AP_OFFSET 4 -#define SYSREG_AP_SIZE 3 -#define SYSREG_B_OFFSET 7 -#define SYSREG_B_SIZE 1 -#define SYSREG_G_OFFSET 8 -#define SYSREG_G_SIZE 1 -#define SYSREG_TLBELO_C_OFFSET 9 -#define SYSREG_TLBELO_C_SIZE 1 -#define SYSREG_PFN_OFFSET 10 -#define SYSREG_PFN_SIZE 22 - -/* Bitfields in PTBR */ - -/* Bitfields in TLBEAR */ +#define SYSREG_W_OFFSET 0 +#define SYSREG_W_SIZE 1 +#define SYSREG_TLBELO_D_OFFSET 1 +#define SYSREG_TLBELO_D_SIZE 1 +#define SYSREG_SZ_OFFSET 2 +#define SYSREG_SZ_SIZE 2 +#define SYSREG_AP_OFFSET 4 +#define SYSREG_AP_SIZE 3 +#define SYSREG_B_OFFSET 7 +#define SYSREG_B_SIZE 1 +#define SYSREG_G_OFFSET 8 +#define SYSREG_G_SIZE 1 +#define SYSREG_TLBELO_C_OFFSET 9 +#define SYSREG_TLBELO_C_SIZE 1 +#define SYSREG_PFN_OFFSET 10 +#define SYSREG_PFN_SIZE 22 /* Bitfields in MMUCR */ -#define SYSREG_E_OFFSET 0 -#define SYSREG_E_SIZE 1 -#define SYSREG_M_OFFSET 1 -#define SYSREG_M_SIZE 1 -#define SYSREG_MMUCR_I_OFFSET 2 -#define SYSREG_MMUCR_I_SIZE 1 -#define SYSREG_MMUCR_N_OFFSET 3 -#define SYSREG_MMUCR_N_SIZE 1 -#define SYSREG_MMUCR_S_OFFSET 4 -#define SYSREG_MMUCR_S_SIZE 1 -#define SYSREG_DLA_OFFSET 8 -#define SYSREG_DLA_SIZE 6 -#define SYSREG_DRP_OFFSET 14 -#define SYSREG_DRP_SIZE 6 -#define SYSREG_ILA_OFFSET 20 -#define SYSREG_ILA_SIZE 6 -#define SYSREG_IRP_OFFSET 26 -#define SYSREG_IRP_SIZE 6 - -/* Bitfields in TLBARLO */ - -/* Bitfields in TLBARHI */ - -/* Bitfields in PCCNT */ - -/* Bitfields in PCNT0 */ - -/* Bitfields in PCNT1 */ +#define SYSREG_E_OFFSET 0 +#define SYSREG_E_SIZE 1 +#define SYSREG_M_OFFSET 1 +#define SYSREG_M_SIZE 1 +#define SYSREG_MMUCR_I_OFFSET 2 +#define SYSREG_MMUCR_I_SIZE 1 +#define SYSREG_MMUCR_N_OFFSET 3 +#define SYSREG_MMUCR_N_SIZE 1 +#define SYSREG_MMUCR_S_OFFSET 4 +#define SYSREG_MMUCR_S_SIZE 1 +#define SYSREG_DLA_OFFSET 8 +#define SYSREG_DLA_SIZE 6 +#define SYSREG_DRP_OFFSET 14 +#define SYSREG_DRP_SIZE 6 +#define SYSREG_ILA_OFFSET 20 +#define SYSREG_ILA_SIZE 6 +#define SYSREG_IRP_OFFSET 26 +#define SYSREG_IRP_SIZE 6 /* Bitfields in PCCR */ - -/* Bitfields in BEAR */ +#define SYSREG_PCCR_R_OFFSET 1 +#define SYSREG_PCCR_R_SIZE 1 +#define SYSREG_PCCR_C_OFFSET 2 +#define SYSREG_PCCR_C_SIZE 1 +#define SYSREG_PCCR_S_OFFSET 3 +#define SYSREG_PCCR_S_SIZE 1 +#define SYSREG_IEC_OFFSET 4 +#define SYSREG_IEC_SIZE 1 +#define SYSREG_IE0_OFFSET 5 +#define 
SYSREG_IE0_SIZE 1 +#define SYSREG_IE1_OFFSET 6 +#define SYSREG_IE1_SIZE 1 +#define SYSREG_FC_OFFSET 8 +#define SYSREG_FC_SIZE 1 +#define SYSREG_F0_OFFSET 9 +#define SYSREG_F0_SIZE 1 +#define SYSREG_F1_OFFSET 10 +#define SYSREG_F1_SIZE 1 +#define SYSREG_CONF0_OFFSET 12 +#define SYSREG_CONF0_SIZE 6 +#define SYSREG_CONF1_OFFSET 18 +#define SYSREG_CONF1_SIZE 6 /* Constants for ECR */ -#define ECR_UNRECOVERABLE 0 -#define ECR_TLB_MULTIPLE 1 -#define ECR_BUS_ERROR_WRITE 2 -#define ECR_BUS_ERROR_READ 3 -#define ECR_NMI 4 -#define ECR_ADDR_ALIGN_X 5 -#define ECR_PROTECTION_X 6 -#define ECR_DEBUG 7 -#define ECR_ILLEGAL_OPCODE 8 -#define ECR_UNIMPL_INSTRUCTION 9 -#define ECR_PRIVILEGE_VIOLATION 10 -#define ECR_FPE 11 -#define ECR_COPROC_ABSENT 12 -#define ECR_ADDR_ALIGN_R 13 -#define ECR_ADDR_ALIGN_W 14 -#define ECR_PROTECTION_R 15 -#define ECR_PROTECTION_W 16 -#define ECR_DTLB_MODIFIED 17 -#define ECR_TLB_MISS_X 20 -#define ECR_TLB_MISS_R 24 -#define ECR_TLB_MISS_W 28 +#define ECR_UNRECOVERABLE 0 +#define ECR_TLB_MULTIPLE 1 +#define ECR_BUS_ERROR_WRITE 2 +#define ECR_BUS_ERROR_READ 3 +#define ECR_NMI 4 +#define ECR_ADDR_ALIGN_X 5 +#define ECR_PROTECTION_X 6 +#define ECR_DEBUG 7 +#define ECR_ILLEGAL_OPCODE 8 +#define ECR_UNIMPL_INSTRUCTION 9 +#define ECR_PRIVILEGE_VIOLATION 10 +#define ECR_FPE 11 +#define ECR_COPROC_ABSENT 12 +#define ECR_ADDR_ALIGN_R 13 +#define ECR_ADDR_ALIGN_W 14 +#define ECR_PROTECTION_R 15 +#define ECR_PROTECTION_W 16 +#define ECR_DTLB_MODIFIED 17 +#define ECR_TLB_MISS_X 20 +#define ECR_TLB_MISS_R 24 +#define ECR_TLB_MISS_W 28 /* Bit manipulation macros */ -#define SYSREG_BIT(name) (1 << SYSREG_##name##_OFFSET) -#define SYSREG_BF(name,value) (((value) & ((1 << SYSREG_##name##_SIZE) - 1)) << SYSREG_##name##_OFFSET) -#define SYSREG_BFEXT(name,value) (((value) >> SYSREG_##name##_OFFSET) & ((1 << SYSREG_##name##_SIZE) - 1)) -#define SYSREG_BFINS(name,value,old) (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1) << SYSREG_##name##_OFFSET)) | SYSREG_BF(name,value)) +#define SYSREG_BIT(name) \ + (1 << SYSREG_##name##_OFFSET) +#define SYSREG_BF(name,value) \ + (((value) & ((1 << SYSREG_##name##_SIZE) - 1)) \ + << SYSREG_##name##_OFFSET) +#define SYSREG_BFEXT(name,value)\ + (((value) >> SYSREG_##name##_OFFSET) \ + & ((1 << SYSREG_##name##_SIZE) - 1)) +#define SYSREG_BFINS(name,value,old) \ + (((old) & ~(((1 << SYSREG_##name##_SIZE) - 1) \ + << SYSREG_##name##_OFFSET)) \ + | SYSREG_BF(name,value)) +/* Register access macros */ #ifdef __CHECKER__ extern unsigned long __builtin_mfsr(unsigned long reg); extern void __builtin_mtsr(unsigned long reg, unsigned long value); #endif -/* Register access macros */ -#define sysreg_read(reg) __builtin_mfsr(SYSREG_##reg) -#define sysreg_write(reg, value) __builtin_mtsr(SYSREG_##reg, value) +#define sysreg_read(reg) __builtin_mfsr(SYSREG_##reg) +#define sysreg_write(reg, value) __builtin_mtsr(SYSREG_##reg, value) -#endif /* __ASM_AVR32_SYSREG_H__ */ +#endif /* __ASM_AVR32_SYSREG_H */ diff --git a/include/asm-avr32/system.h b/include/asm-avr32/system.h index ac596058697..a8236bacc87 100644 --- a/include/asm-avr32/system.h +++ b/include/asm-avr32/system.h @@ -9,6 +9,7 @@ #define __ASM_AVR32_SYSTEM_H #include <linux/compiler.h> +#include <linux/linkage.h> #include <linux/types.h> #include <asm/ptrace.h> @@ -140,15 +141,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, sizeof(*(ptr)))) struct pt_regs; -extern void __die(const char *, struct pt_regs *, unsigned long, - const char *, const char *, unsigned long); -extern void 
__die_if_kernel(const char *, struct pt_regs *, unsigned long, - const char *, const char *, unsigned long); - -#define die(msg, regs, err) \ - __die(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__) -#define die_if_kernel(msg, regs, err) \ - __die_if_kernel(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__) +void NORET_TYPE die(const char *str, struct pt_regs *regs, long err); +void _exception(long signr, struct pt_regs *regs, int code, + unsigned long addr); #define arch_align_stack(x) (x) diff --git a/include/asm-avr32/thread_info.h b/include/asm-avr32/thread_info.h index d1f5b35ebd5..a2e606dd4f4 100644 --- a/include/asm-avr32/thread_info.h +++ b/include/asm-avr32/thread_info.h @@ -83,6 +83,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SINGLE_STEP 6 /* single step after next break */ #define TIF_MEMDIE 7 #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal */ +#define TIF_CPU_GOING_TO_SLEEP 9 /* CPU is entering sleep 0 mode */ #define TIF_USERSPACE 31 /* true if FS sets userspace */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) @@ -94,6 +95,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SINGLE_STEP (1 << TIF_SINGLE_STEP) #define _TIF_MEMDIE (1 << TIF_MEMDIE) #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) +#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP) /* XXX: These two masks must never span more than 16 bits! */ /* work to do on interrupt/exception return */ diff --git a/include/asm-avr32/uaccess.h b/include/asm-avr32/uaccess.h index 74a679e9098..ed092395215 100644 --- a/include/asm-avr32/uaccess.h +++ b/include/asm-avr32/uaccess.h @@ -181,24 +181,23 @@ extern int __put_user_bad(void); #define __get_user_nocheck(x, ptr, size) \ ({ \ - typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \ + unsigned long __gu_val = 0; \ int __gu_err = 0; \ \ switch (size) { \ case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break; \ case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break; \ case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break; \ - case 8: __get_user_asm("d", __gu_val, ptr, __gu_err); break; \ default: __gu_err = __get_user_bad(); break; \ } \ \ - x = __gu_val; \ + x = (typeof(*(ptr)))__gu_val; \ __gu_err; \ }) #define __get_user_check(x, ptr, size) \ ({ \ - typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \ + unsigned long __gu_val = 0; \ const typeof(*(ptr)) __user * __gu_addr = (ptr); \ int __gu_err = 0; \ \ @@ -216,10 +215,6 @@ extern int __put_user_bad(void); __get_user_asm("w", __gu_val, __gu_addr, \ __gu_err); \ break; \ - case 8: \ - __get_user_asm("d", __gu_val, __gu_addr, \ - __gu_err); \ - break; \ default: \ __gu_err = __get_user_bad(); \ break; \ @@ -227,7 +222,7 @@ extern int __put_user_bad(void); } else { \ __gu_err = -EFAULT; \ } \ - x = __gu_val; \ + x = (typeof(*(ptr)))__gu_val; \ __gu_err; \ }) diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 6d7e279b149..dc8f99ee305 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -139,8 +139,15 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres #define pte_same(A,B) (pte_val(A) == pte_val(B)) #endif -#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY -#define page_test_and_clear_dirty(page) (0) +#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY +#define page_test_dirty(page) (0) +#endif + +#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY +#define page_clear_dirty(page) do { } while (0) +#endif + +#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY 
#define pte_maybe_dirty(pte) pte_dirty(pte) #else #define pte_maybe_dirty(pte) (1) diff --git a/include/asm-s390/bug.h b/include/asm-s390/bug.h index 87689836394..838684dc6d3 100644 --- a/include/asm-s390/bug.h +++ b/include/asm-s390/bug.h @@ -1,27 +1,70 @@ -#ifndef _S390_BUG_H -#define _S390_BUG_H +#ifndef _ASM_S390_BUG_H +#define _ASM_S390_BUG_H #include <linux/kernel.h> #ifdef CONFIG_BUG -static inline __attribute__((noreturn)) void __do_illegal_op(void) -{ -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3) - __builtin_trap(); +#ifdef CONFIG_64BIT +#define S390_LONG ".quad" #else - asm volatile(".long 0"); +#define S390_LONG ".long" #endif -} -#define BUG() do { \ - printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ - __do_illegal_op(); \ +#ifdef CONFIG_DEBUG_BUGVERBOSE + +#define __EMIT_BUG(x) do { \ + asm volatile( \ + "0: j 0b+2\n" \ + "1:\n" \ + ".section .rodata.str,\"aMS\",@progbits,1\n" \ + "2: .asciz \""__FILE__"\"\n" \ + ".previous\n" \ + ".section __bug_table,\"a\"\n" \ + "3:\t" S390_LONG "\t1b,2b\n" \ + " .short %0,%1\n" \ + " .org 3b+%2\n" \ + ".previous\n" \ + : : "i" (__LINE__), \ + "i" (x), \ + "i" (sizeof(struct bug_entry))); \ } while (0) +#else /* CONFIG_DEBUG_BUGVERBOSE */ + +#define __EMIT_BUG(x) do { \ + asm volatile( \ + "0: j 0b+2\n" \ + "1:\n" \ + ".section __bug_table,\"a\"\n" \ + "2:\t" S390_LONG "\t1b\n" \ + " .short %0\n" \ + " .org 2b+%1\n" \ + ".previous\n" \ + : : "i" (x), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + +#define BUG() __EMIT_BUG(0) + +#define WARN_ON(x) ({ \ + typeof(x) __ret_warn_on = (x); \ + if (__builtin_constant_p(__ret_warn_on)) { \ + if (__ret_warn_on) \ + __EMIT_BUG(BUGFLAG_WARNING); \ + } else { \ + if (unlikely(__ret_warn_on)) \ + __EMIT_BUG(BUGFLAG_WARNING); \ + } \ + unlikely(__ret_warn_on); \ +}) + #define HAVE_ARCH_BUG -#endif +#define HAVE_ARCH_WARN_ON +#endif /* CONFIG_BUG */ #include <asm-generic/bug.h> -#endif +#endif /* _ASM_S390_BUG_H */ diff --git a/include/asm-s390/ccwgroup.h b/include/asm-s390/ccwgroup.h index d2f9c0d53a9..925b3ddfa14 100644 --- a/include/asm-s390/ccwgroup.h +++ b/include/asm-s390/ccwgroup.h @@ -11,6 +11,7 @@ struct ccwgroup_device { CCWGROUP_ONLINE, } state; atomic_t onoff; + struct mutex reg_mutex; unsigned int count; /* number of attached slave devices */ struct device dev; /* master device */ struct ccw_device *cdev[0]; /* variable number, allocate as needed */ diff --git a/include/asm-s390/chpid.h b/include/asm-s390/chpid.h new file mode 100644 index 00000000000..b203336fd89 --- /dev/null +++ b/include/asm-s390/chpid.h @@ -0,0 +1,53 @@ +/* + * drivers/s390/cio/chpid.h + * + * Copyright IBM Corp. 
2007 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#ifndef _ASM_S390_CHPID_H +#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H + +#include <linux/string.h> +#include <asm/types.h> +#include <asm/cio.h> + +#define __MAX_CHPID 255 + +struct chp_id { + u8 reserved1; + u8 cssid; + u8 reserved2; + u8 id; +} __attribute__((packed)); + +static inline void chp_id_init(struct chp_id *chpid) +{ + memset(chpid, 0, sizeof(struct chp_id)); +} + +static inline int chp_id_is_equal(struct chp_id *a, struct chp_id *b) +{ + return (a->id == b->id) && (a->cssid == b->cssid); +} + +static inline void chp_id_next(struct chp_id *chpid) +{ + if (chpid->id < __MAX_CHPID) + chpid->id++; + else { + chpid->id = 0; + chpid->cssid++; + } +} + +static inline int chp_id_is_valid(struct chp_id *chpid) +{ + return (chpid->cssid <= __MAX_CSSID); +} + + +#define chp_id_for_each(c) \ + for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c)) + +#endif /* _ASM_S390_CHPID_H */ diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h index d9278503098..f738d282758 100644 --- a/include/asm-s390/cio.h +++ b/include/asm-s390/cio.h @@ -13,6 +13,7 @@ #ifdef __KERNEL__ #define LPM_ANYPATH 0xff +#define __MAX_CSSID 0 /* * subchannel status word @@ -292,6 +293,13 @@ extern void css_schedule_reprobe(void); extern void reipl_ccw_dev(struct ccw_dev_id *id); +struct cio_iplinfo { + u16 devno; + int is_qdio; +}; + +extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo); + #endif #endif diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h index 0eb64083480..bdcd448d43f 100644 --- a/include/asm-s390/ipl.h +++ b/include/asm-s390/ipl.h @@ -8,6 +8,8 @@ #define _ASM_S390_IPL_H #include <asm/types.h> +#include <asm/cio.h> +#include <asm/setup.h> #define IPL_PARMBLOCK_ORIGIN 0x2000 @@ -74,12 +76,12 @@ struct ipl_parameter_block { } __attribute__((packed)); /* - * IPL validity flags and parameters as detected in head.S + * IPL validity flags */ extern u32 ipl_flags; -extern u16 ipl_devno; extern u32 dump_prefix_page; + extern void do_reipl(void); extern void ipl_save_parameters(void); @@ -89,6 +91,35 @@ enum { IPL_NSS_VALID = 4, }; +enum ipl_type { + IPL_TYPE_UNKNOWN = 1, + IPL_TYPE_CCW = 2, + IPL_TYPE_FCP = 4, + IPL_TYPE_FCP_DUMP = 8, + IPL_TYPE_NSS = 16, +}; + +struct ipl_info +{ + enum ipl_type type; + union { + struct { + struct ccw_dev_id dev_id; + } ccw; + struct { + struct ccw_dev_id dev_id; + u64 wwpn; + u64 lun; + } fcp; + struct { + char name[NSS_NAME_SIZE + 1]; + } nss; + } data; +}; + +extern struct ipl_info ipl_info; +extern void setup_ipl_info(void); + /* * DIAG 308 support */ diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 4a31d0a7ee8..ffc9788a21a 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -147,6 +147,52 @@ void pgm_check_handler(void); void mcck_int_handler(void); void io_int_handler(void); +struct save_area_s390 { + u32 ext_save; + u64 timer; + u64 clk_cmp; + u8 pad1[24]; + u8 psw[8]; + u32 pref_reg; + u8 pad2[20]; + u32 acc_regs[16]; + u64 fp_regs[4]; + u32 gp_regs[16]; + u32 ctrl_regs[16]; +} __attribute__((packed)); + +struct save_area_s390x { + u64 fp_regs[16]; + u64 gp_regs[16]; + u8 psw[16]; + u8 pad1[8]; + u32 pref_reg; + u32 fp_ctrl_reg; + u8 pad2[4]; + u32 tod_reg; + u64 timer; + u64 clk_cmp; + u8 pad3[8]; + u32 acc_regs[16]; + u64 ctrl_regs[16]; +} __attribute__((packed)); + +union save_area { + struct save_area_s390 s390; + struct save_area_s390x s390x; +}; + +#define SAVE_AREA_BASE_S390 0xd4 +#define SAVE_AREA_BASE_S390X 
0x1200 + +#ifndef __s390x__ +#define SAVE_AREA_SIZE sizeof(struct save_area_s390) +#define SAVE_AREA_BASE SAVE_AREA_BASE_S390 +#else +#define SAVE_AREA_SIZE sizeof(struct save_area_s390x) +#define SAVE_AREA_BASE SAVE_AREA_BASE_S390X +#endif + struct _lowcore { #ifndef __s390x__ diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index 13c16546eff..8fe8d42e64c 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h @@ -753,14 +753,14 @@ ptep_establish(struct vm_area_struct *vma, * should therefore only be called if it is not mapped in any * address space. */ -static inline int page_test_and_clear_dirty(struct page *page) +static inline int page_test_dirty(struct page *page) { - unsigned long physpage = page_to_phys(page); - int skey = page_get_storage_key(physpage); + return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0; +} - if (skey & _PAGE_CHANGED) - page_set_storage_key(physpage, skey & ~_PAGE_CHANGED); - return skey & _PAGE_CHANGED; +static inline void page_clear_dirty(struct page *page) +{ + page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); } /* @@ -953,7 +953,8 @@ extern void memmap_init(unsigned long, int, unsigned long, unsigned long); #define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME -#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY +#define __HAVE_ARCH_PAGE_TEST_DIRTY +#define __HAVE_ARCH_PAGE_CLEAR_DIRTY #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG #include <asm-generic/pgtable.h> diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 33b80ced4bc..e0fcea8c64c 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -57,6 +57,7 @@ struct cpuinfo_S390 extern void s390_adjust_jiffies(void); extern void print_cpu_info(struct cpuinfo_S390 *); +extern int get_cpu_capability(unsigned int *); /* Lazy FPU handling on uni-processor */ extern struct task_struct *last_task_used_math; @@ -196,6 +197,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t); extern char *task_show_regs(struct task_struct *task, char *buffer); extern void show_registers(struct pt_regs *regs); +extern void show_code(struct pt_regs *regs); extern void show_trace(struct task_struct *task, unsigned long *sp); unsigned long get_wchan(struct task_struct *p); diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h index 468b9701840..21ed6477321 100644 --- a/include/asm-s390/sclp.h +++ b/include/asm-s390/sclp.h @@ -9,6 +9,7 @@ #define _ASM_S390_SCLP_H #include <linux/types.h> +#include <asm/chpid.h> struct sccb_header { u16 length; @@ -33,7 +34,20 @@ struct sclp_readinfo_sccb { u8 _reserved3[4096 - 112]; /* 112-4095 */ } __attribute__((packed, aligned(4096))); +#define SCLP_CHP_INFO_MASK_SIZE 32 + +struct sclp_chp_info { + u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; + u8 standby[SCLP_CHP_INFO_MASK_SIZE]; + u8 configured[SCLP_CHP_INFO_MASK_SIZE]; +}; + extern struct sclp_readinfo_sccb s390_readinfo_sccb; extern void sclp_readinfo_early(void); +extern int sclp_sdias_blk_count(void); +extern int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); +extern int sclp_chp_configure(struct chp_id chpid); +extern int sclp_chp_deconfigure(struct chp_id chpid); +extern int sclp_chp_read_info(struct sclp_chp_info *info); #endif /* _ASM_S390_SCLP_H */ diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h index 44c7aee2bd3..a76a6b8fd88 100644 --- a/include/asm-s390/setup.h +++ b/include/asm-s390/setup.h @@ -40,6 +40,7 @@ struct mem_chunk { }; extern 
struct mem_chunk memory_chunk[]; +extern unsigned long real_memory_size; #ifdef CONFIG_S390_SWITCH_AMODE extern unsigned int switch_amode; @@ -77,6 +78,7 @@ extern unsigned long machine_flags; #endif /* __s390x__ */ #define MACHINE_HAS_SCLP (!MACHINE_IS_P390) +#define ZFCPDUMP_HSA_SIZE (32UL<<20) /* * Console mode. Override with conmode= diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index b957e4cda46..0a28e6d6ef4 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -54,9 +54,6 @@ extern int smp_call_function_on(void (*func) (void *info), void *info, #define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr) -extern int smp_get_cpu(cpumask_t cpu_map); -extern void smp_put_cpu(int cpu); - static inline __u16 hard_smp_processor_id(void) { __u16 cpu_address; @@ -114,9 +111,8 @@ static inline void smp_send_stop(void) } #define smp_cpu_not_running(cpu) 1 -#define smp_get_cpu(cpu) ({ 0; }) -#define smp_put_cpu(cpu) ({ 0; }) #define smp_setup_cpu_possible_map() do { } while (0) #endif +extern union save_area *zfcpdump_save_areas[NR_CPUS + 1]; #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 9cd0d0eaf52..96326594e55 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -133,7 +133,7 @@ static inline void SetPageUptodate(struct page *page) { if (!test_and_set_bit(PG_uptodate, &page->flags)) - page_test_and_clear_dirty(page); + page_clear_dirty(page); } #else #define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index f469e3cd08e..a794945fd19 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -67,12 +67,12 @@ static inline long sync_writeback_pages(void) /* * Start background writeback (via pdflush) at this percentage */ -int dirty_background_ratio = 10; +int dirty_background_ratio = 5; /* * The generator of dirty data starts writeback at this percentage */ -int vm_dirty_ratio = 40; +int vm_dirty_ratio = 10; /* * The interval between `kupdate'-style writebacks, in jiffies diff --git a/mm/rmap.c b/mm/rmap.c index b82146e6dfc..59da5b734c8 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -498,8 +498,10 @@ int page_mkclean(struct page *page) struct address_space *mapping = page_mapping(page); if (mapping) ret = page_mkclean_file(mapping, page); - if (page_test_and_clear_dirty(page)) + if (page_test_dirty(page)) { + page_clear_dirty(page); ret = 1; + } } return ret; @@ -605,8 +607,10 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma) * Leaving it set also helps swapoff to reinstate ptes * faster for those pages still in swapcache. */ - if (page_test_and_clear_dirty(page)) + if (page_test_dirty(page)) { + page_clear_dirty(page); set_page_dirty(page); + } __dec_zone_page_state(page, PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); } |
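
The asm-avr32/processor.h hunk above adds a "features" bitmask to struct avr32_cpuinfo along with the AVR32_FEATURE_* flag values. A minimal sketch of how such a mask is tested; the flag values are restated from the patch, while the stub struct and the helper name are purely illustrative and not part of the patch:

/* Flag values restated from the patched <asm-avr32/processor.h>. */
#define AVR32_FEATURE_DSP	(1 << 1)
#define AVR32_FEATURE_FPU	(1 << 6)

/* Illustrative stand-in for the relevant part of struct avr32_cpuinfo. */
struct cpuinfo_stub {
	unsigned long features;
};

/* Returns nonzero when the CPU advertises a hardware FPU. */
static inline int cpu_has_fpu(const struct cpuinfo_stub *cpu)
{
	return (cpu->features & AVR32_FEATURE_FPU) != 0;
}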
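
The asm-avr32/sysreg.h rework above keeps the SYSREG_BIT/SYSREG_BF/SYSREG_BFEXT/SYSREG_BFINS helpers and only reflows their definitions. A short sketch, assuming only what the patch defines, of how a caller decodes CONFIG0 fields with SYSREG_BFEXT(); the field offsets and the macro are restated from the patch, and the two helper functions are illustrative:

/* Field definitions restated from the patched <asm-avr32/sysreg.h>. */
#define SYSREG_AT_OFFSET		13
#define SYSREG_AT_SIZE			3
#define SYSREG_PROCESSORID_OFFSET	24
#define SYSREG_PROCESSORID_SIZE		8

/* Bit-field extraction macro, as defined in the patch. */
#define SYSREG_BFEXT(name,value)				\
	(((value) >> SYSREG_##name##_OFFSET)			\
	 & ((1 << SYSREG_##name##_SIZE) - 1))

/* Decode two CONFIG0 fields from a raw register value, e.g. one
 * obtained with sysreg_read(CONFIG0). */
static inline unsigned int config0_processor_id(unsigned long config0)
{
	return SYSREG_BFEXT(PROCESSORID, config0);	/* 8-bit processor ID */
}

static inline unsigned int config0_at(unsigned long config0)
{
	return SYSREG_BFEXT(AT, config0);		/* 3-bit AT field */
}

SYSREG_BF() and SYSREG_BFINS() compose the inverse operation for building a read-modify-write value before sysreg_write().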
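
The asm-s390/bug.h hunk above gives s390 its own WARN_ON(): the condition is evaluated once, a constant-false condition lets the compiler drop the report entirely, and the truth value is handed back to the caller. A hedged userspace sketch of the same shape; __EMIT_WARNING() is only a printf stand-in for the real __EMIT_BUG(BUGFLAG_WARNING) inline assembly, and unlikely() is spelled out as __builtin_expect():

#include <stdio.h>

/* Stand-in for __EMIT_BUG(BUGFLAG_WARNING); the real macro emits a
 * trapping instruction plus a __bug_table entry via inline assembly. */
#define __EMIT_WARNING()						\
	printf("WARNING at %s:%d\n", __FILE__, __LINE__)

/* Same structure as the patched WARN_ON(): evaluate once, let a
 * compile-time-false condition fold away, return the truth value. */
#define WARN_ON(x) ({							\
	typeof(x) __ret_warn_on = (x);					\
	if (__builtin_constant_p(__ret_warn_on)) {			\
		if (__ret_warn_on)					\
			__EMIT_WARNING();				\
	} else {							\
		if (__builtin_expect(!!(__ret_warn_on), 0))		\
			__EMIT_WARNING();				\
	}								\
	__builtin_expect(!!(__ret_warn_on), 0);				\
})

int main(void)
{
	int broken = 1;

	if (WARN_ON(broken))	/* warns, and the branch is taken */
		broken = 0;
	(void)WARN_ON(0);	/* constant zero: no code for the report */
	return 0;
}

Because the macro yields the condition's value, if (WARN_ON(cond)) continues to work as an expression.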
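
The new asm-s390/chpid.h above defines struct chp_id plus a chp_id_for_each() iterator over every possible channel path. A self-contained sketch of the iteration pattern, with the helpers restated from the patch so it builds in userspace (u8 becomes unsigned char, the packed attribute is dropped, and __MAX_CSSID is 0 as in the patched <asm-s390/cio.h>); the counting loop body is purely illustrative:

#include <stdio.h>
#include <string.h>

#define __MAX_CHPID	255
#define __MAX_CSSID	0

struct chp_id {
	unsigned char reserved1;
	unsigned char cssid;
	unsigned char reserved2;
	unsigned char id;
};

static inline void chp_id_init(struct chp_id *chpid)
{
	memset(chpid, 0, sizeof(struct chp_id));
}

static inline void chp_id_next(struct chp_id *chpid)
{
	if (chpid->id < __MAX_CHPID)
		chpid->id++;
	else {
		chpid->id = 0;
		chpid->cssid++;
	}
}

static inline int chp_id_is_valid(struct chp_id *chpid)
{
	return chpid->cssid <= __MAX_CSSID;
}

#define chp_id_for_each(c) \
	for (chp_id_init(c); chp_id_is_valid(c); chp_id_next(c))

int main(void)
{
	struct chp_id chpid;
	unsigned int count = 0;

	/* Visit css 0, chpid 0x00..0xff; with __MAX_CSSID == 0 the walk
	 * stops once the id wraps and the cssid increments to 1. */
	chp_id_for_each(&chpid)
		count++;

	printf("visited %u channel-path IDs\n", count);	/* prints 256 */
	return 0;
}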
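
The asm-generic/pgtable.h, asm-s390/pgtable.h, page-flags.h and rmap.c hunks above replace page_test_and_clear_dirty() with a separate page_test_dirty()/page_clear_dirty() pair, so callers test first and clear explicitly. A minimal sketch of the new caller idiom with stand-in types; the real s390 page_test_dirty() checks the _PAGE_CHANGED bit of the storage key and page_clear_dirty() resets the key to PAGE_DEFAULT_KEY, which this mock reduces to a boolean:

#include <stdbool.h>
#include <stdio.h>

/* Mock page: one flag standing in for the hardware-changed state that
 * the s390 storage key tracks. */
struct mock_page {
	bool hw_dirty;
};

static int page_test_dirty(struct mock_page *page)
{
	return page->hw_dirty;
}

static void page_clear_dirty(struct mock_page *page)
{
	page->hw_dirty = false;
}

static void set_page_dirty(struct mock_page *page)
{
	(void)page;
	printf("hardware dirty state propagated to the software dirty bit\n");
}

/* Mirrors the new idiom in the mm/rmap.c hunk: test, clear, then act,
 * instead of relying on a combined test-and-clear primitive. */
static void page_remove_rmap_like(struct mock_page *page)
{
	if (page_test_dirty(page)) {
		page_clear_dirty(page);
		set_page_dirty(page);
	}
}

int main(void)
{
	struct mock_page page = { .hw_dirty = true };

	page_remove_rmap_like(&page);
	return 0;
}

Splitting the primitive also lets SetPageUptodate() in the page-flags.h hunk call only the clear half, since it never needs the returned dirty state.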