Diffstat (limited to 'drivers/s390')
48 files changed, 3034 insertions, 654 deletions
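The patch below does two main things: it removes the old dynamically registered DASD ioctls (including the separate dasd_cmb channel-measurement module) in favour of a static switch in dasd_ioctl() plus a per-discipline ->ioctl hook, and it adds extended error reporting (EER) for ECKD DASD devices: a per-device sysfs attribute "eer_enabled" and a misc character device "dasd_eer" that streams error records. Each record delivered to user space starts with a struct dasd_eer_header (total size, trigger ID, timestamp, bus ID), followed by any available sense data and a terminating "EOR" marker.

A minimal user-space sketch of how that interface might be consumed. The device node name, sysfs attribute and header fields are taken from the patch; the rest (the redeclared struct, buffer size, error handling, the example bus ID 0.0.1234) is illustrative and assumes a 64-bit user space whose struct padding matches the kernel's:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* mirrors struct dasd_eer_header from dasd_eer.c */
    struct dasd_eer_header {
            uint32_t total_size;     /* header + sense data + "EOR" */
            uint32_t trigger;        /* DASD_EER_FATALERROR, _NOPATH, ... */
            uint64_t tv_sec;
            uint64_t tv_usec;
            char     busid[10];
    };

    int main(void)
    {
            /* EER must first be switched on per device, e.g.
             *   echo 1 > /sys/bus/ccw/devices/0.0.1234/eer_enabled */
            struct dasd_eer_header hdr;
            char rest[4096];
            int fd = open("/dev/dasd_eer", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* one read() returns at most the remainder of the current record */
            while (read(fd, &hdr, sizeof(hdr)) == (ssize_t) sizeof(hdr)) {
                    read(fd, rest, hdr.total_size - sizeof(hdr));
                    printf("trigger %u on %.10s\n", hdr.trigger, hdr.busid);
            }
            close(fd);
            return 0;
    }
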
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig index 721787cc5a1..4d36208ff8d 100644 --- a/drivers/s390/Kconfig +++ b/drivers/s390/Kconfig @@ -183,7 +183,13 @@ config S390_TAPE_34XX tape subsystems and 100% compatibles. It is safe to say "Y" here. - +config S390_TAPE_3590 + tristate "Support for 3590 tape hardware" + depends on S390_TAPE + help + Select this option if you want to access IBM 3590 magnetic + tape subsystems and 100% compatibles. + It is safe to say "Y" here. config VMLOGRDR tristate "Support for the z/VM recording system services (VM only)" diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 6f50cc9323d..929d6fff615 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -49,20 +49,18 @@ config DASD_FBA config DASD_DIAG tristate "Support for DIAG access to Disks" - depends on DASD && ( 64BIT = 'n' || EXPERIMENTAL) + depends on DASD help Select this option if you want to use Diagnose250 command to access Disks under VM. If you are not running under VM or unsure what it is, say "N". -config DASD_CMB - tristate "Compatibility interface for DASD channel measurement blocks" +config DASD_EER + bool "Extended error reporting (EER)" depends on DASD help - This driver provides an additional interface to the channel measurement - facility, which is normally accessed though sysfs, with a set of - ioctl functions specific to the dasd driver. - This is only needed if you want to use applications written for - linux-2.4 dasd channel measurement facility interface. + This driver provides a character device interface to the + DASD extended error reporting. This is only needed if you want to + use applications written for the EER facility. endif diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile index 58c6780134f..be9f22d52fd 100644 --- a/drivers/s390/block/Makefile +++ b/drivers/s390/block/Makefile @@ -7,11 +7,13 @@ dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o dasd_diag_mod-objs := dasd_diag.o dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ dasd_genhd.o dasd_erp.o +ifdef CONFIG_DASD_EER +dasd_mod-objs += dasd_eer.o +endif obj-$(CONFIG_DASD) += dasd_mod.o obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o -obj-$(CONFIG_DASD_CMB) += dasd_cmb.o obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o obj-$(CONFIG_DCSSBLK) += dcssblk.o diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 33157c84d1d..7967916bda1 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -43,7 +43,6 @@ MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); MODULE_DESCRIPTION("Linux on S/390 DASD device driver," " Copyright 2000 IBM Corporation"); MODULE_SUPPORTED_DEVICE("dasd"); -MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s"); MODULE_LICENSE("GPL"); /* @@ -71,10 +70,9 @@ dasd_alloc_device(void) { struct dasd_device *device; - device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC); + device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC); if (device == NULL) return ERR_PTR(-ENOMEM); - memset(device, 0, sizeof (struct dasd_device)); /* open_count = 0 means device online but not in use */ atomic_set(&device->open_count, -1); @@ -151,6 +149,8 @@ dasd_state_new_to_known(struct dasd_device *device) static inline void dasd_state_known_to_new(struct dasd_device * device) { + /* Disable extended error reporting for this device. */ + dasd_eer_disable(device); /* Forget the discipline information. 
*/ if (device->discipline) module_put(device->discipline->owner); @@ -545,29 +545,26 @@ dasd_kmalloc_request(char *magic, int cplength, int datasize, (cplength*sizeof(struct ccw1)) > PAGE_SIZE) BUG(); - cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); + cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); if (cqr == NULL) return ERR_PTR(-ENOMEM); - memset(cqr, 0, sizeof(struct dasd_ccw_req)); cqr->cpaddr = NULL; if (cplength > 0) { - cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1), + cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), GFP_ATOMIC | GFP_DMA); if (cqr->cpaddr == NULL) { kfree(cqr); return ERR_PTR(-ENOMEM); } - memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); } cqr->data = NULL; if (datasize > 0) { - cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA); + cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); if (cqr->data == NULL) { kfree(cqr->cpaddr); kfree(cqr); return ERR_PTR(-ENOMEM); } - memset(cqr->data, 0, datasize); } strncpy((char *) &cqr->magic, magic, 4); ASCEBC((char *) &cqr->magic, 4); @@ -892,6 +889,9 @@ dasd_handle_state_change_pending(struct dasd_device *device) struct dasd_ccw_req *cqr; struct list_head *l, *n; + /* First of all start sense subsystem status request. */ + dasd_eer_snss(device); + device->stopped &= ~DASD_STOPPED_PENDING; /* restart all 'running' IO on queue */ @@ -1111,6 +1111,19 @@ restart: } goto restart; } + + /* First of all call extended error reporting. */ + if (dasd_eer_enabled(device) && + cqr->status == DASD_CQR_FAILED) { + dasd_eer_write(device, cqr, DASD_EER_FATALERROR); + + /* restart request */ + cqr->status = DASD_CQR_QUEUED; + cqr->retries = 255; + device->stopped |= DASD_STOPPED_QUIESCE; + goto restart; + } + /* Process finished ERP request. */ if (cqr->refers) { __dasd_process_erp(device, cqr); @@ -1248,7 +1261,8 @@ __dasd_start_head(struct dasd_device * device) cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); /* check FAILFAST */ if (device->stopped & ~DASD_STOPPED_PENDING && - test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) { + test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && + (!dasd_eer_enabled(device))) { cqr->status = DASD_CQR_FAILED; dasd_schedule_bh(device); } @@ -1807,7 +1821,7 @@ dasd_exit(void) #ifdef CONFIG_PROC_FS dasd_proc_exit(); #endif - dasd_ioctl_exit(); + dasd_eer_exit(); if (dasd_page_cache != NULL) { kmem_cache_destroy(dasd_page_cache); dasd_page_cache = NULL; @@ -2004,6 +2018,9 @@ dasd_generic_notify(struct ccw_device *cdev, int event) switch (event) { case CIO_GONE: case CIO_NO_PATH: + /* First of all call extended error reporting. */ + dasd_eer_write(device, NULL, DASD_EER_NOPATH); + if (device->state < DASD_STATE_BASIC) break; /* Device is active. We want to keep it. 
*/ @@ -2061,6 +2078,7 @@ dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver) put_driver(drv); } + static int __init dasd_init(void) { @@ -2093,7 +2111,7 @@ dasd_init(void) rc = dasd_parse(); if (rc) goto failed; - rc = dasd_ioctl_init(); + rc = dasd_eer_init(); if (rc) goto failed; #ifdef CONFIG_PROC_FS diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 4ee0f934e32..2ed51562319 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -1108,6 +1108,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) case 0x0B: DEV_MESSAGE(KERN_WARNING, device, "%s", "FORMAT F - Volume is suspended duplex"); + /* call extended error reporting (EER) */ + dasd_eer_write(device, erp->refers, + DASD_EER_PPRCSUSPEND); break; case 0x0C: DEV_MESSAGE(KERN_WARNING, device, "%s", diff --git a/drivers/s390/block/dasd_cmb.c b/drivers/s390/block/dasd_cmb.c deleted file mode 100644 index e88f73ee72c..00000000000 --- a/drivers/s390/block/dasd_cmb.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Linux on zSeries Channel Measurement Facility support - * (dasd device driver interface) - * - * Copyright 2000,2003 IBM Corporation - * - * Author: Arnd Bergmann <arndb@de.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ -#include <linux/init.h> -#include <linux/module.h> -#include <asm/ccwdev.h> -#include <asm/cmb.h> - -#include "dasd_int.h" - -static int -dasd_ioctl_cmf_enable(struct block_device *bdev, int no, long args) -{ - struct dasd_device *device; - - device = bdev->bd_disk->private_data; - if (!device) - return -EINVAL; - - return enable_cmf(device->cdev); -} - -static int -dasd_ioctl_cmf_disable(struct block_device *bdev, int no, long args) -{ - struct dasd_device *device; - - device = bdev->bd_disk->private_data; - if (!device) - return -EINVAL; - - return disable_cmf(device->cdev); -} - -static int -dasd_ioctl_readall_cmb(struct block_device *bdev, int no, long args) -{ - struct dasd_device *device; - struct cmbdata __user *udata; - struct cmbdata data; - size_t size; - int ret; - - device = bdev->bd_disk->private_data; - if (!device) - return -EINVAL; - udata = (void __user *) args; - size = _IOC_SIZE(no); - - if (!access_ok(VERIFY_WRITE, udata, size)) - return -EFAULT; - ret = cmf_readall(device->cdev, &data); - if (ret) - return ret; - if (copy_to_user(udata, &data, min(size, sizeof(*udata)))) - return -EFAULT; - return 0; -} - -/* module initialization below here. dasd already provides a mechanism - * to dynamically register ioctl functions, so we simply use this. 
*/ -static inline int -ioctl_reg(unsigned int no, dasd_ioctl_fn_t handler) -{ - return dasd_ioctl_no_register(THIS_MODULE, no, handler); -} - -static inline void -ioctl_unreg(unsigned int no, dasd_ioctl_fn_t handler) -{ - dasd_ioctl_no_unregister(THIS_MODULE, no, handler); -} - -static void -dasd_cmf_exit(void) -{ - ioctl_unreg(BIODASDCMFENABLE, dasd_ioctl_cmf_enable); - ioctl_unreg(BIODASDCMFDISABLE, dasd_ioctl_cmf_disable); - ioctl_unreg(BIODASDREADALLCMB, dasd_ioctl_readall_cmb); -} - -static int __init -dasd_cmf_init(void) -{ - int ret; - ret = ioctl_reg (BIODASDCMFENABLE, dasd_ioctl_cmf_enable); - if (ret) - goto err; - ret = ioctl_reg (BIODASDCMFDISABLE, dasd_ioctl_cmf_disable); - if (ret) - goto err; - ret = ioctl_reg (BIODASDREADALLCMB, dasd_ioctl_readall_cmb); - if (ret) - goto err; - - return 0; -err: - dasd_cmf_exit(); - - return ret; -} - -module_init(dasd_cmf_init); -module_exit(dasd_cmf_exit); - -MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("channel measurement facility interface for dasd\n" - "Copyright 2003 IBM Corporation\n"); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 1629b27c48a..2f720108a7e 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -16,6 +16,7 @@ #include <linux/config.h> #include <linux/ctype.h> #include <linux/init.h> +#include <linux/module.h> #include <asm/debug.h> #include <asm/uaccess.h> @@ -69,6 +70,8 @@ int dasd_autodetect = 0; /* is true, when autodetection is active */ * strings when running as a module. */ static char *dasd[256]; +module_param_array(dasd, charp, NULL, 0); + /* * Single spinlock to protect devmap structures and lists. */ @@ -715,10 +718,51 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr, char *bu static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); +/* + * extended error-reporting + */ +static ssize_t +dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct dasd_devmap *devmap; + int eer_flag; + + devmap = dasd_find_busid(dev->bus_id); + if (!IS_ERR(devmap) && devmap->device) + eer_flag = dasd_eer_enabled(devmap->device); + else + eer_flag = 0; + return snprintf(buf, PAGE_SIZE, eer_flag ? "1\n" : "0\n"); +} + +static ssize_t +dasd_eer_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_devmap *devmap; + int rc; + + devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); + if (IS_ERR(devmap)) + return PTR_ERR(devmap); + if (!devmap->device) + return count; + if (buf[0] == '1') { + rc = dasd_eer_enable(devmap->device); + if (rc) + return rc; + } else + dasd_eer_disable(devmap->device); + return count; +} + +static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store); + static struct attribute * dasd_attrs[] = { &dev_attr_readonly.attr, &dev_attr_discipline.attr, &dev_attr_use_diag.attr, + &dev_attr_eer_enabled.attr, NULL, }; diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 822e2a26557..ee09ef33d08 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1227,19 +1227,14 @@ dasd_eckd_fill_info(struct dasd_device * device, * (see dasd_eckd_reserve) device. 
*/ static int -dasd_eckd_release(struct block_device *bdev, int no, long args) +dasd_eckd_release(struct dasd_device *device) { - struct dasd_device *device; struct dasd_ccw_req *cqr; int rc; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1, 32, device); if (IS_ERR(cqr)) { @@ -1272,19 +1267,14 @@ dasd_eckd_release(struct block_device *bdev, int no, long args) * the interrupt is outstanding for a certain time. */ static int -dasd_eckd_reserve(struct block_device *bdev, int no, long args) +dasd_eckd_reserve(struct dasd_device *device) { - struct dasd_device *device; struct dasd_ccw_req *cqr; int rc; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1, 32, device); if (IS_ERR(cqr)) { @@ -1316,19 +1306,14 @@ dasd_eckd_reserve(struct block_device *bdev, int no, long args) * (unconditional reserve) */ static int -dasd_eckd_steal_lock(struct block_device *bdev, int no, long args) +dasd_eckd_steal_lock(struct dasd_device *device) { - struct dasd_device *device; struct dasd_ccw_req *cqr; int rc; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1, 32, device); if (IS_ERR(cqr)) { @@ -1358,19 +1343,14 @@ dasd_eckd_steal_lock(struct block_device *bdev, int no, long args) * Read performance statistics */ static int -dasd_eckd_performance(struct block_device *bdev, int no, long args) +dasd_eckd_performance(struct dasd_device *device, void __user *argp) { - struct dasd_device *device; struct dasd_psf_prssd_data *prssdp; struct dasd_rssd_perf_stats_t *stats; struct dasd_ccw_req *cqr; struct ccw1 *ccw; int rc; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1 /* PSF */ + 1 /* RSSD */ , (sizeof (struct dasd_psf_prssd_data) + @@ -1414,8 +1394,9 @@ dasd_eckd_performance(struct block_device *bdev, int no, long args) /* Prepare for Read Subsystem Data */ prssdp = (struct dasd_psf_prssd_data *) cqr->data; stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); - rc = copy_to_user((long __user *) args, (long *) stats, - sizeof(struct dasd_rssd_perf_stats_t)); + if (copy_to_user(argp, stats, + sizeof(struct dasd_rssd_perf_stats_t))) + rc = -EFAULT; } dasd_sfree_request(cqr, cqr->device); return rc; @@ -1426,27 +1407,22 @@ dasd_eckd_performance(struct block_device *bdev, int no, long args) * Returnes the cache attributes used in Define Extend (DE). 
*/ static int -dasd_eckd_get_attrib (struct block_device *bdev, int no, long args) +dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp) { - struct dasd_device *device; - struct dasd_eckd_private *private; - struct attrib_data_t attrib; + struct dasd_eckd_private *private = + (struct dasd_eckd_private *)device->private; + struct attrib_data_t attrib = private->attrib; int rc; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (!args) + if (!argp) return -EINVAL; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - - private = (struct dasd_eckd_private *) device->private; - attrib = private->attrib; - - rc = copy_to_user((long __user *) args, (long *) &attrib, - sizeof (struct attrib_data_t)); + rc = 0; + if (copy_to_user(argp, (long *) &attrib, + sizeof (struct attrib_data_t))) + rc = -EFAULT; return rc; } @@ -1456,26 +1432,19 @@ dasd_eckd_get_attrib (struct block_device *bdev, int no, long args) * Stores the attributes for cache operation to be used in Define Extend (DE). */ static int -dasd_eckd_set_attrib(struct block_device *bdev, int no, long args) +dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp) { - struct dasd_device *device; - struct dasd_eckd_private *private; + struct dasd_eckd_private *private = + (struct dasd_eckd_private *)device->private; struct attrib_data_t attrib; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (!args) + if (!argp) return -EINVAL; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - - if (copy_from_user(&attrib, (void __user *) args, - sizeof (struct attrib_data_t))) { + if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t))) return -EFAULT; - } - private = (struct dasd_eckd_private *) device->private; private->attrib = attrib; DEV_MESSAGE(KERN_INFO, device, @@ -1484,6 +1453,27 @@ dasd_eckd_set_attrib(struct block_device *bdev, int no, long args) return 0; } +static int +dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp) +{ + switch (cmd) { + case BIODASDGATTR: + return dasd_eckd_get_attrib(device, argp); + case BIODASDSATTR: + return dasd_eckd_set_attrib(device, argp); + case BIODASDPSRD: + return dasd_eckd_performance(device, argp); + case BIODASDRLSE: + return dasd_eckd_release(device); + case BIODASDRSRV: + return dasd_eckd_reserve(device); + case BIODASDSLCK: + return dasd_eckd_steal_lock(device); + default: + return -ENOIOCTLCMD; + } +} + /* * Print sense data and related channel program. * Parts are printed because printk buffer is only 1024 bytes. 
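With dasd_ioctl_no_register() gone, the hunks above route the ECKD-specific commands through one dispatcher; the common code in dasd_ioctl.c (further down in this patch) tries the discipline's new ->ioctl hook and falls back to -EINVAL when the hook returns -ENOIOCTLCMD. A hypothetical additional discipline would hook in the same way; this kernel-internal fragment is a sketch, and the dasd_xyz_* names are illustrative, not part of the patch:

    /* hypothetical sketch, not part of this patch */
    static int dasd_xyz_ioctl(struct dasd_device *device, unsigned int cmd,
                              void __user *argp)
    {
            switch (cmd) {
            case BIODASDRLSE:
                    return dasd_xyz_release(device);  /* device-specific handler */
            default:
                    return -ENOIOCTLCMD;  /* not handled; dasd_ioctl() decides */
            }
    }

    static struct dasd_discipline dasd_xyz_discipline = {
            /* ... other discipline methods ... */
            .ioctl = dasd_xyz_ioctl,
    };
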
@@ -1642,6 +1632,7 @@ static struct dasd_discipline dasd_eckd_discipline = { .free_cp = dasd_eckd_free_cp, .dump_sense = dasd_eckd_dump_sense, .fill_info = dasd_eckd_fill_info, + .ioctl = dasd_eckd_ioctl, }; static int __init @@ -1649,59 +1640,18 @@ dasd_eckd_init(void) { int ret; - dasd_ioctl_no_register(THIS_MODULE, BIODASDGATTR, - dasd_eckd_get_attrib); - dasd_ioctl_no_register(THIS_MODULE, BIODASDSATTR, - dasd_eckd_set_attrib); - dasd_ioctl_no_register(THIS_MODULE, BIODASDPSRD, - dasd_eckd_performance); - dasd_ioctl_no_register(THIS_MODULE, BIODASDRLSE, - dasd_eckd_release); - dasd_ioctl_no_register(THIS_MODULE, BIODASDRSRV, - dasd_eckd_reserve); - dasd_ioctl_no_register(THIS_MODULE, BIODASDSLCK, - dasd_eckd_steal_lock); - ASCEBC(dasd_eckd_discipline.ebcname, 4); ret = ccw_driver_register(&dasd_eckd_driver); - if (ret) { - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR, - dasd_eckd_get_attrib); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR, - dasd_eckd_set_attrib); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD, - dasd_eckd_performance); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE, - dasd_eckd_release); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV, - dasd_eckd_reserve); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK, - dasd_eckd_steal_lock); - return ret; - } - - dasd_generic_auto_online(&dasd_eckd_driver); - return 0; + if (!ret) + dasd_generic_auto_online(&dasd_eckd_driver); + return ret; } static void __exit dasd_eckd_cleanup(void) { ccw_driver_unregister(&dasd_eckd_driver); - - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR, - dasd_eckd_get_attrib); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR, - dasd_eckd_set_attrib); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD, - dasd_eckd_performance); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE, - dasd_eckd_release); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV, - dasd_eckd_reserve); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK, - dasd_eckd_steal_lock); } module_init(dasd_eckd_init); diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index bc3823d3522..ad8524bb7bb 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h @@ -29,6 +29,7 @@ #define DASD_ECKD_CCW_PSF 0x27 #define DASD_ECKD_CCW_RSSD 0x3e #define DASD_ECKD_CCW_LOCATE_RECORD 0x47 +#define DASD_ECKD_CCW_SNSS 0x54 #define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 #define DASD_ECKD_CCW_WRITE_MT 0x85 #define DASD_ECKD_CCW_READ_MT 0x86 diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c new file mode 100644 index 00000000000..2d946b6ca07 --- /dev/null +++ b/drivers/s390/block/dasd_eer.c @@ -0,0 +1,682 @@ +/* + * Character device driver for extended error reporting. 
+ * + * Copyright (C) 2005 IBM Corporation + * extended error reporting for DASD ECKD devices + * Author(s): Stefan Weinhuber <wein@de.ibm.com> + */ + +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/device.h> +#include <linux/poll.h> + +#include <asm/uaccess.h> +#include <asm/semaphore.h> +#include <asm/atomic.h> +#include <asm/ebcdic.h> + +#include "dasd_int.h" +#include "dasd_eckd.h" + +#ifdef PRINTK_HEADER +#undef PRINTK_HEADER +#endif /* PRINTK_HEADER */ +#define PRINTK_HEADER "dasd(eer):" + +/* + * SECTION: the internal buffer + */ + +/* + * The internal buffer is meant to store obaque blobs of data, so it does + * not know of higher level concepts like triggers. + * It consists of a number of pages that are used as a ringbuffer. Each data + * blob is stored in a simple record that consists of an integer, which + * contains the size of the following data, and the data bytes themselfes. + * + * To allow for multiple independent readers we create one internal buffer + * each time the device is opened and destroy the buffer when the file is + * closed again. The number of pages used for this buffer is determined by + * the module parmeter eer_pages. + * + * One record can be written to a buffer by using the functions + * - dasd_eer_start_record (one time per record to write the size to the + * buffer and reserve the space for the data) + * - dasd_eer_write_buffer (one or more times per record to write the data) + * The data can be written in several steps but you will have to compute + * the total size up front for the invocation of dasd_eer_start_record. + * If the ringbuffer is full, dasd_eer_start_record will remove the required + * number of old records. + * + * A record is typically read in two steps, first read the integer that + * specifies the size of the following data, then read the data. + * Both can be done by + * - dasd_eer_read_buffer + * + * For all mentioned functions you need to get the bufferlock first and keep + * it until a complete record is written or read. + * + * All information necessary to keep track of an internal buffer is kept in + * a struct eerbuffer. The buffer specific to a file pointer is strored in + * the private_data field of that file. To be able to write data to all + * existing buffers, each buffer is also added to the bufferlist. + * If the user does not want to read a complete record in one go, we have to + * keep track of the rest of the record. residual stores the number of bytes + * that are still to deliver. If the rest of the record is invalidated between + * two reads then residual will be set to -1 so that the next read will fail. + * All entries in the eerbuffer structure are protected with the bufferlock. + * To avoid races between writing to a buffer on the one side and creating + * and destroying buffers on the other side, the bufferlock must also be used + * to protect the bufferlist. + */ + +static int eer_pages = 5; +module_param(eer_pages, int, S_IRUGO|S_IWUSR); + +struct eerbuffer { + struct list_head list; + char **buffer; + int buffersize; + int buffer_page_count; + int head; + int tail; + int residual; +}; + +static LIST_HEAD(bufferlist); +static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED; +static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); + +/* + * How many free bytes are available on the buffer. + * Needs to be called with bufferlock held. 
+ */ +static int dasd_eer_get_free_bytes(struct eerbuffer *eerb) +{ + if (eerb->head < eerb->tail) + return eerb->tail - eerb->head - 1; + return eerb->buffersize - eerb->head + eerb->tail -1; +} + +/* + * How many bytes of buffer space are used. + * Needs to be called with bufferlock held. + */ +static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb) +{ + + if (eerb->head >= eerb->tail) + return eerb->head - eerb->tail; + return eerb->buffersize - eerb->tail + eerb->head; +} + +/* + * The dasd_eer_write_buffer function just copies count bytes of data + * to the buffer. Make sure to call dasd_eer_start_record first, to + * make sure that enough free space is available. + * Needs to be called with bufferlock held. + */ +static void dasd_eer_write_buffer(struct eerbuffer *eerb, + char *data, int count) +{ + + unsigned long headindex,localhead; + unsigned long rest, len; + char *nextdata; + + nextdata = data; + rest = count; + while (rest > 0) { + headindex = eerb->head / PAGE_SIZE; + localhead = eerb->head % PAGE_SIZE; + len = min(rest, PAGE_SIZE - localhead); + memcpy(eerb->buffer[headindex]+localhead, nextdata, len); + nextdata += len; + rest -= len; + eerb->head += len; + if (eerb->head == eerb->buffersize) + eerb->head = 0; /* wrap around */ + BUG_ON(eerb->head > eerb->buffersize); + } +} + +/* + * Needs to be called with bufferlock held. + */ +static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count) +{ + + unsigned long tailindex,localtail; + unsigned long rest, len, finalcount; + char *nextdata; + + finalcount = min(count, dasd_eer_get_filled_bytes(eerb)); + nextdata = data; + rest = finalcount; + while (rest > 0) { + tailindex = eerb->tail / PAGE_SIZE; + localtail = eerb->tail % PAGE_SIZE; + len = min(rest, PAGE_SIZE - localtail); + memcpy(nextdata, eerb->buffer[tailindex] + localtail, len); + nextdata += len; + rest -= len; + eerb->tail += len; + if (eerb->tail == eerb->buffersize) + eerb->tail = 0; /* wrap around */ + BUG_ON(eerb->tail > eerb->buffersize); + } + return finalcount; +} + +/* + * Whenever you want to write a blob of data to the internal buffer you + * have to start by using this function first. It will write the number + * of bytes that will be written to the buffer. If necessary it will remove + * old records to make room for the new one. + * Needs to be called with bufferlock held. + */ +static int dasd_eer_start_record(struct eerbuffer *eerb, int count) +{ + int tailcount; + + if (count + sizeof(count) > eerb->buffersize) + return -ENOMEM; + while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) { + if (eerb->residual > 0) { + eerb->tail += eerb->residual; + if (eerb->tail >= eerb->buffersize) + eerb->tail -= eerb->buffersize; + eerb->residual = -1; + } + dasd_eer_read_buffer(eerb, (char *) &tailcount, + sizeof(tailcount)); + eerb->tail += tailcount; + if (eerb->tail >= eerb->buffersize) + eerb->tail -= eerb->buffersize; + } + dasd_eer_write_buffer(eerb, (char*) &count, sizeof(count)); + + return 0; +}; + +/* + * Release pages that are not used anymore. + */ +static void dasd_eer_free_buffer_pages(char **buf, int no_pages) +{ + int i; + + for (i = 0; i < no_pages; i++) + free_page((unsigned long) buf[i]); +} + +/* + * Allocate a new set of memory pages. 
+ */ +static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages) +{ + int i; + + for (i = 0; i < no_pages; i++) { + buf[i] = (char *) get_zeroed_page(GFP_KERNEL); + if (!buf[i]) { + dasd_eer_free_buffer_pages(buf, i); + return -ENOMEM; + } + } + return 0; +} + +/* + * SECTION: The extended error reporting functionality + */ + +/* + * When a DASD device driver wants to report an error, it calls the + * function dasd_eer_write and gives the respective trigger ID as + * parameter. Currently there are four kinds of triggers: + * + * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems + * DASD_EER_PPRCSUSPEND: PPRC was suspended + * DASD_EER_NOPATH: There is no path to the device left. + * DASD_EER_STATECHANGE: The state of the device has changed. + * + * For the first three triggers all required information can be supplied by + * the caller. For these triggers a record is written by the function + * dasd_eer_write_standard_trigger. + * + * The DASD_EER_STATECHANGE trigger is special since a sense subsystem + * status ccw need to be executed to gather the necessary sense data first. + * The dasd_eer_snss function will queue the SNSS request and the request + * callback will then call dasd_eer_write with the DASD_EER_STATCHANGE + * trigger. + * + * To avoid memory allocations at runtime, the necessary memory is allocated + * when the extended error reporting is enabled for a device (by + * dasd_eer_probe). There is one sense subsystem status request for each + * eer enabled DASD device. The presence of the cqr in device->eer_cqr + * indicates that eer is enable for the device. The use of the snss request + * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates + * that the cqr is currently in use, dasd_eer_snss cannot start a second + * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of + * the SNSS request will check the bit and call dasd_eer_snss again. + */ + +#define SNSS_DATA_SIZE 44 + +#define DASD_EER_BUSID_SIZE 10 +struct dasd_eer_header { + __u32 total_size; + __u32 trigger; + __u64 tv_sec; + __u64 tv_usec; + char busid[DASD_EER_BUSID_SIZE]; +}; + +/* + * The following function can be used for those triggers that have + * all necessary data available when the function is called. + * If the parameter cqr is not NULL, the chain of requests will be searched + * for valid sense data, and all valid sense data sets will be added to + * the triggers data. 
+ */ +static void dasd_eer_write_standard_trigger(struct dasd_device *device, + struct dasd_ccw_req *cqr, + int trigger) +{ + struct dasd_ccw_req *temp_cqr; + int data_size; + struct timeval tv; + struct dasd_eer_header header; + unsigned long flags; + struct eerbuffer *eerb; + + /* go through cqr chain and count the valid sense data sets */ + data_size = 0; + for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) + if (temp_cqr->irb.esw.esw0.erw.cons) + data_size += 32; + + header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ + header.trigger = trigger; + do_gettimeofday(&tv); + header.tv_sec = tv.tv_sec; + header.tv_usec = tv.tv_usec; + strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); + + spin_lock_irqsave(&bufferlock, flags); + list_for_each_entry(eerb, &bufferlist, list) { + dasd_eer_start_record(eerb, header.total_size); + dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header)); + for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) + if (temp_cqr->irb.esw.esw0.erw.cons) + dasd_eer_write_buffer(eerb, cqr->irb.ecw, 32); + dasd_eer_write_buffer(eerb, "EOR", 4); + } + spin_unlock_irqrestore(&bufferlock, flags); + wake_up_interruptible(&dasd_eer_read_wait_queue); +} + +/* + * This function writes a DASD_EER_STATECHANGE trigger. + */ +static void dasd_eer_write_snss_trigger(struct dasd_device *device, + struct dasd_ccw_req *cqr, + int trigger) +{ + int data_size; + int snss_rc; + struct timeval tv; + struct dasd_eer_header header; + unsigned long flags; + struct eerbuffer *eerb; + + snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; + if (snss_rc) + data_size = 0; + else + data_size = SNSS_DATA_SIZE; + + header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ + header.trigger = DASD_EER_STATECHANGE; + do_gettimeofday(&tv); + header.tv_sec = tv.tv_sec; + header.tv_usec = tv.tv_usec; + strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); + + spin_lock_irqsave(&bufferlock, flags); + list_for_each_entry(eerb, &bufferlist, list) { + dasd_eer_start_record(eerb, header.total_size); + dasd_eer_write_buffer(eerb, (char *) &header , sizeof(header)); + if (!snss_rc) + dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE); + dasd_eer_write_buffer(eerb, "EOR", 4); + } + spin_unlock_irqrestore(&bufferlock, flags); + wake_up_interruptible(&dasd_eer_read_wait_queue); +} + +/* + * This function is called for all triggers. It calls the appropriate + * function that writes the actual trigger records. + */ +void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr, + unsigned int id) +{ + if (!device->eer_cqr) + return; + switch (id) { + case DASD_EER_FATALERROR: + case DASD_EER_PPRCSUSPEND: + dasd_eer_write_standard_trigger(device, cqr, id); + break; + case DASD_EER_NOPATH: + dasd_eer_write_standard_trigger(device, NULL, id); + break; + case DASD_EER_STATECHANGE: + dasd_eer_write_snss_trigger(device, cqr, id); + break; + default: /* unknown trigger, so we write it without any sense data */ + dasd_eer_write_standard_trigger(device, NULL, id); + break; + } +} +EXPORT_SYMBOL(dasd_eer_write); + +/* + * Start a sense subsystem status request. + * Needs to be called with the device held. + */ +void dasd_eer_snss(struct dasd_device *device) +{ + struct dasd_ccw_req *cqr; + + cqr = device->eer_cqr; + if (!cqr) /* Device not eer enabled. */ + return; + if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) { + /* Sense subsystem status request in use. 
*/ + set_bit(DASD_FLAG_EER_SNSS, &device->flags); + return; + } + clear_bit(DASD_FLAG_EER_SNSS, &device->flags); + cqr->status = DASD_CQR_QUEUED; + list_add(&cqr->list, &device->ccw_queue); + dasd_schedule_bh(device); +} + +/* + * Callback function for use with sense subsystem status request. + */ +static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) +{ + struct dasd_device *device = cqr->device; + unsigned long flags; + + dasd_eer_write(device, cqr, DASD_EER_STATECHANGE); + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + if (device->eer_cqr == cqr) { + clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); + if (test_bit(DASD_FLAG_EER_SNSS, &device->flags)) + /* Another SNSS has been requested in the meantime. */ + dasd_eer_snss(device); + cqr = NULL; + } + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + if (cqr) + /* + * Extended error recovery has been switched off while + * the SNSS request was running. It could even have + * been switched off and on again in which case there + * is a new ccw in device->eer_cqr. Free the "old" + * snss request now. + */ + dasd_kfree_request(cqr, device); +} + +/* + * Enable error reporting on a given device. + */ +int dasd_eer_enable(struct dasd_device *device) +{ + struct dasd_ccw_req *cqr; + unsigned long flags; + + if (device->eer_cqr) + return 0; + + if (!device->discipline || strcmp(device->discipline->name, "ECKD")) + return -EPERM; /* FIXME: -EMEDIUMTYPE ? */ + + cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */, + SNSS_DATA_SIZE, device); + if (!cqr) + return -ENOMEM; + + cqr->device = device; + cqr->retries = 255; + cqr->expires = 10 * HZ; + + cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS; + cqr->cpaddr->count = SNSS_DATA_SIZE; + cqr->cpaddr->flags = 0; + cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; + + cqr->buildclk = get_clock(); + cqr->status = DASD_CQR_FILLED; + cqr->callback = dasd_eer_snss_cb; + + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + if (!device->eer_cqr) { + device->eer_cqr = cqr; + cqr = NULL; + } + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + if (cqr) + dasd_kfree_request(cqr, device); + return 0; +} + +/* + * Disable error reporting on a given device. + */ +void dasd_eer_disable(struct dasd_device *device) +{ + struct dasd_ccw_req *cqr; + unsigned long flags; + int in_use; + + if (!device->eer_cqr) + return; + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + cqr = device->eer_cqr; + device->eer_cqr = NULL; + clear_bit(DASD_FLAG_EER_SNSS, &device->flags); + in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + if (cqr && !in_use) + dasd_kfree_request(cqr, device); +} + +/* + * SECTION: the device operations + */ + +/* + * On the one side we need a lock to access our internal buffer, on the + * other side a copy_to_user can sleep. So we need to copy the data we have + * to transfer in a readbuffer, which is protected by the readbuffer_mutex. 
+ */ +static char readbuffer[PAGE_SIZE]; +static DECLARE_MUTEX(readbuffer_mutex); + +static int dasd_eer_open(struct inode *inp, struct file *filp) +{ + struct eerbuffer *eerb; + unsigned long flags; + + eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL); + eerb->buffer_page_count = eer_pages; + if (eerb->buffer_page_count < 1 || + eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { + kfree(eerb); + MESSAGE(KERN_WARNING, "can't open device since module " + "parameter eer_pages is smaller then 1 or" + " bigger then %d", (int)(INT_MAX / PAGE_SIZE)); + return -EINVAL; + } + eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; + eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *), + GFP_KERNEL); + if (!eerb->buffer) { + kfree(eerb); + return -ENOMEM; + } + if (dasd_eer_allocate_buffer_pages(eerb->buffer, + eerb->buffer_page_count)) { + kfree(eerb->buffer); + kfree(eerb); + return -ENOMEM; + } + filp->private_data = eerb; + spin_lock_irqsave(&bufferlock, flags); + list_add(&eerb->list, &bufferlist); + spin_unlock_irqrestore(&bufferlock, flags); + + return nonseekable_open(inp,filp); +} + +static int dasd_eer_close(struct inode *inp, struct file *filp) +{ + struct eerbuffer *eerb; + unsigned long flags; + + eerb = (struct eerbuffer *) filp->private_data; + spin_lock_irqsave(&bufferlock, flags); + list_del(&eerb->list); + spin_unlock_irqrestore(&bufferlock, flags); + dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count); + kfree(eerb->buffer); + kfree(eerb); + + return 0; +} + +static ssize_t dasd_eer_read(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + int tc,rc; + int tailcount,effective_count; + unsigned long flags; + struct eerbuffer *eerb; + + eerb = (struct eerbuffer *) filp->private_data; + if (down_interruptible(&readbuffer_mutex)) + return -ERESTARTSYS; + + spin_lock_irqsave(&bufferlock, flags); + + if (eerb->residual < 0) { /* the remainder of this record */ + /* has been deleted */ + eerb->residual = 0; + spin_unlock_irqrestore(&bufferlock, flags); + up(&readbuffer_mutex); + return -EIO; + } else if (eerb->residual > 0) { + /* OK we still have a second half of a record to deliver */ + effective_count = min(eerb->residual, (int) count); + eerb->residual -= effective_count; + } else { + tc = 0; + while (!tc) { + tc = dasd_eer_read_buffer(eerb, (char *) &tailcount, + sizeof(tailcount)); + if (!tc) { + /* no data available */ + spin_unlock_irqrestore(&bufferlock, flags); + up(&readbuffer_mutex); + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + rc = wait_event_interruptible( + dasd_eer_read_wait_queue, + eerb->head != eerb->tail); + if (rc) + return rc; + if (down_interruptible(&readbuffer_mutex)) + return -ERESTARTSYS; + spin_lock_irqsave(&bufferlock, flags); + } + } + WARN_ON(tc != sizeof(tailcount)); + effective_count = min(tailcount,(int)count); + eerb->residual = tailcount - effective_count; + } + + tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count); + WARN_ON(tc != effective_count); + + spin_unlock_irqrestore(&bufferlock, flags); + + if (copy_to_user(buf, readbuffer, effective_count)) { + up(&readbuffer_mutex); + return -EFAULT; + } + + up(&readbuffer_mutex); + return effective_count; +} + +static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable) +{ + unsigned int mask; + unsigned long flags; + struct eerbuffer *eerb; + + eerb = (struct eerbuffer *) filp->private_data; + poll_wait(filp, &dasd_eer_read_wait_queue, ptable); + spin_lock_irqsave(&bufferlock, flags); + if (eerb->head != eerb->tail) + mask = 
POLLIN | POLLRDNORM ; + else + mask = 0; + spin_unlock_irqrestore(&bufferlock, flags); + return mask; +} + +static struct file_operations dasd_eer_fops = { + .open = &dasd_eer_open, + .release = &dasd_eer_close, + .read = &dasd_eer_read, + .poll = &dasd_eer_poll, + .owner = THIS_MODULE, +}; + +static struct miscdevice dasd_eer_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "dasd_eer", + .fops = &dasd_eer_fops, +}; + +int __init dasd_eer_init(void) +{ + int rc; + + rc = misc_register(&dasd_eer_dev); + if (rc) { + MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " + "register misc device"); + return rc; + } + + return 0; +} + +void __exit dasd_eer_exit(void) +{ + WARN_ON(misc_deregister(&dasd_eer_dev) != 0); +} diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 7cb0b9e78a6..4293ba82752 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -69,15 +69,6 @@ */ struct dasd_device; -typedef int (*dasd_ioctl_fn_t) (struct block_device *bdev, int no, long args); - -struct dasd_ioctl { - struct list_head list; - struct module *owner; - int no; - dasd_ioctl_fn_t handler; -}; - typedef enum { dasd_era_fatal = -1, /* no chance to recover */ dasd_era_none = 0, /* don't recover, everything alright */ @@ -272,10 +263,28 @@ struct dasd_discipline { /* i/o control functions. */ int (*fill_geometry) (struct dasd_device *, struct hd_geometry *); int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); + int (*ioctl) (struct dasd_device *, unsigned int, void __user *); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; + +/* + * Notification numbers for extended error reporting notifications: + * The DASD_EER_DISABLE notification is sent before a dasd_device (and it's + * eer pointer) is freed. The error reporting module needs to do all necessary + * cleanup steps. + * The DASD_EER_TRIGGER notification sends the actual error reports (triggers). + */ +#define DASD_EER_DISABLE 0 +#define DASD_EER_TRIGGER 1 + +/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */ +#define DASD_EER_FATALERROR 1 +#define DASD_EER_NOPATH 2 +#define DASD_EER_STATECHANGE 3 +#define DASD_EER_PPRCSUSPEND 4 + struct dasd_device { /* Block device stuff. */ struct gendisk *gdp; @@ -289,6 +298,9 @@ struct dasd_device { unsigned long flags; /* per device flags */ unsigned short features; /* copy of devmap-features (read-only!) */ + /* extended error reporting stuff (eer) */ + struct dasd_ccw_req *eer_cqr; + /* Device discipline stuff. 
*/ struct dasd_discipline *discipline; struct dasd_discipline *base_discipline; @@ -334,6 +346,8 @@ struct dasd_device { /* per device flags */ #define DASD_FLAG_DSC_ERROR 2 /* return -EIO when disconnected */ #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ +#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */ +#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */ void dasd_put_device_wake(struct dasd_device *); @@ -523,10 +537,6 @@ int dasd_scan_partitions(struct dasd_device *); void dasd_destroy_partitions(struct dasd_device *); /* externals in dasd_ioctl.c */ -int dasd_ioctl_init(void); -void dasd_ioctl_exit(void); -int dasd_ioctl_no_register(struct module *, int, dasd_ioctl_fn_t); -int dasd_ioctl_no_unregister(struct module *, int, dasd_ioctl_fn_t); int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long); long dasd_compat_ioctl(struct file *, unsigned int, unsigned long); @@ -557,6 +567,30 @@ dasd_era_t dasd_9336_erp_examine(struct dasd_ccw_req *, struct irb *); dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *); struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *); +/* externals in dasd_eer.c */ +#ifdef CONFIG_DASD_EER +int dasd_eer_init(void); +void dasd_eer_exit(void); +int dasd_eer_enable(struct dasd_device *); +void dasd_eer_disable(struct dasd_device *); +void dasd_eer_write(struct dasd_device *, struct dasd_ccw_req *cqr, + unsigned int id); +void dasd_eer_snss(struct dasd_device *); + +static inline int dasd_eer_enabled(struct dasd_device *device) +{ + return device->eer_cqr != NULL; +} +#else +#define dasd_eer_init() (0) +#define dasd_eer_exit() do { } while (0) +#define dasd_eer_enable(d) (0) +#define dasd_eer_disable(d) do { } while (0) +#define dasd_eer_write(d,c,i) do { } while (0) +#define dasd_eer_snss(d) do { } while (0) +#define dasd_eer_enabled(d) (0) +#endif /* CONFIG_DASD_ERR */ + #endif /* __KERNEL__ */ #endif /* DASD_H */ diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index f9930552ab5..b8c80d28df4 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -16,6 +16,7 @@ #include <linux/blkpg.h> #include <asm/ccwdev.h> +#include <asm/cmb.h> #include <asm/uaccess.h> /* This is ugly... */ @@ -23,116 +24,12 @@ #include "dasd_int.h" -/* - * SECTION: ioctl functions. - */ -static struct list_head dasd_ioctl_list = LIST_HEAD_INIT(dasd_ioctl_list); - -/* - * Find the ioctl with number no. - */ -static struct dasd_ioctl * -dasd_find_ioctl(int no) -{ - struct dasd_ioctl *ioctl; - - list_for_each_entry (ioctl, &dasd_ioctl_list, list) - if (ioctl->no == no) - return ioctl; - return NULL; -} - -/* - * Register ioctl with number no. - */ -int -dasd_ioctl_no_register(struct module *owner, int no, dasd_ioctl_fn_t handler) -{ - struct dasd_ioctl *new; - if (dasd_find_ioctl(no)) - return -EBUSY; - new = kmalloc(sizeof (struct dasd_ioctl), GFP_KERNEL); - if (new == NULL) - return -ENOMEM; - new->owner = owner; - new->no = no; - new->handler = handler; - list_add(&new->list, &dasd_ioctl_list); - return 0; -} - -/* - * Deregister ioctl with number no. 
- */ -int -dasd_ioctl_no_unregister(struct module *owner, int no, dasd_ioctl_fn_t handler) -{ - struct dasd_ioctl *old = dasd_find_ioctl(no); - if (old == NULL) - return -ENOENT; - if (old->no != no || old->handler != handler || owner != old->owner) - return -EINVAL; - list_del(&old->list); - kfree(old); - return 0; -} - -int -dasd_ioctl(struct inode *inp, struct file *filp, - unsigned int no, unsigned long data) -{ - struct block_device *bdev = inp->i_bdev; - struct dasd_device *device = bdev->bd_disk->private_data; - struct dasd_ioctl *ioctl; - const char *dir; - int rc; - - if ((_IOC_DIR(no) != _IOC_NONE) && (data == 0)) { - PRINT_DEBUG("empty data ptr"); - return -EINVAL; - } - dir = _IOC_DIR (no) == _IOC_NONE ? "0" : - _IOC_DIR (no) == _IOC_READ ? "r" : - _IOC_DIR (no) == _IOC_WRITE ? "w" : - _IOC_DIR (no) == (_IOC_READ | _IOC_WRITE) ? "rw" : "u"; - DBF_DEV_EVENT(DBF_DEBUG, device, - "ioctl 0x%08x %s'0x%x'%d(%d) with data %8lx", no, - dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data); - /* Search for ioctl no in the ioctl list. */ - list_for_each_entry(ioctl, &dasd_ioctl_list, list) { - if (ioctl->no == no) { - /* Found a matching ioctl. Call it. */ - if (!try_module_get(ioctl->owner)) - continue; - rc = ioctl->handler(bdev, no, data); - module_put(ioctl->owner); - return rc; - } - } - /* No ioctl with number no. */ - DBF_DEV_EVENT(DBF_INFO, device, - "unknown ioctl 0x%08x=%s'0x%x'%d(%d) data %8lx", no, - dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data); - return -EINVAL; -} - -long -dasd_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int rval; - - lock_kernel(); - rval = dasd_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); - unlock_kernel(); - - return (rval == -EINVAL) ? -ENOIOCTLCMD : rval; -} static int -dasd_ioctl_api_version(struct block_device *bdev, int no, long args) +dasd_ioctl_api_version(void __user *argp) { int ver = DASD_API_VERSION; - return put_user(ver, (int __user *) args); + return put_user(ver, (int __user *)argp); } /* @@ -140,15 +37,13 @@ dasd_ioctl_api_version(struct block_device *bdev, int no, long args) * used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection */ static int -dasd_ioctl_enable(struct block_device *bdev, int no, long args) +dasd_ioctl_enable(struct block_device *bdev) { - struct dasd_device *device; + struct dasd_device *device = bdev->bd_disk->private_data; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; + dasd_enable_device(device); /* Formatting the dasd device can change the capacity. */ mutex_lock(&bdev->bd_mutex); @@ -162,15 +57,13 @@ dasd_ioctl_enable(struct block_device *bdev, int no, long args) * Used by dasdfmt. Disable I/O operations but allow ioctls. */ static int -dasd_ioctl_disable(struct block_device *bdev, int no, long args) +dasd_ioctl_disable(struct block_device *bdev) { - struct dasd_device *device; + struct dasd_device *device = bdev->bd_disk->private_data; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; + /* * Man this is sick. We don't do a real disable but only downgrade * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses @@ -194,18 +87,13 @@ dasd_ioctl_disable(struct block_device *bdev, int no, long args) * Quiesce device. 
*/ static int -dasd_ioctl_quiesce(struct block_device *bdev, int no, long args) +dasd_ioctl_quiesce(struct dasd_device *device) { - struct dasd_device *device; unsigned long flags; if (!capable (CAP_SYS_ADMIN)) return -EACCES; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - DEV_MESSAGE (KERN_DEBUG, device, "%s", "Quiesce IO on device"); spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); @@ -219,18 +107,13 @@ dasd_ioctl_quiesce(struct block_device *bdev, int no, long args) * Quiesce device. */ static int -dasd_ioctl_resume(struct block_device *bdev, int no, long args) +dasd_ioctl_resume(struct dasd_device *device) { - struct dasd_device *device; unsigned long flags; if (!capable (CAP_SYS_ADMIN)) return -EACCES; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - DEV_MESSAGE (KERN_DEBUG, device, "%s", "resume IO on device"); @@ -302,25 +185,19 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata) * Format device. */ static int -dasd_ioctl_format(struct block_device *bdev, int no, long args) +dasd_ioctl_format(struct block_device *bdev, void __user *argp) { - struct dasd_device *device; + struct dasd_device *device = bdev->bd_disk->private_data; struct format_data_t fdata; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - if (!args) + if (!argp) return -EINVAL; - /* fdata == NULL is no longer a valid arg to dasd_format ! */ - device = bdev->bd_disk->private_data; - - if (device == NULL) - return -ENODEV; if (device->features & DASD_FEATURE_READONLY) return -EROFS; - if (copy_from_user(&fdata, (void __user *) args, - sizeof (struct format_data_t))) + if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) return -EFAULT; if (bdev != bdev->bd_contains) { DEV_MESSAGE(KERN_WARNING, device, "%s", @@ -335,17 +212,8 @@ dasd_ioctl_format(struct block_device *bdev, int no, long args) * Reset device profile information */ static int -dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args) +dasd_ioctl_reset_profile(struct dasd_device *device) { - struct dasd_device *device; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - memset(&device->profile, 0, sizeof (struct dasd_profile_info_t)); return 0; } @@ -354,31 +222,24 @@ dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args) * Return device profile information */ static int -dasd_ioctl_read_profile(struct block_device *bdev, int no, long args) +dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp) { - struct dasd_device *device; - - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - if (dasd_profile_level == DASD_PROFILE_OFF) return -EIO; - - if (copy_to_user((long __user *) args, (long *) &device->profile, + if (copy_to_user(argp, &device->profile, sizeof (struct dasd_profile_info_t))) return -EFAULT; return 0; } #else static int -dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args) +dasd_ioctl_reset_profile(struct dasd_device *device) { return -ENOSYS; } static int -dasd_ioctl_read_profile(struct block_device *bdev, int no, long args) +dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp) { return -ENOSYS; } @@ -388,22 +249,18 @@ dasd_ioctl_read_profile(struct block_device *bdev, int no, long args) * Return dasd information. Used for BIODASDINFO and BIODASDINFO2. 
*/ static int -dasd_ioctl_information(struct block_device *bdev, int no, long args) +dasd_ioctl_information(struct dasd_device *device, + unsigned int cmd, void __user *argp) { - struct dasd_device *device; struct dasd_information2_t *dasd_info; unsigned long flags; int rc; struct ccw_device *cdev; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - if (!device->discipline->fill_info) return -EINVAL; - dasd_info = kmalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); + dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); if (dasd_info == NULL) return -ENOMEM; @@ -446,8 +303,7 @@ dasd_ioctl_information(struct block_device *bdev, int no, long args) memcpy(dasd_info->type, device->discipline->name, 4); else memcpy(dasd_info->type, "none", 4); - dasd_info->req_queue_len = 0; - dasd_info->chanq_len = 0; + if (device->request_queue->request_fn) { struct list_head *l; #ifdef DASD_EXTENDED_PROFILING @@ -467,8 +323,8 @@ dasd_ioctl_information(struct block_device *bdev, int no, long args) } rc = 0; - if (copy_to_user((long __user *) args, (long *) dasd_info, - ((no == (unsigned int) BIODASDINFO2) ? + if (copy_to_user(argp, dasd_info, + ((cmd == (unsigned int) BIODASDINFO2) ? sizeof (struct dasd_information2_t) : sizeof (struct dasd_information_t)))) rc = -EFAULT; @@ -480,69 +336,103 @@ dasd_ioctl_information(struct block_device *bdev, int no, long args) * Set read only */ static int -dasd_ioctl_set_ro(struct block_device *bdev, int no, long args) +dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) { - struct dasd_device *device; - int intval, rc; + struct dasd_device *device = bdev->bd_disk->private_data; + int intval; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (bdev != bdev->bd_contains) // ro setting is not allowed for partitions return -EINVAL; - if (get_user(intval, (int __user *) args)) + if (get_user(intval, (int *)argp)) return -EFAULT; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; set_disk_ro(bdev->bd_disk, intval); - rc = dasd_set_feature(device->cdev, DASD_FEATURE_READONLY, intval); - - return rc; + return dasd_set_feature(device->cdev, DASD_FEATURE_READONLY, intval); } -/* - * List of static ioctls. 
- */ -static struct { int no; dasd_ioctl_fn_t fn; } dasd_ioctls[] = +static int +dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd, + unsigned long arg) { - { BIODASDDISABLE, dasd_ioctl_disable }, - { BIODASDENABLE, dasd_ioctl_enable }, - { BIODASDQUIESCE, dasd_ioctl_quiesce }, - { BIODASDRESUME, dasd_ioctl_resume }, - { BIODASDFMT, dasd_ioctl_format }, - { BIODASDINFO, dasd_ioctl_information }, - { BIODASDINFO2, dasd_ioctl_information }, - { BIODASDPRRD, dasd_ioctl_read_profile }, - { BIODASDPRRST, dasd_ioctl_reset_profile }, - { BLKROSET, dasd_ioctl_set_ro }, - { DASDAPIVER, dasd_ioctl_api_version }, - { -1, NULL } -}; + struct cmbdata __user *argp = (void __user *) arg; + size_t size = _IOC_SIZE(cmd); + struct cmbdata data; + int ret; + + ret = cmf_readall(device->cdev, &data); + if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp)))) + return -EFAULT; + return ret; +} int -dasd_ioctl_init(void) +dasd_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) { - int i; + struct block_device *bdev = inode->i_bdev; + struct dasd_device *device = bdev->bd_disk->private_data; + void __user *argp = (void __user *)arg; - for (i = 0; dasd_ioctls[i].no != -1; i++) - dasd_ioctl_no_register(NULL, dasd_ioctls[i].no, - dasd_ioctls[i].fn); - return 0; + if (!device) + return -ENODEV; + + if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { + PRINT_DEBUG("empty data ptr"); + return -EINVAL; + } + switch (cmd) { + case BIODASDDISABLE: + return dasd_ioctl_disable(bdev); + case BIODASDENABLE: + return dasd_ioctl_enable(bdev); + case BIODASDQUIESCE: + return dasd_ioctl_quiesce(device); + case BIODASDRESUME: + return dasd_ioctl_resume(device); + case BIODASDFMT: + return dasd_ioctl_format(bdev, argp); + case BIODASDINFO: + return dasd_ioctl_information(device, cmd, argp); + case BIODASDINFO2: + return dasd_ioctl_information(device, cmd, argp); + case BIODASDPRRD: + return dasd_ioctl_read_profile(device, argp); + case BIODASDPRRST: + return dasd_ioctl_reset_profile(device); + case BLKROSET: + return dasd_ioctl_set_ro(bdev, argp); + case DASDAPIVER: + return dasd_ioctl_api_version(argp); + case BIODASDCMFENABLE: + return enable_cmf(device->cdev); + case BIODASDCMFDISABLE: + return disable_cmf(device->cdev); + case BIODASDREADALLCMB: + return dasd_ioctl_readall_cmb(device, cmd, arg); + default: + /* if the discipline has an ioctl method try it. */ + if (device->discipline->ioctl) { + int rval = device->discipline->ioctl(device, cmd, argp); + if (rval != -ENOIOCTLCMD) + return rval; + } + + return -EINVAL; + } } -void -dasd_ioctl_exit(void) +long +dasd_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - int i; + int rval; - for (i = 0; dasd_ioctls[i].no != -1; i++) - dasd_ioctl_no_unregister(NULL, dasd_ioctls[i].no, - dasd_ioctls[i].fn); + lock_kernel(); + rval = dasd_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + return (rval == -EINVAL) ? 
-ENOIOCTLCMD : rval; } - -EXPORT_SYMBOL(dasd_ioctl_no_register); -EXPORT_SYMBOL(dasd_ioctl_no_unregister); diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 44133250da2..be9b05347b4 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -388,12 +388,11 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char /* * get a struct dcssblk_dev_info */ - dev_info = kmalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL); + dev_info = kzalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL); if (dev_info == NULL) { rc = -ENOMEM; goto out; } - memset(dev_info, 0, sizeof(struct dcssblk_dev_info)); strcpy(dev_info->segment_name, local_buf); strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE); diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 6377a96735d..0c0162ff6c0 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -26,4 +26,5 @@ tape-$(CONFIG_PROC_FS) += tape_proc.o tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y) obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o +obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o obj-$(CONFIG_MONREADER) += monreader.o diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 5f6fa4c6784..a6415377bc7 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -368,10 +368,9 @@ fs3270_alloc_view(void) { struct fs3270 *fp; - fp = (struct fs3270 *) kmalloc(sizeof(struct fs3270),GFP_KERNEL); + fp = kzalloc(sizeof(struct fs3270),GFP_KERNEL); if (!fp) return ERR_PTR(-ENOMEM); - memset(fp, 0, sizeof(struct fs3270)); fp->init = raw3270_request_alloc(0); if (IS_ERR(fp->init)) { kfree(fp); diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index a317a123dab..6badd840340 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -50,14 +50,12 @@ kbd_alloc(void) { struct kbd_data *kbd; int i, len; - kbd = kmalloc(sizeof(struct kbd_data), GFP_KERNEL); + kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL); if (!kbd) goto out; - memset(kbd, 0, sizeof(struct kbd_data)); - kbd->key_maps = kmalloc(sizeof(key_maps), GFP_KERNEL); + kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL); if (!key_maps) goto out_kbd; - memset(kbd->key_maps, 0, sizeof(key_maps)); for (i = 0; i < ARRAY_SIZE(key_maps); i++) { if (key_maps[i]) { kbd->key_maps[i] = @@ -68,10 +66,9 @@ kbd_alloc(void) { sizeof(u_short)*NR_KEYS); } } - kbd->func_table = kmalloc(sizeof(func_table), GFP_KERNEL); + kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL); if (!kbd->func_table) goto out_maps; - memset(kbd->func_table, 0, sizeof(func_table)); for (i = 0; i < ARRAY_SIZE(func_table); i++) { if (func_table[i]) { len = strlen(func_table[i]) + 1; @@ -82,10 +79,9 @@ kbd_alloc(void) { } } kbd->fn_handler = - kmalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); + kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL); if (!kbd->fn_handler) goto out_func; - memset(kbd->fn_handler, 0, sizeof(fn_handler_fn *) * NR_FN_HANDLER); kbd->accent_table = kmalloc(sizeof(struct kbdiacr)*MAX_DIACR, GFP_KERNEL); if (!kbd->accent_table) diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 5fd3ad86738..fb7bc9e5eeb 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -257,14 +257,13 @@ mon_alloc_mem(void) int i,j; struct mon_private *monpriv; - monpriv = kmalloc(sizeof(struct mon_private), GFP_KERNEL); + monpriv = 
kzalloc(sizeof(struct mon_private), GFP_KERNEL); if (!monpriv) { P_ERROR("no memory for monpriv\n"); return NULL; } - memset(monpriv, 0, sizeof(struct mon_private)); for (i = 0; i < MON_MSGLIM; i++) { - monpriv->msg_array[i] = kmalloc(sizeof(struct mon_msg), + monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg), GFP_KERNEL); if (!monpriv->msg_array[i]) { P_ERROR("open, no memory for msg_array\n"); @@ -272,7 +271,6 @@ mon_alloc_mem(void) kfree(monpriv->msg_array[j]); return NULL; } - memset(monpriv->msg_array[i], 0, sizeof(struct mon_msg)); } return monpriv; } diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 1026f2bc318..bd06607a5dc 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -115,10 +115,9 @@ raw3270_request_alloc(size_t size) struct raw3270_request *rq; /* Allocate request structure */ - rq = kmalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA); + rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA); if (!rq) return ERR_PTR(-ENOMEM); - memset(rq, 0, sizeof(struct raw3270_request)); /* alloc output buffer. */ if (size > 0) { diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 01d865d9379..cd51ace8b61 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h @@ -250,6 +250,7 @@ extern void tape_free_request(struct tape_request *); extern int tape_do_io(struct tape_device *, struct tape_request *); extern int tape_do_io_async(struct tape_device *, struct tape_request *); extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *); +extern int tape_cancel_io(struct tape_device *, struct tape_request *); void tape_hotplug_event(struct tape_device *, int major, int action); static inline int diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 682039cac15..d4f2da73807 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c @@ -2,8 +2,7 @@ * drivers/s390/char/tape_34xx.c * tape device discipline for 3480/3490 tapes. * - * S390 and zSeries version - * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright (C) IBM Corp. 2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -28,11 +27,6 @@ debug_info_t *TAPE_DBF_AREA = NULL; EXPORT_SYMBOL(TAPE_DBF_AREA); -enum tape_34xx_type { - tape_3480, - tape_3490, -}; - #define TAPE34XX_FMT_3480 0 #define TAPE34XX_FMT_3480_2_XF 1 #define TAPE34XX_FMT_3480_XF 2 diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c new file mode 100644 index 00000000000..c3915f60a3a --- /dev/null +++ b/drivers/s390/char/tape_3590.c @@ -0,0 +1,1301 @@ +/* + * drivers/s390/char/tape_3590.c + * tape device discipline for 3590 tapes. + * + * Copyright (C) IBM Corp. 2001,2006 + * Author(s): Stefan Bader <shbader@de.ibm.com> + * Michael Holzheu <holzheu@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/bio.h> + +#define TAPE_DBF_AREA tape_3590_dbf + +#include "tape.h" +#include "tape_std.h" +#include "tape_3590.h" + +/* + * Pointer to debug area. 
+ */ +debug_info_t *TAPE_DBF_AREA = NULL; +EXPORT_SYMBOL(TAPE_DBF_AREA); + +/******************************************************************* + * Error Recovery fuctions: + * - Read Opposite: implemented + * - Read Device (buffered) log: BRA + * - Read Library log: BRA + * - Swap Devices: BRA + * - Long Busy: BRA + * - Special Intercept: BRA + * - Read Alternate: implemented + *******************************************************************/ + +#define PRINTK_HEADER "TAPE_3590: " + +static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { + [0x00] = "", + [0x10] = "Lost Sense", + [0x11] = "Assigned Elsewhere", + [0x12] = "Allegiance Reset", + [0x13] = "Shared Access Violation", + [0x20] = "Command Reject", + [0x21] = "Configuration Error", + [0x22] = "Protection Exception", + [0x23] = "Write Protect", + [0x24] = "Write Length", + [0x25] = "Read-Only Format", + [0x31] = "Beginning of Partition", + [0x33] = "End of Partition", + [0x34] = "End of Data", + [0x35] = "Block not found", + [0x40] = "Device Intervention", + [0x41] = "Loader Intervention", + [0x42] = "Library Intervention", + [0x50] = "Write Error", + [0x51] = "Erase Error", + [0x52] = "Formatting Error", + [0x53] = "Read Error", + [0x54] = "Unsupported Format", + [0x55] = "No Formatting", + [0x56] = "Positioning lost", + [0x57] = "Read Length", + [0x60] = "Unsupported Medium", + [0x61] = "Medium Length Error", + [0x62] = "Medium removed", + [0x64] = "Load Check", + [0x65] = "Unload Check", + [0x70] = "Equipment Check", + [0x71] = "Bus out Check", + [0x72] = "Protocol Error", + [0x73] = "Interface Error", + [0x74] = "Overrun", + [0x75] = "Halt Signal", + [0x90] = "Device fenced", + [0x91] = "Device Path fenced", + [0xa0] = "Volume misplaced", + [0xa1] = "Volume inaccessible", + [0xa2] = "Volume in input", + [0xa3] = "Volume ejected", + [0xa4] = "All categories reserved", + [0xa5] = "Duplicate Volume", + [0xa6] = "Library Manager Offline", + [0xa7] = "Library Output Station full", + [0xa8] = "Vision System non-operational", + [0xa9] = "Library Manager Equipment Check", + [0xaa] = "Library Equipment Check", + [0xab] = "All Library Cells full", + [0xac] = "No Cleaner Volumes in Library", + [0xad] = "I/O Station door open", + [0xae] = "Subsystem environmental alert", +}; + +/* + * 3590 IOCTL Overload + */ +static int +tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case TAPE390_DISPLAY: { + struct display_struct disp; + + if (copy_from_user(&disp, (char __user *) arg, sizeof(disp))) + return -EFAULT; + + return tape_std_display(device, &disp); + } + default: + return -EINVAL; /* no additional ioctls */ + } +} + +/* + * SENSE Medium: Get Sense data about medium state + */ +static int +tape_3590_sense_medium(struct tape_device *device) +{ + struct tape_request *request; + + request = tape_alloc_request(1, 128); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_MSEN; + tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); + return tape_do_io_free(device, request); +} + +/* + * MTTELL: Tell block. Return the number of block relative to current file. + */ +static int +tape_3590_mttell(struct tape_device *device, int mt_count) +{ + __u64 block_id; + int rc; + + rc = tape_std_read_block_id(device, &block_id); + if (rc) + return rc; + return block_id >> 32; +} + +/* + * MTSEEK: seek to the specified block. 
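The mtseek/mttell pair is reached from user space through the standard magnetic-tape ioctls; tape_3590_mttell() above returns the logical block number from the upper half of the 64-bit block id. A sketch, assuming the glibc <sys/mtio.h> interface and a channel-attached tape node such as /dev/ntibm0 (both are assumptions, not defined by this patch), and assuming MTIOCPOS is routed to the MTTELL operation by the tape character driver.

/*
 * Seek to a block and read the position back (illustration only).
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>

int main(void)
{
	struct mtop seek = { .mt_op = MTSEEK, .mt_count = 1234 };
	struct mtpos pos;
	int fd;

	fd = open("/dev/ntibm0", O_RDONLY);
	if (fd < 0)
		return 1;
	if (ioctl(fd, MTIOCTOP, &seek) == 0 &&	/* ends up in tape_3590_mtseek() */
	    ioctl(fd, MTIOCPOS, &pos) == 0)	/* position reported via MTTELL */
		printf("now at block %ld\n", pos.mt_blkno);
	close(fd);
	return 0;
}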
+ */ +static int +tape_3590_mtseek(struct tape_device *device, int count) +{ + struct tape_request *request; + + DBF_EVENT(6, "xsee id: %x\n", count); + request = tape_alloc_request(3, 4); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_LBL; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + *(__u32 *) request->cpdata = count; + tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + return tape_do_io_free(device, request); +} + +/* + * Read Opposite Error Recovery Function: + * Used, when Read Forward does not work + */ +static void +tape_3590_read_opposite(struct tape_device *device, + struct tape_request *request) +{ + struct tape_3590_disc_data *data; + + /* + * We have allocated 4 ccws in tape_std_read, so we can now + * transform the request to a read backward, followed by a + * forward space block. + */ + request->op = TO_RBA; + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); + data = device->discdata; + tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op, + device->char_data.idal_buf); + tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); + tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); + DBF_EVENT(6, "xrop ccwg\n"); +} + +/* + * Read Attention Msg + * This should be done after an interrupt with attention bit (0x80) + * in device state. + * + * After a "read attention message" request there are two possible + * results: + * + * 1. A unit check is presented, when attention sense is present (e.g. when + * a medium has been unloaded). The attention sense comes then + * together with the unit check. The recovery action is either "retry" + * (in case there is an attention message pending) or "permanent error". + * + * 2. The attention msg is written to the "read subsystem data" buffer. + * In this case we probably should print it to the console. + */ +static int +tape_3590_read_attmsg(struct tape_device *device) +{ + struct tape_request *request; + char *buf; + + request = tape_alloc_request(3, 4096); + if (IS_ERR(request)) + return PTR_ERR(request); + request->op = TO_READ_ATTMSG; + buf = request->cpdata; + buf[0] = PREP_RD_SS_DATA; + buf[6] = RD_ATTMSG; /* read att msg */ + tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); + tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); + tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + return tape_do_io_free(device, request); +} + +/* + * These functions are used to schedule follow-up actions from within an + * interrupt context (like unsolicited interrupts). 
+ */ +static void +tape_3590_work_handler(void *data) +{ + struct { + struct tape_device *device; + enum tape_op op; + struct work_struct work; + } *p = data; + + switch (p->op) { + case TO_MSEN: + tape_3590_sense_medium(p->device); + break; + case TO_READ_ATTMSG: + tape_3590_read_attmsg(p->device); + break; + default: + DBF_EVENT(3, "T3590: work handler undefined for " + "operation 0x%02x\n", p->op); + } + tape_put_device(p->device); + kfree(p); +} + +static int +tape_3590_schedule_work(struct tape_device *device, enum tape_op op) +{ + struct { + struct tape_device *device; + enum tape_op op; + struct work_struct work; + } *p; + + if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) + return -ENOMEM; + + INIT_WORK(&p->work, tape_3590_work_handler, p); + + p->device = tape_get_device_reference(device); + p->op = op; + + schedule_work(&p->work); + return 0; +} + +#ifdef CONFIG_S390_TAPE_BLOCK +/* + * Tape Block READ + */ +static struct tape_request * +tape_3590_bread(struct tape_device *device, struct request *req) +{ + struct tape_request *request; + struct ccw1 *ccw; + int count = 0, start_block, i; + unsigned off; + char *dst; + struct bio_vec *bv; + struct bio *bio; + + DBF_EVENT(6, "xBREDid:"); + start_block = req->sector >> TAPEBLOCK_HSEC_S2B; + DBF_EVENT(6, "start_block = %i\n", start_block); + + rq_for_each_bio(bio, req) { + bio_for_each_segment(bv, bio, i) { + count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); + } + } + request = tape_alloc_request(2 + count + 1, 4); + if (IS_ERR(request)) + return request; + request->op = TO_BLOCK; + *(__u32 *) request->cpdata = start_block; + ccw = request->cpaddr; + ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte); + + /* + * We always setup a nop after the mode set ccw. This slot is + * used in tape_std_check_locate to insert a locate ccw if the + * current tape position doesn't match the start block to be read. + */ + ccw = tape_ccw_cc(ccw, NOP, 0, NULL); + + rq_for_each_bio(bio, req) { + bio_for_each_segment(bv, bio, i) { + dst = kmap(bv->bv_page) + bv->bv_offset; + for (off = 0; off < bv->bv_len; + off += TAPEBLOCK_HSEC_SIZE) { + ccw->flags = CCW_FLAG_CC; + ccw->cmd_code = READ_FORWARD; + ccw->count = TAPEBLOCK_HSEC_SIZE; + set_normalized_cda(ccw, (void *) __pa(dst)); + ccw++; + dst += TAPEBLOCK_HSEC_SIZE; + } + if (off > bv->bv_len) + BUG(); + } + } + ccw = tape_ccw_end(ccw, NOP, 0, NULL); + DBF_EVENT(6, "xBREDccwg\n"); + return request; +} + +static void +tape_3590_free_bread(struct tape_request *request) +{ + struct ccw1 *ccw; + + /* Last ccw is a nop and doesn't need clear_normalized_cda */ + for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++) + if (ccw->cmd_code == READ_FORWARD) + clear_normalized_cda(ccw); + tape_free_request(request); +} + +/* + * check_locate is called just before the tape request is passed to + * the common io layer for execution. It has to check the current + * tape position and insert a locate ccw if it doesn't match the + * start block for the request. + */ +static void +tape_3590_check_locate(struct tape_device *device, struct tape_request *request) +{ + __u32 *start_block; + + start_block = (__u32 *) request->cpdata; + if (*start_block != device->blk_data.block_position) { + /* Add the start offset of the file to get the real block. 
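tape_3590_bread() above sizes its channel program from the block-layer request: the start sector and every bio segment length are shifted by TAPEBLOCK_HSEC_S2B to get tape blocks, and one READ_FORWARD CCW is set up per block. A stand-alone sketch of that arithmetic; the concrete values assume TAPEBLOCK_HSEC_SIZE of 2048 bytes and hence an S2B shift of 2, which come from tape.h and are not visible in this diff.

/*
 * Sector-to-tape-block arithmetic used when building the read request.
 */
#include <stdio.h>

#define SECTOR_SHIFT		9	/* 512-byte block layer sectors */
#define TAPEBLOCK_HSEC_S2B	2	/* assumed: 2048-byte tape blocks */

int main(void)
{
	unsigned long req_sector = 40;	/* example request start sector */
	unsigned long seg_len = 8192;	/* example bio segment length in bytes */
	unsigned long start_block, data_ccws;

	start_block = req_sector >> TAPEBLOCK_HSEC_S2B;
	data_ccws = seg_len >> (TAPEBLOCK_HSEC_S2B + SECTOR_SHIFT);

	/* 40 sectors -> tape block 10; an 8 KiB segment needs 4 data CCWs */
	printf("start_block=%lu data_ccws=%lu\n", start_block, data_ccws);
	return 0;
}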
*/ + *start_block += device->bof; + tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); + } +} +#endif + +/* + * The done handler is called at device/channel end and wakes up the sleeping + * process + */ +static int +tape_3590_done(struct tape_device *device, struct tape_request *request) +{ + struct tape_3590_med_sense *sense; + + DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); + + switch (request->op) { + case TO_BSB: + case TO_BSF: + case TO_DSE: + case TO_FSB: + case TO_FSF: + case TO_LBL: + case TO_RFO: + case TO_RBA: + case TO_REW: + case TO_WRI: + case TO_WTM: + case TO_BLOCK: + case TO_LOAD: + tape_med_state_set(device, MS_LOADED); + break; + case TO_RUN: + tape_med_state_set(device, MS_UNLOADED); + break; + case TO_MSEN: + sense = (struct tape_3590_med_sense *) request->cpdata; + if (sense->masst == MSENSE_UNASSOCIATED) + tape_med_state_set(device, MS_UNLOADED); + if (sense->masst == MSENSE_ASSOCIATED_MOUNT) + tape_med_state_set(device, MS_LOADED); + break; + case TO_RBI: /* RBI seems to succeed even without medium loaded. */ + case TO_NOP: /* Same to NOP. */ + case TO_READ_CONFIG: + case TO_READ_ATTMSG: + case TO_DIS: + case TO_ASSIGN: + case TO_UNASSIGN: + break; + case TO_SIZE: + break; + } + return TAPE_IO_SUCCESS; +} + +/* + * This fuction is called, when error recovery was successfull + */ +static inline int +tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) +{ + DBF_EVENT(3, "Error Recovery successfull for %s\n", + tape_op_verbose[request->op]); + return tape_3590_done(device, request); +} + +/* + * This fuction is called, when error recovery was not successfull + */ +static inline int +tape_3590_erp_failed(struct tape_device *device, struct tape_request *request, + struct irb *irb, int rc) +{ + DBF_EVENT(3, "Error Recovery failed for %s\n", + tape_op_verbose[request->op]); + tape_dump_sense_dbf(device, request, irb); + return rc; +} + +/* + * Error Recovery do retry + */ +static inline int +tape_3590_erp_retry(struct tape_device *device, struct tape_request *request, + struct irb *irb) +{ + DBF_EVENT(2, "Retry: %s\n", tape_op_verbose[request->op]); + tape_dump_sense_dbf(device, request, irb); + return TAPE_IO_RETRY; +} + +/* + * Handle unsolicited interrupts + */ +static int +tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) +{ + if (irb->scsw.dstat == DEV_STAT_CHN_END) + /* Probably result of halt ssch */ + return TAPE_IO_PENDING; + else if (irb->scsw.dstat == 0x85) + /* Device Ready -> check medium state */ + tape_3590_schedule_work(device, TO_MSEN); + else if (irb->scsw.dstat & DEV_STAT_ATTENTION) + tape_3590_schedule_work(device, TO_READ_ATTMSG); + else { + DBF_EVENT(3, "unsol.irq! 
dev end: %08x\n", device->cdev_id); + PRINT_WARN("Unsolicited IRQ (Device End) caught.\n"); + tape_dump_sense(device, NULL, irb); + } + return TAPE_IO_SUCCESS; +} + +/* + * Basic Recovery routine + */ +static int +tape_3590_erp_basic(struct tape_device *device, struct tape_request *request, + struct irb *irb, int rc) +{ + struct tape_3590_sense *sense; + + sense = (struct tape_3590_sense *) irb->ecw; + + switch (sense->bra) { + case SENSE_BRA_PER: + return tape_3590_erp_failed(device, request, irb, rc); + case SENSE_BRA_CONT: + return tape_3590_erp_succeded(device, request); + case SENSE_BRA_RE: + return tape_3590_erp_retry(device, request, irb); + case SENSE_BRA_DRE: + return tape_3590_erp_failed(device, request, irb, rc); + default: + PRINT_ERR("Unknown BRA %x - This should not happen!\n", + sense->bra); + BUG(); + return TAPE_IO_STOP; + } +} + +/* + * RDL: Read Device (buffered) log + */ +static int +tape_3590_erp_read_buf_log(struct tape_device *device, + struct tape_request *request, struct irb *irb) +{ + /* + * We just do the basic error recovery at the moment (retry). + * Perhaps in the future, we read the log and dump it somewhere... + */ + return tape_3590_erp_basic(device, request, irb, -EIO); +} + +/* + * SWAP: Swap Devices + */ +static int +tape_3590_erp_swap(struct tape_device *device, struct tape_request *request, + struct irb *irb) +{ + /* + * This error recovery should swap the tapes + * if the original has a problem. The operation + * should proceed with the new tape... this + * should probably be done in user space! + */ + PRINT_WARN("(%s): Swap Tape Device!\n", device->cdev->dev.bus_id); + return tape_3590_erp_basic(device, request, irb, -EIO); +} + +/* + * LBY: Long Busy + */ +static int +tape_3590_erp_long_busy(struct tape_device *device, + struct tape_request *request, struct irb *irb) +{ + /* FIXME: how about WAITING for a minute ? */ + PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", + device->cdev->dev.bus_id); + return tape_3590_erp_basic(device, request, irb, -EBUSY); +} + +/* + * SPI: Special Intercept + */ +static int +tape_3590_erp_special_interrupt(struct tape_device *device, + struct tape_request *request, struct irb *irb) +{ + return tape_3590_erp_basic(device, request, irb, -EIO); +} + +/* + * RDA: Read Alternate + */ +static int +tape_3590_erp_read_alternate(struct tape_device *device, + struct tape_request *request, struct irb *irb) +{ + struct tape_3590_disc_data *data; + + /* + * The issued Read Backward or Read Previous command is not + * supported by the device + * The recovery action should be to issue another command: + * Read Revious: if Read Backward is not supported + * Read Backward: if Read Previous is not supported + */ + data = device->discdata; + if (data->read_back_op == READ_PREVIOUS) { + DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n", + device->cdev_id); + data->read_back_op = READ_BACKWARD; + } else { + DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n", + device->cdev_id); + data->read_back_op = READ_PREVIOUS; + } + tape_3590_read_opposite(device, request); + return tape_3590_erp_retry(device, request, irb); +} + +/* + * Error Recovery read opposite + */ +static int +tape_3590_erp_read_opposite(struct tape_device *device, + struct tape_request *request, struct irb *irb) +{ + switch (request->op) { + case TO_RFO: + /* + * We did read forward, but the data could not be read. + * We will read backward and then skip forward again. 
+ */ + tape_3590_read_opposite(device, request); + return tape_3590_erp_retry(device, request, irb); + case TO_RBA: + /* We tried to read forward and backward, but hat no success */ + return tape_3590_erp_failed(device, request, irb, -EIO); + break; + default: + PRINT_WARN("read_opposite_recovery_called_with_op: %s\n", + tape_op_verbose[request->op]); + return tape_3590_erp_failed(device, request, irb, -EIO); + } +} + +/* + * Print an MIM (Media Information Message) (message code f0) + */ +static void +tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb) +{ + struct tape_3590_sense *sense; + + sense = (struct tape_3590_sense *) irb->ecw; + /* Exception Message */ + switch (sense->fmt.f70.emc) { + case 0x02: + PRINT_WARN("(%s): Data degraded\n", device->cdev->dev.bus_id); + break; + case 0x03: + PRINT_WARN("(%s): Data degraded in partion %i\n", + device->cdev->dev.bus_id, sense->fmt.f70.mp); + break; + case 0x04: + PRINT_WARN("(%s): Medium degraded\n", device->cdev->dev.bus_id); + break; + case 0x05: + PRINT_WARN("(%s): Medium degraded in partition %i\n", + device->cdev->dev.bus_id, sense->fmt.f70.mp); + break; + case 0x06: + PRINT_WARN("(%s): Block 0 Error\n", device->cdev->dev.bus_id); + break; + case 0x07: + PRINT_WARN("(%s): Medium Exception 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f70.md); + break; + default: + PRINT_WARN("(%s): MIM ExMsg: 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f70.emc); + break; + } + /* Service Message */ + switch (sense->fmt.f70.smc) { + case 0x02: + PRINT_WARN("(%s): Reference Media maintenance procedure %i\n", + device->cdev->dev.bus_id, sense->fmt.f70.md); + break; + default: + PRINT_WARN("(%s): MIM ServiceMsg: 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f70.smc); + break; + } +} + +/* + * Print an I/O Subsystem Service Information Message (message code f1) + */ +static void +tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb) +{ + struct tape_3590_sense *sense; + + sense = (struct tape_3590_sense *) irb->ecw; + /* Exception Message */ + switch (sense->fmt.f71.emc) { + case 0x01: + PRINT_WARN("(%s): Effect of failure is unknown\n", + device->cdev->dev.bus_id); + break; + case 0x02: + PRINT_WARN("(%s): CU Exception - no performance impact\n", + device->cdev->dev.bus_id); + break; + case 0x03: + PRINT_WARN("(%s): CU Exception on channel interface 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.md[0]); + break; + case 0x04: + PRINT_WARN("(%s): CU Exception on device path 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.md[0]); + break; + case 0x05: + PRINT_WARN("(%s): CU Exception on library path 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.md[0]); + break; + case 0x06: + PRINT_WARN("(%s): CU Exception on node 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.md[0]); + break; + case 0x07: + PRINT_WARN("(%s): CU Exception on partition 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.md[0]); + break; + default: + PRINT_WARN("(%s): SIM ExMsg: 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.emc); + } + /* Service Message */ + switch (sense->fmt.f71.smc) { + case 0x01: + PRINT_WARN("(%s): Repair impact is unknown\n", + device->cdev->dev.bus_id); + break; + case 0x02: + PRINT_WARN("(%s): Repair will not impact cu performance\n", + device->cdev->dev.bus_id); + break; + case 0x03: + if (sense->fmt.f71.mdf == 0) + PRINT_WARN("(%s): Repair will disable node " + "0x%x on CU\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1]); + else + PRINT_WARN("(%s): Repair will disable 
nodes " + "(0x%x-0x%x) on CU\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1], sense->fmt.f71.md[2]); + break; + case 0x04: + if (sense->fmt.f71.mdf == 0) + PRINT_WARN("(%s): Repair will disable cannel path " + "0x%x on CU\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1]); + else + PRINT_WARN("(%s): Repair will disable cannel paths " + "(0x%x-0x%x) on CU\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1], sense->fmt.f71.md[2]); + break; + case 0x05: + if (sense->fmt.f71.mdf == 0) + PRINT_WARN("(%s): Repair will disable device path " + "0x%x on CU\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1]); + else + PRINT_WARN("(%s): Repair will disable device paths " + "(0x%x-0x%x) on CU\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1], sense->fmt.f71.md[2]); + break; + case 0x06: + if (sense->fmt.f71.mdf == 0) + PRINT_WARN("(%s): Repair will disable library path " + "0x%x on CU\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1]); + else + PRINT_WARN("(%s): Repair will disable library paths " + "(0x%x-0x%x) on CU\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1], sense->fmt.f71.md[2]); + break; + case 0x07: + PRINT_WARN("(%s): Repair will disable access to CU\n", + device->cdev->dev.bus_id); + break; + default: + PRINT_WARN("(%s): SIM ServiceMsg: 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.smc); + } +} + +/* + * Print an Device Subsystem Service Information Message (message code f2) + */ +static void +tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb) +{ + struct tape_3590_sense *sense; + + sense = (struct tape_3590_sense *) irb->ecw; + /* Exception Message */ + switch (sense->fmt.f71.emc) { + case 0x01: + PRINT_WARN("(%s): Effect of failure is unknown\n", + device->cdev->dev.bus_id); + break; + case 0x02: + PRINT_WARN("(%s): DV Exception - no performance impact\n", + device->cdev->dev.bus_id); + break; + case 0x03: + PRINT_WARN("(%s): DV Exception on channel interface 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.md[0]); + break; + case 0x04: + PRINT_WARN("(%s): DV Exception on loader 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.md[0]); + break; + case 0x05: + PRINT_WARN("(%s): DV Exception on message display 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.md[0]); + break; + case 0x06: + PRINT_WARN("(%s): DV Exception in tape path\n", + device->cdev->dev.bus_id); + break; + case 0x07: + PRINT_WARN("(%s): DV Exception in drive\n", + device->cdev->dev.bus_id); + break; + default: + PRINT_WARN("(%s): DSIM ExMsg: 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.emc); + } + /* Service Message */ + switch (sense->fmt.f71.smc) { + case 0x01: + PRINT_WARN("(%s): Repair impact is unknown\n", + device->cdev->dev.bus_id); + break; + case 0x02: + PRINT_WARN("(%s): Repair will not impact device performance\n", + device->cdev->dev.bus_id); + break; + case 0x03: + if (sense->fmt.f71.mdf == 0) + PRINT_WARN("(%s): Repair will disable channel path " + "0x%x on DV\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1]); + else + PRINT_WARN("(%s): Repair will disable channel path " + "(0x%x-0x%x) on DV\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1], sense->fmt.f71.md[2]); + break; + case 0x04: + if (sense->fmt.f71.mdf == 0) + PRINT_WARN("(%s): Repair will disable interface 0x%x " + "on DV\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1]); + else + PRINT_WARN("(%s): Repair will disable interfaces " + "(0x%x-0x%x) on DV\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1], sense->fmt.f71.md[2]); + break; + 
case 0x05: + if (sense->fmt.f71.mdf == 0) + PRINT_WARN("(%s): Repair will disable loader 0x%x " + "on DV\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1]); + else + PRINT_WARN("(%s): Repair will disable loader " + "(0x%x-0x%x) on DV\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1], sense->fmt.f71.md[2]); + break; + case 0x07: + PRINT_WARN("(%s): Repair will disable access to DV\n", + device->cdev->dev.bus_id); + break; + case 0x08: + if (sense->fmt.f71.mdf == 0) + PRINT_WARN("(%s): Repair will disable message " + "display 0x%x on DV\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1]); + else + PRINT_WARN("(%s): Repair will disable message " + "displays (0x%x-0x%x) on DV\n", + device->cdev->dev.bus_id, + sense->fmt.f71.md[1], sense->fmt.f71.md[2]); + break; + case 0x09: + PRINT_WARN("(%s): Clean DV\n", device->cdev->dev.bus_id); + break; + default: + PRINT_WARN("(%s): DSIM ServiceMsg: 0x%02x\n", + device->cdev->dev.bus_id, sense->fmt.f71.smc); + } +} + +/* + * Print standard ERA Message + */ +static void +tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) +{ + struct tape_3590_sense *sense; + + sense = (struct tape_3590_sense *) irb->ecw; + if (sense->mc == 0) + return; + if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) { + if (tape_3590_msg[sense->mc] != NULL) + PRINT_WARN("(%s): %s\n", device->cdev->dev.bus_id, + tape_3590_msg[sense->mc]); + else { + PRINT_WARN("(%s): Message Code 0x%x\n", + device->cdev->dev.bus_id, sense->mc); + } + return; + } + if (sense->mc == 0xf0) { + /* Standard Media Information Message */ + PRINT_WARN("(%s): MIM SEV=%i, MC=%02x, ES=%x/%x, " + "RC=%02x-%04x-%02x\n", device->cdev->dev.bus_id, + sense->fmt.f70.sev, sense->mc, + sense->fmt.f70.emc, sense->fmt.f70.smc, + sense->fmt.f70.refcode, sense->fmt.f70.mid, + sense->fmt.f70.fid); + tape_3590_print_mim_msg_f0(device, irb); + return; + } + if (sense->mc == 0xf1) { + /* Standard I/O Subsystem Service Information Message */ + PRINT_WARN("(%s): IOSIM SEV=%i, DEVTYPE=3590/%02x, " + "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", + device->cdev->dev.bus_id, sense->fmt.f71.sev, + device->cdev->id.dev_model, + sense->mc, sense->fmt.f71.emc, + sense->fmt.f71.smc, sense->fmt.f71.refcode1, + sense->fmt.f71.refcode2, sense->fmt.f71.refcode3); + tape_3590_print_io_sim_msg_f1(device, irb); + return; + } + if (sense->mc == 0xf2) { + /* Standard Device Service Information Message */ + PRINT_WARN("(%s): DEVSIM SEV=%i, DEVTYPE=3590/%02x, " + "MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", + device->cdev->dev.bus_id, sense->fmt.f71.sev, + device->cdev->id.dev_model, + sense->mc, sense->fmt.f71.emc, + sense->fmt.f71.smc, sense->fmt.f71.refcode1, + sense->fmt.f71.refcode2, sense->fmt.f71.refcode3); + tape_3590_print_dev_sim_msg_f2(device, irb); + return; + } + if (sense->mc == 0xf3) { + /* Standard Library Service Information Message */ + return; + } + PRINT_WARN("(%s): Device Message(%x)\n", + device->cdev->dev.bus_id, sense->mc); +} + +/* + * 3590 error Recovery routine: + * If possible, it tries to recover from the error. If this is not possible, + * inform the user about the problem. + */ +static int +tape_3590_unit_check(struct tape_device *device, struct tape_request *request, + struct irb *irb) +{ + struct tape_3590_sense *sense; + int rc; + +#ifdef CONFIG_S390_TAPE_BLOCK + if (request->op == TO_BLOCK) { + /* + * Recovery for block device requests. Set the block_position + * to something invalid and retry. 
+ */ + device->blk_data.block_position = -1; + if (request->retries-- <= 0) + return tape_3590_erp_failed(device, request, irb, -EIO); + else + return tape_3590_erp_retry(device, request, irb); + } +#endif + + sense = (struct tape_3590_sense *) irb->ecw; + + /* + * First check all RC-QRCs where we want to do something special + * - "break": basic error recovery is done + * - "goto out:": just print error message if available + */ + rc = -EIO; + switch (sense->rc_rqc) { + + case 0x1110: + tape_3590_print_era_msg(device, irb); + return tape_3590_erp_read_buf_log(device, request, irb); + + case 0x2011: + tape_3590_print_era_msg(device, irb); + return tape_3590_erp_read_alternate(device, request, irb); + + case 0x2230: + case 0x2231: + tape_3590_print_era_msg(device, irb); + return tape_3590_erp_special_interrupt(device, request, irb); + + case 0x3010: + DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", + device->cdev_id); + return tape_3590_erp_basic(device, request, irb, -ENOSPC); + case 0x3012: + DBF_EVENT(2, "(%08x): Forward at End of Partition\n", + device->cdev_id); + return tape_3590_erp_basic(device, request, irb, -ENOSPC); + case 0x3020: + DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id); + return tape_3590_erp_basic(device, request, irb, -ENOSPC); + + case 0x3122: + DBF_EVENT(2, "(%08x): Rewind Unload initiated\n", + device->cdev_id); + return tape_3590_erp_basic(device, request, irb, -EIO); + case 0x3123: + DBF_EVENT(2, "(%08x): Rewind Unload complete\n", + device->cdev_id); + tape_med_state_set(device, MS_UNLOADED); + return tape_3590_erp_basic(device, request, irb, 0); + + case 0x4010: + /* + * print additional msg since default msg + * "device intervention" is not very meaningfull + */ + PRINT_WARN("(%s): Tape operation when medium not loaded\n", + device->cdev->dev.bus_id); + tape_med_state_set(device, MS_UNLOADED); + return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); + case 0x4012: /* Device Long Busy */ + tape_3590_print_era_msg(device, irb); + return tape_3590_erp_long_busy(device, request, irb); + + case 0x5010: + if (sense->rac == 0xd0) { + /* Swap */ + tape_3590_print_era_msg(device, irb); + return tape_3590_erp_swap(device, request, irb); + } + if (sense->rac == 0x26) { + /* Read Opposite */ + tape_3590_print_era_msg(device, irb); + return tape_3590_erp_read_opposite(device, request, + irb); + } + return tape_3590_erp_basic(device, request, irb, -EIO); + case 0x5020: + case 0x5021: + case 0x5022: + case 0x5040: + case 0x5041: + case 0x5042: + tape_3590_print_era_msg(device, irb); + return tape_3590_erp_swap(device, request, irb); + + case 0x5110: + case 0x5111: + return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); + + case 0x5120: + case 0x1120: + tape_med_state_set(device, MS_UNLOADED); + return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); + + case 0x6020: + PRINT_WARN("(%s): Cartridge of wrong type ?\n", + device->cdev->dev.bus_id); + return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE); + + case 0x8011: + PRINT_WARN("(%s): Another host has reserved the tape device\n", + device->cdev->dev.bus_id); + return tape_3590_erp_basic(device, request, irb, -EPERM); + case 0x8013: + PRINT_WARN("(%s): Another host has priviliged access to the " + "tape device\n", device->cdev->dev.bus_id); + PRINT_WARN("(%s): To solve the problem unload the current " + "cartridge!\n", device->cdev->dev.bus_id); + return tape_3590_erp_basic(device, request, irb, -EPERM); + default: + return tape_3590_erp_basic(device, request, irb, 
-EIO); + } +} + +/* + * 3590 interrupt handler: + */ +static int +tape_3590_irq(struct tape_device *device, struct tape_request *request, + struct irb *irb) +{ + if (request == NULL) + return tape_3590_unsolicited_irq(device, irb); + + if ((irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) && + (irb->scsw.dstat & DEV_STAT_DEV_END) && (request->op == TO_WRI)) { + /* Write at end of volume */ + DBF_EVENT(2, "End of volume\n"); + return tape_3590_erp_failed(device, request, irb, -ENOSPC); + } + + if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) + return tape_3590_unit_check(device, request, irb); + + if (irb->scsw.dstat & DEV_STAT_DEV_END) { + if (irb->scsw.dstat == DEV_STAT_UNIT_EXCEP) { + if (request->op == TO_FSB || request->op == TO_BSB) + request->rescnt++; + else + DBF_EVENT(5, "Unit Exception!\n"); + } + + return tape_3590_done(device, request); + } + + if (irb->scsw.dstat & DEV_STAT_CHN_END) { + DBF_EVENT(2, "cannel end\n"); + return TAPE_IO_PENDING; + } + + if (irb->scsw.dstat & DEV_STAT_ATTENTION) { + DBF_EVENT(2, "Unit Attention when busy..\n"); + return TAPE_IO_PENDING; + } + + DBF_EVENT(6, "xunknownirq\n"); + PRINT_ERR("Unexpected interrupt.\n"); + PRINT_ERR("Current op is: %s", tape_op_verbose[request->op]); + tape_dump_sense(device, request, irb); + return TAPE_IO_STOP; +} + +/* + * Setup device function + */ +static int +tape_3590_setup_device(struct tape_device *device) +{ + int rc; + struct tape_3590_disc_data *data; + + DBF_EVENT(6, "3590 device setup\n"); + data = kmalloc(sizeof(struct tape_3590_disc_data), + GFP_KERNEL | GFP_DMA); + if (data == NULL) + return -ENOMEM; + data->read_back_op = READ_PREVIOUS; + device->discdata = data; + + if ((rc = tape_std_assign(device)) == 0) { + /* Try to find out if medium is loaded */ + if ((rc = tape_3590_sense_medium(device)) != 0) + DBF_LH(3, "3590 medium sense returned %d\n", rc); + } + + return rc; +} + +/* + * Cleanup device function + */ +static void +tape_3590_cleanup_device(struct tape_device *device) +{ + tape_std_unassign(device); + + kfree(device->discdata); + device->discdata = NULL; +} + +/* + * List of 3590 magnetic tape commands. + */ +static tape_mtop_fn tape_3590_mtop[TAPE_NR_MTOPS] = { + [MTRESET] = tape_std_mtreset, + [MTFSF] = tape_std_mtfsf, + [MTBSF] = tape_std_mtbsf, + [MTFSR] = tape_std_mtfsr, + [MTBSR] = tape_std_mtbsr, + [MTWEOF] = tape_std_mtweof, + [MTREW] = tape_std_mtrew, + [MTOFFL] = tape_std_mtoffl, + [MTNOP] = tape_std_mtnop, + [MTRETEN] = tape_std_mtreten, + [MTBSFM] = tape_std_mtbsfm, + [MTFSFM] = tape_std_mtfsfm, + [MTEOM] = tape_std_mteom, + [MTERASE] = tape_std_mterase, + [MTRAS1] = NULL, + [MTRAS2] = NULL, + [MTRAS3] = NULL, + [MTSETBLK] = tape_std_mtsetblk, + [MTSETDENSITY] = NULL, + [MTSEEK] = tape_3590_mtseek, + [MTTELL] = tape_3590_mttell, + [MTSETDRVBUFFER] = NULL, + [MTFSS] = NULL, + [MTBSS] = NULL, + [MTWSM] = NULL, + [MTLOCK] = NULL, + [MTUNLOCK] = NULL, + [MTLOAD] = tape_std_mtload, + [MTUNLOAD] = tape_std_mtunload, + [MTCOMPRESSION] = tape_std_mtcompression, + [MTSETPART] = NULL, + [MTMKPART] = NULL +}; + +/* + * Tape discipline structure for 3590. 
+ */ +static struct tape_discipline tape_discipline_3590 = { + .owner = THIS_MODULE, + .setup_device = tape_3590_setup_device, + .cleanup_device = tape_3590_cleanup_device, + .process_eov = tape_std_process_eov, + .irq = tape_3590_irq, + .read_block = tape_std_read_block, + .write_block = tape_std_write_block, +#ifdef CONFIG_S390_TAPE_BLOCK + .bread = tape_3590_bread, + .free_bread = tape_3590_free_bread, + .check_locate = tape_3590_check_locate, +#endif + .ioctl_fn = tape_3590_ioctl, + .mtop_array = tape_3590_mtop +}; + +static struct ccw_device_id tape_3590_ids[] = { + {CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590}, + { /* end of list */ } +}; + +static int +tape_3590_online(struct ccw_device *cdev) +{ + return tape_generic_online(cdev->dev.driver_data, + &tape_discipline_3590); +} + +static int +tape_3590_offline(struct ccw_device *cdev) +{ + return tape_generic_offline(cdev->dev.driver_data); +} + +static struct ccw_driver tape_3590_driver = { + .name = "tape_3590", + .owner = THIS_MODULE, + .ids = tape_3590_ids, + .probe = tape_generic_probe, + .remove = tape_generic_remove, + .set_offline = tape_3590_offline, + .set_online = tape_3590_online, +}; + +/* + * Setup discipline structure. + */ +static int +tape_3590_init(void) +{ + int rc; + + TAPE_DBF_AREA = debug_register("tape_3590", 2, 2, 4 * sizeof(long)); + debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view); +#ifdef DBF_LIKE_HELL + debug_set_level(TAPE_DBF_AREA, 6); +#endif + + DBF_EVENT(3, "3590 init\n"); + /* Register driver for 3590 tapes. */ + rc = ccw_driver_register(&tape_3590_driver); + if (rc) + DBF_EVENT(3, "3590 init failed\n"); + else + DBF_EVENT(3, "3590 registered\n"); + return rc; +} + +static void +tape_3590_exit(void) +{ + ccw_driver_unregister(&tape_3590_driver); + + debug_unregister(TAPE_DBF_AREA); +} + +MODULE_DEVICE_TABLE(ccw, tape_3590_ids); +MODULE_AUTHOR("(C) 2001,2006 IBM Corporation"); +MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver"); +MODULE_LICENSE("GPL"); + +module_init(tape_3590_init); +module_exit(tape_3590_exit); diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h new file mode 100644 index 00000000000..cf274b9445a --- /dev/null +++ b/drivers/s390/char/tape_3590.h @@ -0,0 +1,124 @@ +/* + * drivers/s390/char/tape_3590.h + * tape device discipline for 3590 tapes. + * + * Copyright (C) IBM Corp. 
2001,2006 + * Author(s): Stefan Bader <shbader@de.ibm.com> + * Michael Holzheu <holzheu@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef _TAPE_3590_H +#define _TAPE_3590_H + +#define MEDIUM_SENSE 0xc2 +#define READ_PREVIOUS 0x0a +#define MODE_SENSE 0xcf +#define PERFORM_SS_FUNC 0x77 +#define READ_SS_DATA 0x3e + +#define PREP_RD_SS_DATA 0x18 +#define RD_ATTMSG 0x3 + +#define SENSE_BRA_PER 0 +#define SENSE_BRA_CONT 1 +#define SENSE_BRA_RE 2 +#define SENSE_BRA_DRE 3 + +#define SENSE_FMT_LIBRARY 0x23 +#define SENSE_FMT_UNSOLICITED 0x40 +#define SENSE_FMT_COMMAND_REJ 0x41 +#define SENSE_FMT_COMMAND_EXEC0 0x50 +#define SENSE_FMT_COMMAND_EXEC1 0x51 +#define SENSE_FMT_EVENT0 0x60 +#define SENSE_FMT_EVENT1 0x61 +#define SENSE_FMT_MIM 0x70 +#define SENSE_FMT_SIM 0x71 + +#define MSENSE_UNASSOCIATED 0x00 +#define MSENSE_ASSOCIATED_MOUNT 0x01 +#define MSENSE_ASSOCIATED_UMOUNT 0x02 + +#define TAPE_3590_MAX_MSG 0xb0 + +/* Datatypes */ + +struct tape_3590_disc_data { + unsigned char modeset_byte; + int read_back_op; +}; + +struct tape_3590_sense { + + unsigned int command_rej:1; + unsigned int interv_req:1; + unsigned int bus_out_check:1; + unsigned int eq_check:1; + unsigned int data_check:1; + unsigned int overrun:1; + unsigned int def_unit_check:1; + unsigned int assgnd_elsew:1; + + unsigned int locate_fail:1; + unsigned int inst_online:1; + unsigned int reserved:1; + unsigned int blk_seq_err:1; + unsigned int begin_part:1; + unsigned int wr_mode:1; + unsigned int wr_prot:1; + unsigned int not_cap:1; + + unsigned int bra:2; + unsigned int lc:3; + unsigned int vlf_active:1; + unsigned int stm:1; + unsigned int med_pos:1; + + unsigned int rac:8; + + unsigned int rc_rqc:16; + + unsigned int mc:8; + + unsigned int sense_fmt:8; + + union { + struct { + unsigned int emc:4; + unsigned int smc:4; + unsigned int sev:2; + unsigned int reserved:6; + unsigned int md:8; + unsigned int refcode:8; + unsigned int mid:16; + unsigned int mp:16; + unsigned char volid[6]; + unsigned int fid:8; + } f70; + struct { + unsigned int emc:4; + unsigned int smc:4; + unsigned int sev:2; + unsigned int reserved1:5; + unsigned int mdf:1; + unsigned char md[3]; + unsigned int simid:8; + unsigned int uid:16; + unsigned int refcode1:16; + unsigned int refcode2:16; + unsigned int refcode3:16; + unsigned int reserved2:8; + } f71; + unsigned char data[14]; + } fmt; + unsigned char pad[10]; + +} __attribute__ ((packed)); + +struct tape_3590_med_sense { + unsigned int macst:4; + unsigned int masst:4; + char pad[127]; +} __attribute__ ((packed)); + +#endif /* _TAPE_3590_H */ diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index b3569c82bb1..a5c68e60fcf 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c @@ -44,11 +44,10 @@ struct tape_class_device *register_tape_dev( int rc; char * s; - tcd = kmalloc(sizeof(struct tape_class_device), GFP_KERNEL); + tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL); if (!tcd) return ERR_PTR(-ENOMEM); - memset(tcd, 0, sizeof(struct tape_class_device)); strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN); for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/')) *s = '!'; diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 4ea438c749c..389ee2c0f44 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -453,16 +453,14 @@ tape_alloc_device(void) { struct tape_device *device; - device = (struct tape_device *) - kmalloc(sizeof(struct tape_device), 
GFP_KERNEL); + device = kzalloc(sizeof(struct tape_device), GFP_KERNEL); if (device == NULL) { DBF_EXCEPTION(2, "ti:no mem\n"); PRINT_INFO ("can't allocate memory for " "tape info structure\n"); return ERR_PTR(-ENOMEM); } - memset(device, 0, sizeof(struct tape_device)); - device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA); + device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA); if (device->modeset_byte == NULL) { DBF_EXCEPTION(2, "ti:no mem\n"); PRINT_INFO("can't allocate memory for modeset byte\n"); @@ -659,34 +657,30 @@ tape_alloc_request(int cplength, int datasize) DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize); - request = (struct tape_request *) kmalloc(sizeof(struct tape_request), - GFP_KERNEL); + request = kzalloc(sizeof(struct tape_request), GFP_KERNEL); if (request == NULL) { DBF_EXCEPTION(1, "cqra nomem\n"); return ERR_PTR(-ENOMEM); } - memset(request, 0, sizeof(struct tape_request)); /* allocate channel program */ if (cplength > 0) { - request->cpaddr = kmalloc(cplength*sizeof(struct ccw1), + request->cpaddr = kcalloc(cplength, sizeof(struct ccw1), GFP_ATOMIC | GFP_DMA); if (request->cpaddr == NULL) { DBF_EXCEPTION(1, "cqra nomem\n"); kfree(request); return ERR_PTR(-ENOMEM); } - memset(request->cpaddr, 0, cplength*sizeof(struct ccw1)); } /* alloc small kernel buffer */ if (datasize > 0) { - request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA); + request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA); if (request->cpdata == NULL) { DBF_EXCEPTION(1, "cqra nomem\n"); kfree(request->cpaddr); kfree(request); return ERR_PTR(-ENOMEM); } - memset(request->cpdata, 0, datasize); } DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr, request->cpdata); @@ -761,6 +755,13 @@ __tape_start_next_request(struct tape_device *device) */ if (request->status == TAPE_REQUEST_IN_IO) return; + /* + * Request has already been stopped. We have to wait until + * the request is removed from the queue in the interrupt + * handling. + */ + if (request->status == TAPE_REQUEST_DONE) + return; /* * We wanted to cancel the request but the common I/O layer @@ -1015,7 +1016,7 @@ tape_do_io_interruptible(struct tape_device *device, wq, (request->callback == NULL) ); - } while (rc != -ERESTARTSYS); + } while (rc == -ERESTARTSYS); DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id); rc = -ERESTARTSYS; @@ -1024,6 +1025,20 @@ tape_do_io_interruptible(struct tape_device *device, } /* + * Stop running ccw. + */ +int +tape_cancel_io(struct tape_device *device, struct tape_request *request) +{ + int rc; + + spin_lock_irq(get_ccwdev_lock(device->cdev)); + rc = __tape_cancel_io(device, request); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); + return rc; +} + +/* * Tape interrupt routine, called from the ccw_device layer */ static void @@ -1064,15 +1079,16 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) /* * If the condition code is not zero and the start function bit is * still set, this is an deferred error and the last start I/O did - * not succeed. Restart the request now. + * not succeed. At this point the condition that caused the deferred + * error might still apply. So we just schedule the request to be + * started later. */ - if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { - PRINT_WARN("(%s): deferred cc=%i. 
restaring\n", - cdev->dev.bus_id, - irb->scsw.cc); - rc = __tape_start_io(device, request); - if (rc) - __tape_end_request(device, request, rc); + if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) && + (request->status == TAPE_REQUEST_IN_IO)) { + DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", + device->cdev_id, irb->scsw.cc, irb->scsw.fctl); + request->status = TAPE_REQUEST_QUEUED; + schedule_delayed_work(&device->tape_dnr, HZ); return; } @@ -1286,4 +1302,5 @@ EXPORT_SYMBOL(tape_dump_sense_dbf); EXPORT_SYMBOL(tape_do_io); EXPORT_SYMBOL(tape_do_io_async); EXPORT_SYMBOL(tape_do_io_interruptible); +EXPORT_SYMBOL(tape_cancel_io); EXPORT_SYMBOL(tape_mtop); diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index 2f9fe30989a..99cf881f41d 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c @@ -37,20 +37,19 @@ tape_std_assign_timeout(unsigned long data) { struct tape_request * request; struct tape_device * device; + int rc; request = (struct tape_request *) data; if ((device = request->device) == NULL) BUG(); - spin_lock_irq(get_ccwdev_lock(device->cdev)); - if (request->callback != NULL) { - DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n", + DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n", device->cdev_id); - PRINT_ERR("%s: Assignment timeout. Device busy.\n", - device->cdev->dev.bus_id); - ccw_device_clear(device->cdev, (long) request); - } - spin_unlock_irq(get_ccwdev_lock(device->cdev)); + rc = tape_cancel_io(device, request); + if(rc) + PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n", + device->cdev->dev.bus_id, rc); + } int diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h index 3ab6aafb734..2d311798edf 100644 --- a/drivers/s390/char/tape_std.h +++ b/drivers/s390/char/tape_std.h @@ -1,9 +1,8 @@ /* - * drivers/s390/char/tape_34xx.h + * drivers/s390/char/tape_std.h * standard tape device functions for ibm tapes. * - * S390 and zSeries version - * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright (C) IBM Corp. 
2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -149,4 +148,11 @@ void tape_std_error_recovery_do_retry(struct tape_device *); void tape_std_error_recovery_read_opposite(struct tape_device *); void tape_std_error_recovery_HWBUG(struct tape_device *, int condno); +/* S390 tape types */ +enum s390_tape_type { + tape_3480, + tape_3490, + tape_3590, +}; + #endif // _TAPE_STD_H diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 4b906937038..9a141776873 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -691,10 +691,9 @@ tty3270_alloc_view(void) struct tty3270 *tp; int pages; - tp = kmalloc(sizeof(struct tty3270),GFP_KERNEL); + tp = kzalloc(sizeof(struct tty3270), GFP_KERNEL); if (!tp) goto out_err; - memset(tp, 0, sizeof(struct tty3270)); tp->freemem_pages = kmalloc(sizeof(void *) * TTY3270_STRING_PAGES, GFP_KERNEL); if (!tp->freemem_pages) @@ -767,16 +766,14 @@ tty3270_alloc_screen(struct tty3270 *tp) int lines; size = sizeof(struct tty3270_line) * (tp->view.rows - 2); - tp->screen = kmalloc(size, GFP_KERNEL); + tp->screen = kzalloc(size, GFP_KERNEL); if (!tp->screen) goto out_err; - memset(tp->screen, 0, size); for (lines = 0; lines < tp->view.rows - 2; lines++) { size = sizeof(struct tty3270_cell) * tp->view.cols; - tp->screen[lines].cells = kmalloc(size, GFP_KERNEL); + tp->screen[lines].cells = kzalloc(size, GFP_KERNEL); if (!tp->screen[lines].cells) goto out_screen; - memset(tp->screen[lines].cells, 0, size); } return 0; out_screen: diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index b2d75de144c..c625b69ebd1 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -759,9 +759,8 @@ vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { struct device *dev; int ret; - dev = kmalloc(sizeof(struct device), GFP_KERNEL); + dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (dev) { - memset(dev, 0, sizeof(struct device)); snprintf(dev->bus_id, BUS_ID_SIZE, "%s", priv->internal_name); dev->bus = &iucv_bus; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 8013c8eb76f..bdfee7fbaa2 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -157,11 +157,10 @@ ccwgroup_create(struct device *root, if (argc > 256) /* disallow dumb users */ return -EINVAL; - gdev = kmalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL); + gdev = kzalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL); if (!gdev) return -ENOMEM; - memset(gdev, 0, sizeof(*gdev) + argc*sizeof(gdev->cdev[0])); atomic_set(&gdev->onoff, 0); del_drvdata = 0; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index f4183d66025..6412b2c3edd 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -98,10 +98,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) ssd_area = page; - ssd_area->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0004, - }; + ssd_area->request.length = 0x0010; + ssd_area->request.code = 0x0004; ssd_area->ssid = sch->schid.ssid; ssd_area->f_sch = sch->schid.sch_no; @@ -517,10 +515,8 @@ chsc_process_crw(void) struct device *dev; memset(sei_area, 0, sizeof(*sei_area)); memset(&res_data, 0, sizeof(struct res_acc_data)); - sei_area->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x000e, - }; + sei_area->request.length = 0x0010; + sei_area->request.code = 0x000e; ccode = chsc(sei_area); if (ccode > 0) @@ 
-875,6 +871,264 @@ s390_vary_chpid( __u8 chpid, int on) } /* + * Channel measurement related functions + */ +static ssize_t +chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + struct channel_path *chp; + unsigned int size; + + chp = to_channelpath(container_of(kobj, struct device, kobj)); + if (!chp->cmg_chars) + return 0; + + size = sizeof(struct cmg_chars); + + if (off > size) + return 0; + if (off + count > size) + count = size - off; + memcpy(buf, chp->cmg_chars + off, count); + return count; +} + +static struct bin_attribute chp_measurement_chars_attr = { + .attr = { + .name = "measurement_chars", + .mode = S_IRUSR, + .owner = THIS_MODULE, + }, + .size = sizeof(struct cmg_chars), + .read = chp_measurement_chars_read, +}; + +static void +chp_measurement_copy_block(struct cmg_entry *buf, + struct channel_subsystem *css, int chpid) +{ + void *area; + struct cmg_entry *entry, reference_buf; + int idx; + + if (chpid < 128) { + area = css->cub_addr1; + idx = chpid; + } else { + area = css->cub_addr2; + idx = chpid - 128; + } + entry = area + (idx * sizeof(struct cmg_entry)); + do { + memcpy(buf, entry, sizeof(*entry)); + memcpy(&reference_buf, entry, sizeof(*entry)); + } while (reference_buf.values[0] != buf->values[0]); +} + +static ssize_t +chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count) +{ + struct channel_path *chp; + struct channel_subsystem *css; + unsigned int size; + + chp = to_channelpath(container_of(kobj, struct device, kobj)); + css = to_css(chp->dev.parent); + + size = sizeof(struct cmg_chars); + + /* Only allow single reads. */ + if (off || count < size) + return 0; + chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id); + return count; +} + +static struct bin_attribute chp_measurement_attr = { + .attr = { + .name = "measurement", + .mode = S_IRUSR, + .owner = THIS_MODULE, + }, + .size = sizeof(struct cmg_entry), + .read = chp_measurement_read, +}; + +static void +chsc_remove_chp_cmg_attr(struct channel_path *chp) +{ + sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); + sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); +} + +static int +chsc_add_chp_cmg_attr(struct channel_path *chp) +{ + int ret; + + ret = sysfs_create_bin_file(&chp->dev.kobj, + &chp_measurement_chars_attr); + if (ret) + return ret; + ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); + if (ret) + sysfs_remove_bin_file(&chp->dev.kobj, + &chp_measurement_chars_attr); + return ret; +} + +static void +chsc_remove_cmg_attr(struct channel_subsystem *css) +{ + int i; + + for (i = 0; i <= __MAX_CHPID; i++) { + if (!css->chps[i]) + continue; + chsc_remove_chp_cmg_attr(css->chps[i]); + } +} + +static int +chsc_add_cmg_attr(struct channel_subsystem *css) +{ + int i, ret; + + ret = 0; + for (i = 0; i <= __MAX_CHPID; i++) { + if (!css->chps[i]) + continue; + ret = chsc_add_chp_cmg_attr(css->chps[i]); + if (ret) + goto cleanup; + } + return ret; +cleanup: + for (--i; i >= 0; i--) { + if (!css->chps[i]) + continue; + chsc_remove_chp_cmg_attr(css->chps[i]); + } + return ret; +} + + +static int +__chsc_do_secm(struct channel_subsystem *css, int enable, void *page) +{ + struct { + struct chsc_header request; + u32 operation_code : 2; + u32 : 30; + u32 key : 4; + u32 : 28; + u32 zeroes1; + u32 cub_addr1; + u32 zeroes2; + u32 cub_addr2; + u32 reserved[13]; + struct chsc_header response; + u32 status : 8; + u32 : 4; + u32 fmt : 4; + u32 : 16; + } *secm_area; + int ret, ccode; + + secm_area = page; 
+ secm_area->request.length = 0x0050; + secm_area->request.code = 0x0016; + + secm_area->key = PAGE_DEFAULT_KEY; + secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; + secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; + + secm_area->operation_code = enable ? 0 : 1; + + ccode = chsc(secm_area); + if (ccode > 0) + return (ccode == 3) ? -ENODEV : -EBUSY; + + switch (secm_area->response.code) { + case 0x0001: /* Success. */ + ret = 0; + break; + case 0x0003: /* Invalid block. */ + case 0x0007: /* Invalid format. */ + case 0x0008: /* Other invalid block. */ + CIO_CRW_EVENT(2, "Error in chsc request block!\n"); + ret = -EINVAL; + break; + case 0x0004: /* Command not provided in model. */ + CIO_CRW_EVENT(2, "Model does not provide secm\n"); + ret = -EOPNOTSUPP; + break; + case 0x0102: /* cub adresses incorrect */ + CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n"); + ret = -EINVAL; + break; + case 0x0103: /* key error */ + CIO_CRW_EVENT(2, "Access key error in secm\n"); + ret = -EINVAL; + break; + case 0x0105: /* error while starting */ + CIO_CRW_EVENT(2, "Error while starting channel measurement\n"); + ret = -EIO; + break; + default: + CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", + secm_area->response.code); + ret = -EIO; + } + return ret; +} + +int +chsc_secm(struct channel_subsystem *css, int enable) +{ + void *secm_area; + int ret; + + secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!secm_area) + return -ENOMEM; + + mutex_lock(&css->mutex); + if (enable && !css->cm_enabled) { + css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!css->cub_addr1 || !css->cub_addr2) { + free_page((unsigned long)css->cub_addr1); + free_page((unsigned long)css->cub_addr2); + free_page((unsigned long)secm_area); + mutex_unlock(&css->mutex); + return -ENOMEM; + } + } + ret = __chsc_do_secm(css, enable, secm_area); + if (!ret) { + css->cm_enabled = enable; + if (css->cm_enabled) { + ret = chsc_add_cmg_attr(css); + if (ret) { + memset(secm_area, 0, PAGE_SIZE); + __chsc_do_secm(css, 0, secm_area); + css->cm_enabled = 0; + } + } else + chsc_remove_cmg_attr(css); + } + if (enable && !css->cm_enabled) { + free_page((unsigned long)css->cub_addr1); + free_page((unsigned long)css->cub_addr2); + } + mutex_unlock(&css->mutex); + free_page((unsigned long)secm_area); + return ret; +} + +/* * Files for the channel path entries. 
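chp_measurement_copy_block() above copies a cmg_entry that the channel
subsystem may be updating at the same time, so it copies the block twice
and retries until the first word of both copies matches.  A minimal,
standalone sketch of that retry-until-consistent idiom (the type and
function names here are illustrative, not part of the patch):

#include <string.h>

struct sample_block {
        unsigned int values[8];         /* like struct cmg_entry */
};

/*
 * Copy *src into *dst, retrying until two back-to-back copies agree on
 * the first word; a mismatch means the block changed underneath us and
 * the copy may be torn.
 */
static void copy_until_stable(struct sample_block *dst,
                              const struct sample_block *src)
{
        struct sample_block check;

        do {
                memcpy(dst, src, sizeof(*dst));
                memcpy(&check, src, sizeof(check));
        } while (check.values[0] != dst->values[0]);
}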
*/ static ssize_t @@ -925,9 +1179,39 @@ chp_type_show(struct device *dev, struct device_attribute *attr, char *buf) static DEVICE_ATTR(type, 0444, chp_type_show, NULL); +static ssize_t +chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + + if (!chp) + return 0; + if (chp->cmg == -1) /* channel measurements not available */ + return sprintf(buf, "unknown\n"); + return sprintf(buf, "%x\n", chp->cmg); +} + +static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL); + +static ssize_t +chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + + if (!chp) + return 0; + if (chp->shared == -1) /* channel measurements not available */ + return sprintf(buf, "unknown\n"); + return sprintf(buf, "%x\n", chp->shared); +} + +static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); + static struct attribute * chp_attrs[] = { &dev_attr_status.attr, &dev_attr_type.attr, + &dev_attr_cmg.attr, + &dev_attr_shared.attr, NULL, }; @@ -966,10 +1250,8 @@ chsc_determine_channel_path_description(int chpid, if (!scpd_area) return -ENOMEM; - scpd_area->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0002, - }; + scpd_area->request.length = 0x0010; + scpd_area->request.code = 0x0002; scpd_area->first_chpid = chpid; scpd_area->last_chpid = chpid; @@ -1006,6 +1288,111 @@ out: return ret; } +static void +chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, + struct cmg_chars *chars) +{ + switch (chp->cmg) { + case 2: + case 3: + chp->cmg_chars = kmalloc(sizeof(struct cmg_chars), + GFP_KERNEL); + if (chp->cmg_chars) { + int i, mask; + struct cmg_chars *cmg_chars; + + cmg_chars = chp->cmg_chars; + for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { + mask = 0x80 >> (i + 3); + if (cmcv & mask) + cmg_chars->values[i] = chars->values[i]; + else + cmg_chars->values[i] = 0; + } + } + break; + default: + /* No cmg-dependent data. */ + break; + } +} + +static int +chsc_get_channel_measurement_chars(struct channel_path *chp) +{ + int ccode, ret; + + struct { + struct chsc_header request; + u32 : 24; + u32 first_chpid : 8; + u32 : 24; + u32 last_chpid : 8; + u32 zeroes1; + struct chsc_header response; + u32 zeroes2; + u32 not_valid : 1; + u32 shared : 1; + u32 : 22; + u32 chpid : 8; + u32 cmcv : 5; + u32 : 11; + u32 cmgq : 8; + u32 cmg : 8; + u32 zeroes3; + u32 data[NR_MEASUREMENT_CHARS]; + } *scmc_area; + + scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!scmc_area) + return -ENOMEM; + + scmc_area->request.length = 0x0010; + scmc_area->request.code = 0x0022; + + scmc_area->first_chpid = chp->id; + scmc_area->last_chpid = chp->id; + + ccode = chsc(scmc_area); + if (ccode > 0) { + ret = (ccode == 3) ? -ENODEV : -EBUSY; + goto out; + } + + switch (scmc_area->response.code) { + case 0x0001: /* Success. */ + if (!scmc_area->not_valid) { + chp->cmg = scmc_area->cmg; + chp->shared = scmc_area->shared; + chsc_initialize_cmg_chars(chp, scmc_area->cmcv, + (struct cmg_chars *) + &scmc_area->data); + } else { + chp->cmg = -1; + chp->shared = -1; + } + ret = 0; + break; + case 0x0003: /* Invalid block. */ + case 0x0007: /* Invalid format. */ + case 0x0008: /* Invalid bit combination. */ + CIO_CRW_EVENT(2, "Error in chsc request block!\n"); + ret = -EINVAL; + break; + case 0x0004: /* Command not provided. 
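In chsc_initialize_cmg_chars() above, cmcv acts as a 5-bit validity
vector: for i = 0..4 the mask 0x80 >> (i + 3) comes out as 0x10, 0x08,
0x04, 0x02 and 0x01, so each bit gates one of the NR_MEASUREMENT_CHARS
words, and characteristics whose bit is clear are stored as zero.  A
quick standalone check of that mask arithmetic (illustrative only):

#include <stdio.h>

int main(void)
{
        int i;

        for (i = 0; i < 5; i++)         /* NR_MEASUREMENT_CHARS == 5 */
                printf("i=%d mask=0x%02x\n", i, 0x80 >> (i + 3));
        return 0;
}
/* prints masks 0x10 0x08 0x04 0x02 0x01 */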
*/ + CIO_CRW_EVENT(2, "Model does not provide scmc\n"); + ret = -EOPNOTSUPP; + break; + default: + CIO_CRW_EVENT(2, "Unknown CHSC response %d\n", + scmc_area->response.code); + ret = -EIO; + } +out: + free_page((unsigned long)scmc_area); + return ret; +} + /* * Entries for chpids on the system bus. * This replaces /proc/chpids. @@ -1016,10 +1403,9 @@ new_channel_path(int chpid) struct channel_path *chp; int ret; - chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL); + chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL); if (!chp) return -ENOMEM; - memset(chp, 0, sizeof(struct channel_path)); /* fill in status, etc. */ chp->id = chpid; @@ -1034,6 +1420,22 @@ new_channel_path(int chpid) ret = chsc_determine_channel_path_description(chpid, &chp->desc); if (ret) goto out_free; + /* Get channel-measurement characteristics. */ + if (css_characteristics_avail && css_chsc_characteristics.scmc + && css_chsc_characteristics.secm) { + ret = chsc_get_channel_measurement_chars(chp); + if (ret) + goto out_free; + } else { + static int msg_done; + + if (!msg_done) { + printk(KERN_WARNING "cio: Channel measurements not " + "available, continuing.\n"); + msg_done = 1; + } + chp->cmg = -1; + } /* make it known to the system */ ret = device_register(&chp->dev); @@ -1046,8 +1448,19 @@ new_channel_path(int chpid) if (ret) { device_unregister(&chp->dev); goto out_free; - } else - css[0]->chps[chpid] = chp; + } + mutex_lock(&css[0]->mutex); + if (css[0]->cm_enabled) { + ret = chsc_add_chp_cmg_attr(chp); + if (ret) { + sysfs_remove_group(&chp->dev.kobj, &chp_attr_group); + device_unregister(&chp->dev); + mutex_unlock(&css[0]->mutex); + goto out_free; + } + } + css[0]->chps[chpid] = chp; + mutex_unlock(&css[0]->mutex); return ret; out_free: kfree(chp); @@ -1103,10 +1516,8 @@ chsc_enable_facility(int operation_code) sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); if (!sda_area) return -ENOMEM; - sda_area->request = (struct chsc_header) { - .length = 0x0400, - .code = 0x0031, - }; + sda_area->request.length = 0x0400; + sda_area->request.code = 0x0031; sda_area->operation_code = operation_code; ret = chsc(sda_area); @@ -1161,10 +1572,8 @@ chsc_determine_css_characteristics(void) return -ENOMEM; } - scsc_area->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0010, - }; + scsc_area->request.length = 0x0010; + scsc_area->request.code = 0x0010; result = chsc(scsc_area); if (result) { diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 3e75095f35d..a259245780a 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -12,6 +12,16 @@ struct chsc_header { u16 code; }; +#define NR_MEASUREMENT_CHARS 5 +struct cmg_chars { + u32 values[NR_MEASUREMENT_CHARS]; +}; + +#define NR_MEASUREMENT_ENTRIES 8 +struct cmg_entry { + u32 values[NR_MEASUREMENT_ENTRIES]; +}; + struct channel_path_desc { u8 flags; u8 lsn; @@ -27,6 +37,10 @@ struct channel_path { int id; int state; struct channel_path_desc desc; + /* Channel-measurement related stuff: */ + int cmg; + int shared; + void *cmg_chars; struct device dev; }; @@ -52,7 +66,11 @@ struct css_general_char { struct css_chsc_char { u64 res; - u64 : 43; + u64 : 20; + u32 secm : 1; /* bit 84 */ + u32 : 1; + u32 scmc : 1; /* bit 86 */ + u32 : 20; u32 scssc : 1; /* bit 107 */ u32 scsscf : 1; /* bit 108 */ u32 : 19; @@ -67,6 +85,8 @@ extern int css_characteristics_avail; extern void *chsc_get_chp_desc(struct subchannel*, int); extern int chsc_enable_facility(int); +struct channel_subsystem; +extern int chsc_secm(struct channel_subsystem *, 
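chsc_get_channel_measurement_chars() above follows the same shape as
chsc_determine_channel_path_description(), chsc_enable_facility() and
chsc_determine_css_characteristics(): allocate a zeroed GFP_DMA page as
all of these helpers do, fill in the chsc_header length and command
code, issue chsc(), translate the condition code and response code into
an errno, and free the page.  A condensed sketch of that shape; the
struct layout, the 0x1234 command code and the function name are made
up for illustration:

static int chsc_do_simple_cmd(void)
{
        struct {
                struct chsc_header request;
                u32 reserved;
                struct chsc_header response;
                u32 data[4];
        } *area;
        int cc, ret;

        area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!area)
                return -ENOMEM;

        area->request.length = 0x0010;  /* size of the request block */
        area->request.code = 0x1234;    /* illustrative command code */

        cc = chsc(area);
        if (cc > 0) {                   /* cc 3 -> -ENODEV, else -EBUSY, as above */
                ret = (cc == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (area->response.code) {
        case 0x0001:                    /* success */
                ret = 0;
                break;
        case 0x0004:                    /* command not provided by this model */
                ret = -EOPNOTSUPP;
                break;
        default:
                ret = -EIO;
        }
out:
        free_page((unsigned long)area);
        return ret;
}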
int); #define to_channelpath(device) container_of(device, struct channel_path, dev) diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 3c77d65960d..74ea8aac4b7 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -452,15 +452,50 @@ channel_subsystem_release(struct device *dev) struct channel_subsystem *css; css = to_css(dev); + mutex_destroy(&css->mutex); kfree(css); } +static ssize_t +css_cm_enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_subsystem *css = to_css(dev); + + if (!css) + return 0; + return sprintf(buf, "%x\n", css->cm_enabled); +} + +static ssize_t +css_cm_enable_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct channel_subsystem *css = to_css(dev); + int ret; + + switch (buf[0]) { + case '0': + ret = css->cm_enabled ? chsc_secm(css, 0) : 0; + break; + case '1': + ret = css->cm_enabled ? 0 : chsc_secm(css, 1); + break; + default: + ret = -EINVAL; + } + return ret < 0 ? ret : count; +} + +static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); + static inline void __init setup_css(int nr) { u32 tod_high; memset(css[nr], 0, sizeof(struct channel_subsystem)); + mutex_init(&css[nr]->mutex); css[nr]->valid = 1; css[nr]->cssid = nr; sprintf(css[nr]->device.bus_id, "css%x", nr); @@ -507,6 +542,9 @@ init_channel_subsystem (void) ret = device_register(&css[i]->device); if (ret) goto out_free; + if (css_characteristics_avail && css_chsc_characteristics.secm) + device_create_file(&css[i]->device, + &dev_attr_cm_enable); } css_init_done = 1; @@ -519,6 +557,9 @@ out_free: out_unregister: while (i > 0) { i--; + if (css_characteristics_avail && css_chsc_characteristics.secm) + device_remove_file(&css[i]->device, + &dev_attr_cm_enable); device_unregister(&css[i]->device); } out_bus: @@ -589,10 +630,9 @@ css_enqueue_subchannel_slow(struct subchannel_id schid) struct slow_subchannel *new_slow_sch; unsigned long flags; - new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); + new_slow_sch = kzalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); if (!new_slow_sch) return -ENOMEM; - memset(new_slow_sch, 0, sizeof(struct slow_subchannel)); new_slow_sch->schid = schid; spin_lock_irqsave(&slow_subchannel_lock, flags); list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index b6375861cb3..74a257b2338 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -1,6 +1,7 @@ #ifndef _CSS_H #define _CSS_H +#include <linux/mutex.h> #include <linux/wait.h> #include <linux/workqueue.h> @@ -150,6 +151,11 @@ struct channel_subsystem { struct channel_path *chps[__MAX_CHPID + 1]; struct device device; struct pgid global_pgid; + struct mutex mutex; + /* channel measurement related */ + int cm_enabled; + void *cub_addr1; + void *cub_addr2; }; #define to_css(dev) container_of(dev, struct channel_subsystem, device) diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index afc4e88551a..8e3053c2a45 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -826,17 +826,15 @@ io_subchannel_probe (struct subchannel *sch) get_device(&cdev->dev); return 0; } - cdev = kmalloc (sizeof(*cdev), GFP_KERNEL); + cdev = kzalloc (sizeof(*cdev), GFP_KERNEL); if (!cdev) return -ENOMEM; - memset(cdev, 0, sizeof(struct ccw_device)); - cdev->private = kmalloc(sizeof(struct ccw_device_private), + cdev->private = kzalloc(sizeof(struct ccw_device_private), 
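The new cm_enable attribute is a standard sysfs show/store pair: show
prints the current state, store accepts '0' or '1', calls chsc_secm()
only when the state actually has to change, and returns the full count
on success.  The same pattern reduced to a toy attribute; struct
my_dev, my_enable()/my_disable() and the attribute name are
illustrative stand-ins, not part of the patch:

struct my_dev {
        struct device dev;
        int enabled;
};
#define to_my_dev(d) container_of(d, struct my_dev, dev)

/* hypothetical helpers standing in for chsc_secm(css, 1) / chsc_secm(css, 0) */
static int my_enable(struct my_dev *mdev)  { mdev->enabled = 1; return 0; }
static int my_disable(struct my_dev *mdev) { mdev->enabled = 0; return 0; }

static ssize_t
enable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%x\n", to_my_dev(dev)->enabled);
}

static ssize_t
enable_store(struct device *dev, struct device_attribute *attr,
             const char *buf, size_t count)
{
        struct my_dev *mdev = to_my_dev(dev);
        int ret;

        switch (buf[0]) {
        case '0':
                ret = mdev->enabled ? my_disable(mdev) : 0;
                break;
        case '1':
                ret = mdev->enabled ? 0 : my_enable(mdev);
                break;
        default:
                ret = -EINVAL;
        }
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(enable, 0644, enable_show, enable_store);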
GFP_KERNEL | GFP_DMA); if (!cdev->private) { kfree(cdev); return -ENOMEM; } - memset(cdev->private, 0, sizeof(struct ccw_device_private)); atomic_set(&cdev->private->onoff, 0); cdev->dev = (struct device) { .parent = &sch->dev, diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index b302779e7cf..180b3bf8b90 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -827,6 +827,17 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) } return; } + /* + * Check if a halt or clear has been issued in the meanwhile. If yes, + * only deliver the halt/clear interrupt to the device driver as if it + * had killed the original request. + */ + if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { + cdev->private->flags.dosense = 0; + memset(&cdev->private->irb, 0, sizeof(struct irb)); + ccw_device_accumulate_irb(cdev, irb); + goto call_handler; + } /* Add basic sense info to irb. */ ccw_device_accumulate_basic_sense(cdev, irb); if (cdev->private->flags.dosense) { @@ -834,6 +845,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_do_sense(cdev, irb); return; } +call_handler: cdev->private->state = DEV_STATE_ONLINE; /* Call the handler. */ if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 3a50b190328..795abb5a65b 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -359,10 +359,9 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length) CIO_TRACE_EVENT (4, "rddevch"); CIO_TRACE_EVENT (4, sch->dev.bus_id); - rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); if (!rdc_ccw) return -ENOMEM; - memset(rdc_ccw, 0, sizeof(struct ccw1)); rdc_ccw->cmd_code = CCW_CMD_RDC; rdc_ccw->count = length; rdc_ccw->flags = CCW_FLAG_SLI; @@ -426,16 +425,14 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp if (!ciw || ciw->cmd == 0) return -EOPNOTSUPP; - rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); if (!rcd_ccw) return -ENOMEM; - memset(rcd_ccw, 0, sizeof(struct ccw1)); - rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA); + rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA); if (!rcd_buf) { kfree(rcd_ccw); return -ENOMEM; } - memset (rcd_buf, 0, ciw->count); rcd_ccw->cmd_code = ciw->cmd; rcd_ccw->cda = (__u32) __pa (rcd_buf); rcd_ccw->count = ciw->count; diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 9ed37dc9a1b..814f9258ce0 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -1686,16 +1686,14 @@ qdio_alloc_qs(struct qdio_irq *irq_ptr, int result=-ENOMEM; for (i=0;i<no_input_qs;i++) { - q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL); + q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL); if (!q) { QDIO_PRINT_ERR("kmalloc of q failed!\n"); goto out; } - memset(q,0,sizeof(struct qdio_q)); - - q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); + q->slib = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!q->slib) { QDIO_PRINT_ERR("kmalloc of slib failed!\n"); goto out; @@ -1705,14 +1703,12 @@ qdio_alloc_qs(struct qdio_irq *irq_ptr, } for (i=0;i<no_output_qs;i++) { - q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL); + q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL); if (!q) { goto out; } - memset(q,0,sizeof(struct qdio_q)); - q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); if (!q->slib) { 
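Most of the allocation changes in this patch are the same mechanical
conversion: a kmalloc() that is immediately followed by a memset() to
zero becomes a single kzalloc() with the same size and GFP flags.  The
shape, with an illustrative struct name:

/* before */
priv = kmalloc(sizeof(struct my_priv), GFP_KERNEL | GFP_DMA);
if (!priv)
        return -ENOMEM;
memset(priv, 0, sizeof(struct my_priv));

/* after */
priv = kzalloc(sizeof(struct my_priv), GFP_KERNEL | GFP_DMA);
if (!priv)
        return -ENOMEM;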
QDIO_PRINT_ERR("kmalloc of slib failed!\n"); @@ -2984,7 +2980,7 @@ qdio_allocate(struct qdio_initialize *init_data) qdio_allocate_do_dbf(init_data); /* create irq */ - irq_ptr=kmalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA); + irq_ptr = kzalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA); QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); @@ -2994,8 +2990,6 @@ qdio_allocate(struct qdio_initialize *init_data) return -ENOMEM; } - memset(irq_ptr,0,sizeof(struct qdio_irq)); - init_MUTEX(&irq_ptr->setting_up_sema); /* QDR must be in DMA area since CCW data address is only 32 bit */ @@ -3686,10 +3680,10 @@ qdio_get_qdio_memory(void) for (i=1;i<INDICATORS_PER_CACHELINE;i++) indicator_used[i]=0; - indicators=(__u32*)kmalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), + indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), GFP_KERNEL); - if (!indicators) return -ENOMEM; - memset(indicators,0,sizeof(__u32)*(INDICATORS_PER_CACHELINE)); + if (!indicators) + return -ENOMEM; return 0; } diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c index 4141919da80..be60795f4a7 100644 --- a/drivers/s390/crypto/z90hardware.c +++ b/drivers/s390/crypto/z90hardware.c @@ -2214,7 +2214,7 @@ ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, long_len = 128; } - tmp_size = ((mod_len <= 128) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) + + tmp_size = ((long_len <= 64) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) + CALLER_HEADER; memset(z90cMsg_p, 0, tmp_size); @@ -2479,8 +2479,16 @@ convert_response(unsigned char *response, unsigned char *buffer, if (reply_code) switch (reply_code) { + case REP82_ERROR_MACHINE_FAILURE: + if (errh_p->type == TYPE82_RSP_CODE) + PRINTKW("Machine check failure\n"); + else + PRINTKW("Module failure\n"); + return REC_HARDWAR_ERR; case REP82_ERROR_OPERAND_INVALID: + return REC_OPERAND_INV; case REP88_ERROR_MESSAGE_MALFORMD: + PRINTKW("Message malformed\n"); return REC_OPERAND_INV; case REP82_ERROR_OPERAND_SIZE: return REC_OPERAND_SIZE; diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c index 7d6f19030ef..982acc7303e 100644 --- a/drivers/s390/crypto/z90main.c +++ b/drivers/s390/crypto/z90main.c @@ -1,9 +1,9 @@ /* * linux/drivers/s390/crypto/z90main.c * - * z90crypt 1.3.2 + * z90crypt 1.3.3 * - * Copyright (C) 2001, 2004 IBM Corporation + * Copyright (C) 2001, 2005 IBM Corporation * Author(s): Robert Burroughs (burrough@us.ibm.com) * Eric Rossman (edrossma@us.ibm.com) * @@ -707,13 +707,12 @@ z90crypt_open(struct inode *inode, struct file *filp) if (quiesce_z90crypt) return -EQUIESCE; - private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL); + private_data_p = kzalloc(sizeof(struct priv_data), GFP_KERNEL); if (!private_data_p) { PRINTK("Memory allocate failed\n"); return -ENOMEM; } - memset((void *)private_data_p, 0, sizeof(struct priv_data)); private_data_p->status = STAT_OPEN; private_data_p->opener_pid = PID(); filp->private_data = private_data_p; @@ -991,6 +990,7 @@ remove_device(struct device *device_p) * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card) * PCIXCC_MCL3 ----- 128-2048 * CEX2C 512-2048 128-2048 + * CEX2A ??-2048 same (the lower limit is less than 128 bit...) * * ext_bitlens (extended bitlengths) is a global, since you should not apply an * MCL to just one card in a machine. 
We assume, at first, that all cards have @@ -2736,13 +2736,11 @@ create_z90crypt(int *cdx_p) z90crypt.max_count = Z90CRYPT_NUM_DEVS; z90crypt.cdx = *cdx_p; - hdware_blk_p = (struct hdware_block *) - kmalloc(sizeof(struct hdware_block), GFP_ATOMIC); + hdware_blk_p = kzalloc(sizeof(struct hdware_block), GFP_ATOMIC); if (!hdware_blk_p) { PDEBUG("kmalloc for hardware block failed\n"); return ENOMEM; } - memset(hdware_blk_p, 0x00, sizeof(struct hdware_block)); z90crypt.hdware_info = hdware_blk_p; return 0; @@ -2977,12 +2975,11 @@ create_crypto_device(int index) total_size = sizeof(struct device) + z90crypt.q_depth_array[index] * sizeof(int); - dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC); + dev_ptr = kzalloc(total_size, GFP_ATOMIC); if (!dev_ptr) { PRINTK("kmalloc device %d failed\n", index); return ENOMEM; } - memset(dev_ptr, 0, total_size); dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC); if (!dev_ptr->dev_resp_p) { kfree(dev_ptr); diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index acd2a3f005f..23d53bf9daf 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -310,7 +310,7 @@ claw_probe(struct ccwgroup_device *cgdev) printk(KERN_INFO "claw: variable cgdev =\n"); dumpit((char *)cgdev, sizeof(struct ccwgroup_device)); #endif - privptr = kmalloc(sizeof(struct claw_privbk), GFP_KERNEL); + privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); if (privptr == NULL) { probe_error(cgdev); put_device(&cgdev->dev); @@ -319,7 +319,6 @@ claw_probe(struct ccwgroup_device *cgdev) CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); return -ENOMEM; } - memset(privptr,0x00,sizeof(struct claw_privbk)); privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL); privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL); if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) { diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c index 6caf5fa6a3b..7145e2134cf 100644 --- a/drivers/s390/net/fsm.c +++ b/drivers/s390/net/fsm.c @@ -21,38 +21,34 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_ fsm_function_t *m; fsm *f; - this = (fsm_instance *)kmalloc(sizeof(fsm_instance), order); + this = kzalloc(sizeof(fsm_instance), order); if (this == NULL) { printk(KERN_WARNING "fsm(%s): init_fsm: Couldn't alloc instance\n", name); return NULL; } - memset(this, 0, sizeof(fsm_instance)); strlcpy(this->name, name, sizeof(this->name)); - f = (fsm *)kmalloc(sizeof(fsm), order); + f = kzalloc(sizeof(fsm), order); if (f == NULL) { printk(KERN_WARNING "fsm(%s): init_fsm: Couldn't alloc fsm\n", name); kfree_fsm(this); return NULL; } - memset(f, 0, sizeof(fsm)); f->nr_events = nr_events; f->nr_states = nr_states; f->event_names = event_names; f->state_names = state_names; this->f = f; - m = (fsm_function_t *)kmalloc( - sizeof(fsm_function_t) * nr_states * nr_events, order); + m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order); if (m == NULL) { printk(KERN_WARNING "fsm(%s): init_fsm: Couldn't alloc jumptable\n", name); kfree_fsm(this); return NULL; } - memset(m, 0, sizeof(fsm_function_t) * f->nr_states * f->nr_events); f->jumpmatrix = m; for (i = 0; i < tmpl_len; i++) { diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c index 760e77ec5a1..6190be9dca9 100644 --- a/drivers/s390/net/iucv.c +++ b/drivers/s390/net/iucv.c @@ -386,7 +386,7 @@ iucv_init(void) } /* Note: GFP_DMA used used to get memory below 2G */ - iucv_external_int_buffer = kmalloc(sizeof(iucv_GeneralInterrupt), + 
iucv_external_int_buffer = kzalloc(sizeof(iucv_GeneralInterrupt), GFP_KERNEL|GFP_DMA); if (!iucv_external_int_buffer) { printk(KERN_WARNING @@ -396,10 +396,9 @@ iucv_init(void) bus_unregister(&iucv_bus); return -ENOMEM; } - memset(iucv_external_int_buffer, 0, sizeof(iucv_GeneralInterrupt)); /* Initialize parameter pool */ - iucv_param_pool = kmalloc(sizeof(iucv_param) * PARAM_POOL_SIZE, + iucv_param_pool = kzalloc(sizeof(iucv_param) * PARAM_POOL_SIZE, GFP_KERNEL|GFP_DMA); if (!iucv_param_pool) { printk(KERN_WARNING "%s: Could not allocate param pool\n", @@ -410,7 +409,6 @@ iucv_init(void) bus_unregister(&iucv_bus); return -ENOMEM; } - memset(iucv_param_pool, 0, sizeof(iucv_param) * PARAM_POOL_SIZE); /* Initialize irq queue */ INIT_LIST_HEAD(&iucv_irq_queue); @@ -793,15 +791,14 @@ iucv_register_program (__u8 pgmname[16], } max_connections = iucv_query_maxconn(); - iucv_pathid_table = kmalloc(max_connections * sizeof(handler *), - GFP_ATOMIC); + iucv_pathid_table = kcalloc(max_connections, sizeof(handler *), + GFP_ATOMIC); if (iucv_pathid_table == NULL) { printk(KERN_WARNING "%s: iucv_pathid_table storage " "allocation failed\n", __FUNCTION__); kfree(new_handler); return NULL; } - memset (iucv_pathid_table, 0, max_connections * sizeof(handler *)); } memset(new_handler, 0, sizeof (handler)); memcpy(new_handler->id.user_data, pgmname, diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 9cf88d7201d..edcf05d5d56 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -115,11 +115,10 @@ lcs_alloc_channel(struct lcs_channel *channel) LCS_DBF_TEXT(2, setup, "ichalloc"); for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { /* alloc memory fo iobuffer */ - channel->iob[cnt].data = (void *) - kmalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL); + channel->iob[cnt].data = + kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL); if (channel->iob[cnt].data == NULL) break; - memset(channel->iob[cnt].data, 0, LCS_IOBUFFERSIZE); channel->iob[cnt].state = BUF_STATE_EMPTY; } if (cnt < LCS_NUM_BUFFS) { @@ -182,10 +181,9 @@ lcs_alloc_card(void) LCS_DBF_TEXT(2, setup, "alloclcs"); - card = kmalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA); + card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA); if (card == NULL) return NULL; - memset(card, 0, sizeof(struct lcs_card)); card->lan_type = LCS_FRAME_TYPE_AUTO; card->pkt_seq = 0; card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT; @@ -793,10 +791,9 @@ lcs_alloc_reply(struct lcs_cmd *cmd) LCS_DBF_TEXT(4, trace, "getreply"); - reply = kmalloc(sizeof(struct lcs_reply), GFP_ATOMIC); + reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC); if (!reply) return NULL; - memset(reply,0,sizeof(struct lcs_reply)); atomic_set(&reply->refcnt,1); reply->sequence_no = cmd->sequence_no; reply->received = 0; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 71d3853e868..260a93c8c44 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1728,14 +1728,13 @@ static int netiucv_register_device(struct net_device *ndev) { struct netiucv_priv *priv = ndev->priv; - struct device *dev = kmalloc(sizeof(struct device), GFP_KERNEL); + struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); int ret; IUCV_DBF_TEXT(trace, 3, __FUNCTION__); if (dev) { - memset(dev, 0, sizeof(struct device)); snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name); dev->bus = &iucv_bus; dev->parent = iucv_root; @@ -1784,11 +1783,9 @@ netiucv_new_connection(struct net_device *dev, char *username) { struct iucv_connection **clist = 
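Where the zeroed allocation is an array, the patch switches to
kcalloc(n, size, flags) instead, as in fsm.c's jump table,
iucv_pathid_table above and the qeth_eddp page and element arrays
below; besides zeroing, kcalloc returns NULL when n * size would
overflow.  Roughly, with illustrative names:

/* before */
table = kmalloc(max_entries * sizeof(handler *), GFP_ATOMIC);
if (!table)
        return -ENOMEM;
memset(table, 0, max_entries * sizeof(handler *));

/* after */
table = kcalloc(max_entries, sizeof(handler *), GFP_ATOMIC);
if (!table)
        return -ENOMEM;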
&iucv_connections; struct iucv_connection *conn = - (struct iucv_connection *) - kmalloc(sizeof(struct iucv_connection), GFP_KERNEL); + kzalloc(sizeof(struct iucv_connection), GFP_KERNEL); if (conn) { - memset(conn, 0, sizeof(struct iucv_connection)); skb_queue_head_init(&conn->collect_queue); skb_queue_head_init(&conn->commit_queue); conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 82cb4af2f0e..44e226f211e 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -389,9 +389,8 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) struct qeth_eddp_data *eddp; QETH_DBF_TEXT(trace, 5, "eddpcrda"); - eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); + eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); if (eddp){ - memset(eddp, 0, sizeof(struct qeth_eddp_data)); eddp->nhl = nhl; eddp->thl = thl; memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr)); @@ -542,12 +541,11 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, QETH_DBF_TEXT(trace, 5, "creddpcg"); /* create the context and allocate pages */ - ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC); + ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC); if (ctx == NULL){ QETH_DBF_TEXT(trace, 2, "ceddpcn1"); return NULL; } - memset(ctx, 0, sizeof(struct qeth_eddp_context)); ctx->type = QETH_LARGE_SEND_EDDP; qeth_eddp_calc_num_pages(ctx, skb, hdr_len); if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){ @@ -555,13 +553,12 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, kfree(ctx); return NULL; } - ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC); + ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC); if (ctx->pages == NULL){ QETH_DBF_TEXT(trace, 2, "ceddpcn2"); kfree(ctx); return NULL; } - memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *)); for (i = 0; i < ctx->num_pages; ++i){ addr = (u8 *)__get_free_page(GFP_ATOMIC); if (addr == NULL){ @@ -573,15 +570,13 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, memset(addr, 0, PAGE_SIZE); ctx->pages[i] = addr; } - ctx->elements = kmalloc(ctx->num_elements * + ctx->elements = kcalloc(ctx->num_elements, sizeof(struct qeth_eddp_element), GFP_ATOMIC); if (ctx->elements == NULL){ QETH_DBF_TEXT(trace, 2, "ceddpcn4"); qeth_eddp_free_context(ctx); return NULL; } - memset(ctx->elements, 0, - ctx->num_elements * sizeof(struct qeth_eddp_element)); /* reset num_elements; will be incremented again in fill_buffer to * reflect number of actually used elements */ ctx->num_elements = 0; diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 021cd5d08c6..b3c6e790779 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -297,12 +297,10 @@ qeth_alloc_card(void) struct qeth_card *card; QETH_DBF_TEXT(setup, 2, "alloccrd"); - card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card), - GFP_DMA|GFP_KERNEL); + card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL); if (!card) return NULL; QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); - memset(card, 0, sizeof(struct qeth_card)); if (qeth_setup_channel(&card->read)) { kfree(card); return NULL; @@ -1632,9 +1630,8 @@ qeth_alloc_reply(struct qeth_card *card) { struct qeth_reply *reply; - reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC); + reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); if (reply){ - memset(reply, 0, 
sizeof(struct qeth_reply)); atomic_set(&reply->refcnt, 1); reply->card = card; }; @@ -3348,13 +3345,11 @@ qeth_qdio_establish(struct qeth_card *card) QETH_DBF_TEXT(setup, 2, "qdioest"); - qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), + qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), GFP_KERNEL); if (!qib_param_field) return -ENOMEM; - memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char)); - qeth_create_qib_param_field(card, qib_param_field); qeth_create_qib_param_field_blkt(card, qib_param_field); @@ -4819,9 +4814,8 @@ qeth_arp_query(struct qeth_card *card, char *udata) /* get size of userspace buffer and mask_bits -> 6 bytes */ if (copy_from_user(&qinfo, udata, 6)) return -EFAULT; - if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))) + if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL))) return -ENOMEM; - memset(qinfo.udata, 0, qinfo.udata_len); qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, IPA_CMD_ASS_ARP_QUERY_INFO, @@ -4969,11 +4963,10 @@ qeth_snmp_command(struct qeth_card *card, char *udata) return -EFAULT; } qinfo.udata_len = ureq->hdr.data_len; - if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){ + if (!(qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL))){ kfree(ureq); return -ENOMEM; } - memset(qinfo.udata, 0, qinfo.udata_len); qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, @@ -5564,12 +5557,11 @@ qeth_get_addr_buffer(enum qeth_prot_versions prot) { struct qeth_ipaddr *addr; - addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); + addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); if (addr == NULL) { PRINT_WARN("Not enough memory to add address\n"); return NULL; } - memset(addr,0,sizeof(struct qeth_ipaddr)); addr->type = QETH_IP_TYPE_NORMAL; addr->proto = prot; return addr; diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c index f2a076a2b2f..882d419e416 100644 --- a/drivers/s390/net/qeth_sys.c +++ b/drivers/s390/net/qeth_sys.c @@ -1145,11 +1145,10 @@ qeth_dev_ipato_add_store(const char *buf, size_t count, if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits))) return rc; - if (!(ipatoe = kmalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){ + if (!(ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){ PRINT_WARN("No memory to allocate ipato entry\n"); return -ENOMEM; } - memset(ipatoe, 0, sizeof(struct qeth_ipato_entry)); ipatoe->proto = proto; memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16); ipatoe->mask_bits = mask_bits; diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c index e3f64716982..3c7145d9f9a 100644 --- a/drivers/s390/s390_rdev.c +++ b/drivers/s390/s390_rdev.c @@ -27,10 +27,9 @@ s390_root_dev_register(const char *name) if (!strlen(name)) return ERR_PTR(-EINVAL); - dev = kmalloc(sizeof(struct device), GFP_KERNEL); + dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); - memset(dev, 0, sizeof(struct device)); strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); dev->release = s390_root_dev_release; ret = device_register(dev); |
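s390_root_dev_register() shows the minimal life cycle of a bare struct
device: zero-allocate it, give it a bus_id and a release callback, and
hand it to device_register().  A stripped-down sketch of that sequence;
the names and the error path are illustrative (the real function
returns ERR_PTR() values and its failure handling continues past the
end of this hunk):

static void demo_dev_release(struct device *dev)
{
        kfree(dev);
}

static struct device *demo_dev_register(const char *name)
{
        struct device *dev;
        int ret;

        if (!strlen(name))
                return ERR_PTR(-EINVAL);

        dev = kzalloc(sizeof(struct device), GFP_KERNEL);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
        dev->release = demo_dev_release;

        ret = device_register(dev);
        if (ret) {
                /* device_register took a reference; dropping it frees dev */
                put_device(dev);
                return ERR_PTR(ret);
        }
        return dev;
}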