Diffstat (limited to 'drivers')
75 files changed, 1341 insertions, 748 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 879250d3d06..ff8c4beaace 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -51,6 +51,8 @@ config CRYPTO_DEV_PADLOCK_SHA If unsure say M. The compiled module will be called padlock-sha.ko +source "arch/s390/crypto/Kconfig" + config CRYPTO_DEV_GEODE tristate "Support for the Geode LX AES engine" depends on CRYPTO && X86_32 && PCI diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig index ae89b9b8874..165af398fde 100644 --- a/drivers/s390/Kconfig +++ b/drivers/s390/Kconfig @@ -103,14 +103,8 @@ config CCW_CONSOLE depends on TN3215_CONSOLE || TN3270_CONSOLE default y -config SCLP - bool "Support for SCLP" - help - Include support for the SCLP interface to the service element. - config SCLP_TTY bool "Support for SCLP line mode terminal" - depends on SCLP help Include support for IBM SCLP line-mode terminals. @@ -123,7 +117,6 @@ config SCLP_CONSOLE config SCLP_VT220_TTY bool "Support for SCLP VT220-compatible terminal" - depends on SCLP help Include support for an IBM SCLP VT220-compatible terminal. @@ -136,7 +129,6 @@ config SCLP_VT220_CONSOLE config SCLP_CPI tristate "Control-Program Identification" - depends on SCLP help This option enables the hardware console interface for system identification. This is commonly used for workload management and diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index 9803c9352d7..5a888704a8d 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile @@ -2,6 +2,8 @@ # Makefile for the S/390 specific device drivers # +CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w + obj-y += s390mach.o sysinfo.o s390_rdev.o obj-y += cio/ block/ char/ crypto/ net/ scsi/ diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 492b68bcd7c..eb5dc62f0d9 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -37,6 +37,7 @@ */ debug_info_t *dasd_debug_area; struct dasd_discipline *dasd_diag_discipline_pointer; +void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); MODULE_DESCRIPTION("Linux on S/390 DASD device driver," @@ -51,7 +52,6 @@ static int dasd_alloc_queue(struct dasd_device * device); static void dasd_setup_queue(struct dasd_device * device); static void dasd_free_queue(struct dasd_device * device); static void dasd_flush_request_queue(struct dasd_device *); -static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); static int dasd_flush_ccw_queue(struct dasd_device *, int); static void dasd_tasklet(struct dasd_device *); static void do_kick_device(struct work_struct *); @@ -483,7 +483,7 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF; /* * Add profiling information for cqr before execution. */ -static inline void +static void dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, struct request *req) { @@ -505,7 +505,7 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, /* * Add profiling information for cqr after execution. 
*/ -static inline void +static void dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, struct request *req) { @@ -1022,8 +1022,6 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) era = dasd_era_none; - else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) - era = dasd_era_fatal; /* don't recover this request */ else if (irb->esw.esw0.erw.cons) era = device->discipline->examine_error(cqr, irb); else @@ -1104,7 +1102,7 @@ __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr) /* * Process ccw request queue. */ -static inline void +static void __dasd_process_ccw_queue(struct dasd_device * device, struct list_head *final_queue) { @@ -1127,7 +1125,9 @@ restart: cqr->status = DASD_CQR_FAILED; cqr->stopclk = get_clock(); } else { - if (cqr->irb.esw.esw0.erw.cons) { + if (cqr->irb.esw.esw0.erw.cons && + test_bit(DASD_CQR_FLAGS_USE_ERP, + &cqr->flags)) { erp_fn = device->discipline-> erp_action(cqr); erp_fn(cqr); @@ -1181,7 +1181,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data) /* * Fetch requests from the block device queue. */ -static inline void +static void __dasd_process_blk_queue(struct dasd_device * device) { request_queue_t *queue; @@ -1232,6 +1232,19 @@ __dasd_process_blk_queue(struct dasd_device * device) if (IS_ERR(cqr)) { if (PTR_ERR(cqr) == -ENOMEM) break; /* terminate request queue loop */ + if (PTR_ERR(cqr) == -EAGAIN) { + /* + * The current request cannot be build right + * now, we have to try later. If this request + * is the head-of-queue we stop the device + * for 1/2 second. + */ + if (!list_empty(&device->ccw_queue)) + break; + device->stopped |= DASD_STOPPED_PENDING; + dasd_set_timer(device, HZ/2); + break; + } DBF_DEV_EVENT(DBF_ERR, device, "CCW creation failed (rc=%ld) " "on request %p", @@ -1254,7 +1267,7 @@ __dasd_process_blk_queue(struct dasd_device * device) * Take a look at the first request on the ccw queue and check * if it reached its expire time. If so, terminate the IO. */ -static inline void +static void __dasd_check_expire(struct dasd_device * device) { struct dasd_ccw_req *cqr; @@ -1285,7 +1298,7 @@ __dasd_check_expire(struct dasd_device * device) * Take a look at the first request on the ccw queue and check * if it needs to be started. 
*/ -static inline void +static void __dasd_start_head(struct dasd_device * device) { struct dasd_ccw_req *cqr; diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 4d01040c2c6..8b9d68f6e01 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -170,7 +170,6 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb) /* log the erp chain if fatal error occurred */ if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { dasd_log_sense(cqr, irb); - dasd_log_ccw(cqr, 0, irb->scsw.cpa); } return era; @@ -2640,7 +2639,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) struct dasd_ccw_req *erp = NULL; struct dasd_device *device = cqr->device; - __u32 cpa = cqr->irb.scsw.cpa; struct dasd_ccw_req *temp_erp = NULL; if (device->features & DASD_FEATURE_ERPLOG) { @@ -2706,9 +2704,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) } } - if (erp->status == DASD_CQR_FAILED) - dasd_log_ccw(erp, 1, cpa); - /* enqueue added ERP request */ if (erp->status == DASD_CQR_FILLED) { erp->status = DASD_CQR_QUEUED; diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 5943266152f..ed70852cc91 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -136,7 +136,7 @@ __setup ("dasd=", dasd_call_setup); /* * Read a device busid/devno from a string. */ -static inline int +static int dasd_busid(char **str, int *id0, int *id1, int *devno) { int val, old_style; @@ -182,7 +182,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno) * only one: "ro" for read-only devices. The default feature set * is empty (value 0). */ -static inline int +static int dasd_feature_list(char *str, char **endp) { int features, len, rc; @@ -341,7 +341,7 @@ dasd_parse_range( char *parsestring ) { return ERR_PTR(-EINVAL); } -static inline char * +static char * dasd_parse_next_element( char *parsestring ) { char * residual_str; residual_str = dasd_parse_keyword(parsestring); diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 53db58a6861..ab782bb46ac 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -43,7 +43,7 @@ MODULE_LICENSE("GPL"); #define DIAG_MAX_RETRIES 32 #define DIAG_TIMEOUT 50 * HZ -struct dasd_discipline dasd_diag_discipline; +static struct dasd_discipline dasd_diag_discipline; struct dasd_diag_private { struct dasd_diag_characteristics rdc_data; @@ -90,7 +90,7 @@ static inline int dia250(void *iob, int cmd) * block offset. On success, return zero and set end_block to contain the * number of blocks on the device minus the specified offset. Return non-zero * otherwise. */ -static __inline__ int +static inline int mdsk_init_io(struct dasd_device *device, unsigned int blocksize, blocknum_t offset, blocknum_t *end_block) { @@ -117,7 +117,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize, /* Remove block I/O environment for device. Return zero on success, non-zero * otherwise. 
*/ -static __inline__ int +static inline int mdsk_term_io(struct dasd_device * device) { struct dasd_diag_private *private; @@ -576,7 +576,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, "dump sense not available for DIAG data"); } -struct dasd_discipline dasd_diag_discipline = { +static struct dasd_discipline dasd_diag_discipline = { .owner = THIS_MODULE, .name = "DIAG", .ebcname = "DIAG", diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fdaa471e845..cecab2274a6 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -134,44 +134,7 @@ ceil_quot(unsigned int d1, unsigned int d2) return (d1 + (d2 - 1)) / d2; } -static inline int -bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl) -{ - unsigned int fl1, fl2, int1, int2; - int bpr; - - switch (rdc->formula) { - case 0x01: - fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc)); - fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0, - ECKD_F1(rdc)); - bpr = fl1 + fl2; - break; - case 0x02: - int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1); - int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1); - fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl + - ECKD_F6(rdc) + ECKD_F4(rdc) * int1, - ECKD_F1(rdc)); - fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl + - ECKD_F6(rdc) + ECKD_F4(rdc) * int2, - ECKD_F1(rdc)); - bpr = fl1 + fl2; - break; - default: - bpr = 0; - break; - } - return bpr; -} - -static inline unsigned int -bytes_per_track(struct dasd_eckd_characteristics *rdc) -{ - return *(unsigned int *) (rdc->byte_per_track) >> 8; -} - -static inline unsigned int +static unsigned int recs_per_track(struct dasd_eckd_characteristics * rdc, unsigned int kl, unsigned int dl) { @@ -204,37 +167,39 @@ recs_per_track(struct dasd_eckd_characteristics * rdc, return 0; } -static inline void +static int check_XRC (struct ccw1 *de_ccw, struct DE_eckd_data *data, struct dasd_device *device) { struct dasd_eckd_private *private; + int rc; private = (struct dasd_eckd_private *) device->private; + if (!private->rdc_data.facilities.XRC_supported) + return 0; /* switch on System Time Stamp - needed for XRC Support */ - if (private->rdc_data.facilities.XRC_supported) { - - data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ - data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ - - data->ep_sys_time = get_clock (); - - de_ccw->count = sizeof (struct DE_eckd_data); - de_ccw->flags |= CCW_FLAG_SLI; - } + data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ + data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ - return; + rc = get_sync_clock(&data->ep_sys_time); + /* Ignore return code if sync clock is switched off. 
*/ + if (rc == -ENOSYS || rc == -EACCES) + rc = 0; -} /* end check_XRC */ + de_ccw->count = sizeof (struct DE_eckd_data); + de_ccw->flags |= CCW_FLAG_SLI; + return rc; +} -static inline void +static int define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, int totrk, int cmd, struct dasd_device * device) { struct dasd_eckd_private *private; struct ch_t geo, beg, end; + int rc = 0; private = (struct dasd_eckd_private *) device->private; @@ -263,12 +228,12 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, case DASD_ECKD_CCW_WRITE_KD_MT: data->mask.perm = 0x02; data->attributes.operation = private->attrib.operation; - check_XRC (ccw, data, device); + rc = check_XRC (ccw, data, device); break; case DASD_ECKD_CCW_WRITE_CKD: case DASD_ECKD_CCW_WRITE_CKD_MT: data->attributes.operation = DASD_BYPASS_CACHE; - check_XRC (ccw, data, device); + rc = check_XRC (ccw, data, device); break; case DASD_ECKD_CCW_ERASE: case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: @@ -276,7 +241,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, data->mask.perm = 0x3; data->mask.auth = 0x1; data->attributes.operation = DASD_BYPASS_CACHE; - check_XRC (ccw, data, device); + rc = check_XRC (ccw, data, device); break; default: DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); @@ -312,9 +277,10 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, data->beg_ext.head = beg.head; data->end_ext.cyl = end.cyl; data->end_ext.head = end.head; + return rc; } -static inline void +static void locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, int rec_on_trk, int no_rec, int cmd, struct dasd_device * device, int reclen) @@ -548,7 +514,7 @@ dasd_eckd_read_conf(struct dasd_device *device) /* * Build CP for Perform Subsystem Function - SSC. */ -struct dasd_ccw_req * +static struct dasd_ccw_req * dasd_eckd_build_psf_ssc(struct dasd_device *device) { struct dasd_ccw_req *cqr; @@ -1200,7 +1166,12 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) return cqr; ccw = cqr->cpaddr; /* First ccw is define extent. */ - define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device); + if (define_extent(ccw++, cqr->data, first_trk, + last_trk, cmd, device) == -EAGAIN) { + /* Clock not in sync and XRC is enabled. Try again later. */ + dasd_sfree_request(cqr, device); + return ERR_PTR(-EAGAIN); + } /* Build locate_record+read/write/ccws. 
*/ idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); LO_data = (struct LO_eckd_data *) (idaws + cidaw); @@ -1380,7 +1351,7 @@ dasd_eckd_release(struct dasd_device *device) cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); - cqr->retries = 0; + cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; @@ -1420,7 +1391,7 @@ dasd_eckd_reserve(struct dasd_device *device) cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); - cqr->retries = 0; + cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; @@ -1459,7 +1430,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) cqr->device = device; clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); - cqr->retries = 0; + cqr->retries = 2; /* set retry counter to enable basic ERP */ cqr->expires = 2 * HZ; cqr->buildclk = get_clock(); cqr->status = DASD_CQR_FILLED; @@ -1609,7 +1580,7 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp) * Dump the range of CCWs into 'page' buffer * and return number of printed chars. */ -static inline int +static int dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) { int len, count; diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index e0bf30ebb21..6cedc914077 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -658,18 +658,24 @@ static struct file_operations dasd_eer_fops = { .owner = THIS_MODULE, }; -static struct miscdevice dasd_eer_dev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "dasd_eer", - .fops = &dasd_eer_fops, -}; +static struct miscdevice *dasd_eer_dev = NULL; int __init dasd_eer_init(void) { int rc; - rc = misc_register(&dasd_eer_dev); + dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL); + if (!dasd_eer_dev) + return -ENOMEM; + + dasd_eer_dev->minor = MISC_DYNAMIC_MINOR; + dasd_eer_dev->name = "dasd_eer"; + dasd_eer_dev->fops = &dasd_eer_fops; + + rc = misc_register(dasd_eer_dev); if (rc) { + kfree(dasd_eer_dev); + dasd_eer_dev = NULL; MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " "register misc device"); return rc; @@ -680,5 +686,9 @@ int __init dasd_eer_init(void) void dasd_eer_exit(void) { - WARN_ON(misc_deregister(&dasd_eer_dev) != 0); + if (dasd_eer_dev) { + WARN_ON(misc_deregister(dasd_eer_dev) != 0); + kfree(dasd_eer_dev); + dasd_eer_dev = NULL; + } } diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index 58a65097922..caa5d91420f 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c @@ -152,25 +152,6 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr) } /* end default_erp_postaction */ -/* - * Print the hex dump of the memory used by a request. This includes - * all error recovery ccws that have been chained in from of the - * real request. 
- */ -static inline void -hex_dump_memory(struct dasd_device *device, void *data, int len) -{ - int *pint; - - pint = (int *) data; - while (len > 0) { - DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x", - pint, pint[0], pint[1], pint[2], pint[3]); - pint += 4; - len -= 16; - } -} - void dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) { @@ -182,69 +163,8 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) device->discipline->dump_sense(device, cqr, irb); } -void -dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa) -{ - struct dasd_device *device; - struct dasd_ccw_req *lcqr; - struct ccw1 *ccw; - int cplength; - - device = cqr->device; - /* log the channel program */ - for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) { - DEV_MESSAGE(KERN_ERR, device, - "(%s) ERP chain report for req: %p", - caller == 0 ? "EXAMINE" : "ACTION", lcqr); - hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req)); - - cplength = 1; - ccw = lcqr->cpaddr; - while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC)) - cplength++; - - if (cplength > 40) { /* log only parts of the CP */ - DEV_MESSAGE(KERN_ERR, device, "%s", - "Start of channel program:"); - hex_dump_memory(device, lcqr->cpaddr, - 40*sizeof(struct ccw1)); - - DEV_MESSAGE(KERN_ERR, device, "%s", - "End of channel program:"); - hex_dump_memory(device, lcqr->cpaddr + cplength - 10, - 10*sizeof(struct ccw1)); - } else { /* log the whole CP */ - DEV_MESSAGE(KERN_ERR, device, "%s", - "Channel program (complete):"); - hex_dump_memory(device, lcqr->cpaddr, - cplength*sizeof(struct ccw1)); - } - - if (lcqr != cqr) - continue; - - /* - * Log bytes arround failed CCW but only if we did - * not log the whole CP of the CCW is outside the - * logged CP. - */ - if (cplength > 40 || - ((addr_t) cpa < (addr_t) lcqr->cpaddr && - (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) { - - DEV_MESSAGE(KERN_ERR, device, - "Failed CCW (%p) (area):", - (void *) (long) cpa); - hex_dump_memory(device, cqr->cpaddr - 10, - 20*sizeof(struct ccw1)); - } - } - -} /* end log_erp_chain */ - EXPORT_SYMBOL(dasd_default_erp_action); EXPORT_SYMBOL(dasd_default_erp_postaction); EXPORT_SYMBOL(dasd_alloc_erp_request); EXPORT_SYMBOL(dasd_free_erp_request); EXPORT_SYMBOL(dasd_log_sense); -EXPORT_SYMBOL(dasd_log_ccw); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index b857fd5893f..be0909e3922 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -75,7 +75,7 @@ static struct ccw_driver dasd_fba_driver = { .notify = dasd_generic_notify, }; -static inline void +static void define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, int blksize, int beg, int nr) { @@ -95,7 +95,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, data->ext_end = nr - 1; } -static inline void +static void locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, int block_nr, int block_ct) { diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index d163632101d..47ba4462708 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -147,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device) */ memset(&bpart, 0, sizeof(struct blkpg_partition)); memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); - barg.data = (void __user *) &bpart; + barg.data = (void __force __user *) &bpart; barg.op = BLKPG_DEL_PARTITION; for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); 
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index fb725e3b08f..a2cc69e1141 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -559,7 +559,6 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int, struct dasd_device *); void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); void dasd_log_sense(struct dasd_ccw_req *, struct irb *); -void dasd_log_ccw(struct dasd_ccw_req *, int, __u32); /* externals in dasd_3370_erp.c */ dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index bfa010f6dab..8b7e11815d7 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -28,7 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL; static struct proc_dir_entry *dasd_devices_entry = NULL; static struct proc_dir_entry *dasd_statistics_entry = NULL; -static inline char * +static char * dasd_get_user_string(const char __user *user_buf, size_t user_len) { char *buffer; @@ -154,7 +154,7 @@ static struct file_operations dasd_devices_file_ops = { .release = seq_release, }; -static inline int +static int dasd_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len) { @@ -167,8 +167,8 @@ dasd_calc_metrics(char *page, char **start, off_t off, return len; } -static inline char * -dasd_statistics_array(char *str, int *array, int shift) +static char * +dasd_statistics_array(char *str, unsigned int *array, int shift) { int i; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index be9b05347b4..1340451ea40 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -102,7 +102,7 @@ dcssblk_release_segment(struct device *dev) * device needs to be enqueued before the semaphore is * freed. */ -static inline int +static int dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) { int minor, found; @@ -230,7 +230,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch SEGMENT_SHARED); if (rc < 0) { BUG_ON(rc == -EINVAL); - if (rc == -EIO || rc == -ENOENT) + if (rc != -EAGAIN) goto removeseg; } else { dev_info->is_shared = 1; @@ -253,7 +253,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch SEGMENT_EXCLUSIVE); if (rc < 0) { BUG_ON(rc == -EINVAL); - if (rc == -EIO || rc == -ENOENT) + if (rc != -EAGAIN) goto removeseg; } else { dev_info->is_shared = 0; diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index c3e97b4fc18..293e667b50f 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -2,7 +2,8 @@ # S/390 character devices # -obj-y += ctrlchar.o keyboard.o defkeymap.o +obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ + sclp_info.o obj-$(CONFIG_TN3270) += raw3270.o obj-$(CONFIG_TN3270_CONSOLE) += con3270.o @@ -11,7 +12,6 @@ obj-$(CONFIG_TN3270_FS) += fs3270.o obj-$(CONFIG_TN3215) += con3215.o -obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o obj-$(CONFIG_SCLP_TTY) += sclp_tty.o obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 25b5d7a6641..9a328f14a64 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c @@ -1121,7 +1121,7 @@ static const struct tty_operations tty3215_ops = { * 3215 tty registration code called from tty_init(). * Most kernel services (incl. 
kmalloc) are available at this poimt. */ -int __init +static int __init tty3215_init(void) { struct tty_driver *driver; diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 7566be89068..8e7f2d7633d 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -69,8 +69,7 @@ static void con3270_update(struct con3270 *); /* * Setup timeout for a device. On timeout trigger an update. */ -void -con3270_set_timer(struct con3270 *cp, int expires) +static void con3270_set_timer(struct con3270 *cp, int expires) { if (expires == 0) { if (timer_pending(&cp->timer)) diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 17027d918cf..564baca01b7 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c @@ -5,6 +5,8 @@ #include <linux/types.h> #include <linux/keyboard.h> #include <linux/kd.h> +#include <linux/kbd_kern.h> +#include <linux/kbd_diacr.h> u_short plain_map[NR_KEYS] = { 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 0893d306ae8..e1a746269c4 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -23,7 +23,7 @@ #include "raw3270.h" #include "ctrlchar.h" -struct raw3270_fn fs3270_fn; +static struct raw3270_fn fs3270_fn; struct fs3270 { struct raw3270_view view; @@ -401,7 +401,7 @@ fs3270_release(struct raw3270_view *view) } /* View to a 3270 device. Can be console, tty or fullscreen. */ -struct raw3270_fn fs3270_fn = { +static struct raw3270_fn fs3270_fn = { .activate = fs3270_activate, .deactivate = fs3270_deactivate, .intv = (void *) fs3270_irq, diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 3e86fd1756e..f62f9a4e895 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -148,6 +148,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc) } } +#if 0 /* * Generate ebcdic -> ascii translation table from kbd_data. */ @@ -173,6 +174,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc) } } } +#endif /* * We have a combining character DIACR here, followed by the character CH. diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index cdb24f52811..9e451acc649 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -67,8 +67,8 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) return -EINVAL; } -static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, - struct monwrite_hdr *monhdr) +static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, + struct monwrite_hdr *monhdr) { struct mon_buf *entry, *next; diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 7a84014f203..8facd14adb7 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -29,7 +29,7 @@ #include <linux/device.h> #include <linux/mutex.h> -struct class *class3270; +static struct class *class3270; /* The main 3270 data structure. */ struct raw3270 { @@ -86,7 +86,7 @@ DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue); /* * Encode array for 12 bit 3270 addresses. 
*/ -unsigned char raw3270_ebcgraf[64] = { +static unsigned char raw3270_ebcgraf[64] = { 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 8a056df09d6..f171de3b0b1 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t { /* Internal state: is a request active at the sclp? */ static volatile enum sclp_running_state_t { sclp_running_state_idle, - sclp_running_state_running + sclp_running_state_running, + sclp_running_state_reset_pending } sclp_running_state = sclp_running_state_idle; /* Internal state: is a read request pending? */ @@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t { /* Timeout intervals in seconds.*/ #define SCLP_BUSY_INTERVAL 10 -#define SCLP_RETRY_INTERVAL 15 +#define SCLP_RETRY_INTERVAL 30 static void sclp_process_queue(void); static int sclp_init_mask(int calculate); static int sclp_init(void); /* Perform service call. Return 0 on success, non-zero otherwise. */ -static int -service_call(sclp_cmdw_t command, void *sccb) +int +sclp_service_call(sclp_cmdw_t command, void *sccb) { int cc; @@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb) return 0; } -/* Request timeout handler. Restart the request queue. If DATA is non-zero, - * force restart of running request. */ +static inline void __sclp_make_read_req(void); + static void -sclp_request_timeout(unsigned long data) +__sclp_queue_read_req(void) { - unsigned long flags; - - if (data) { - spin_lock_irqsave(&sclp_lock, flags); - sclp_running_state = sclp_running_state_idle; - spin_unlock_irqrestore(&sclp_lock, flags); + if (sclp_reading_state == sclp_reading_state_idle) { + sclp_reading_state = sclp_reading_state_reading; + __sclp_make_read_req(); + /* Add request to head of queue */ + list_add(&sclp_read_req.list, &sclp_req_queue); } - sclp_process_queue(); } /* Set up request retry timer. Called while sclp_lock is locked. */ @@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long), add_timer(&sclp_request_timer); } +/* Request timeout handler. Restart the request queue. If DATA is non-zero, + * force restart of running request. */ +static void +sclp_request_timeout(unsigned long data) +{ + unsigned long flags; + + spin_lock_irqsave(&sclp_lock, flags); + if (data) { + if (sclp_running_state == sclp_running_state_running) { + /* Break running state and queue NOP read event request + * to get a defined interface state. */ + __sclp_queue_read_req(); + sclp_running_state = sclp_running_state_idle; + } + } else { + __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, + sclp_request_timeout, 0); + } + spin_unlock_irqrestore(&sclp_lock, flags); + sclp_process_queue(); +} + /* Try to start a request. Return zero if the request was successfully * started or if it will be started at a later time. Return non-zero otherwise. * Called while sclp_lock is locked. */ @@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req) if (sclp_running_state != sclp_running_state_idle) return 0; del_timer(&sclp_request_timer); - rc = service_call(req->command, req->sccb); + rc = sclp_service_call(req->command, req->sccb); req->start_count++; if (rc == 0) { @@ -191,7 +213,15 @@ sclp_process_queue(void) rc = __sclp_start_request(req); if (rc == 0) break; - /* Request failed. 
*/ + /* Request failed */ + if (req->start_count > 1) { + /* Cannot abort already submitted request - could still + * be active at the SCLP */ + __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, + sclp_request_timeout, 0); + break; + } + /* Post-processing for aborted request */ list_del(&req->list); if (req->callback) { spin_unlock_irqrestore(&sclp_lock, flags); @@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req) list_add_tail(&req->list, &sclp_req_queue); rc = 0; /* Start if request is first in list */ - if (req->list.prev == &sclp_req_queue) { + if (sclp_running_state == sclp_running_state_idle && + req->list.prev == &sclp_req_queue) { rc = __sclp_start_request(req); if (rc) list_del(&req->list); @@ -294,7 +325,7 @@ __sclp_make_read_req(void) sccb = (struct sccb_header *) sclp_read_sccb; clear_page(sccb); memset(&sclp_read_req, 0, sizeof(struct sclp_req)); - sclp_read_req.command = SCLP_CMDW_READDATA; + sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA; sclp_read_req.status = SCLP_REQ_QUEUED; sclp_read_req.start_count = 0; sclp_read_req.callback = sclp_read_cb; @@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code) finished_sccb = S390_lowcore.ext_params & 0xfffffff8; evbuf_pending = S390_lowcore.ext_params & 0x3; if (finished_sccb) { + del_timer(&sclp_request_timer); + sclp_running_state = sclp_running_state_reset_pending; req = __sclp_find_req(finished_sccb); if (req) { /* Request post-processing */ @@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code) sclp_running_state = sclp_running_state_idle; } if (evbuf_pending && sclp_receive_mask != 0 && - sclp_reading_state == sclp_reading_state_idle && - sclp_activation_state == sclp_activation_state_active ) { - sclp_reading_state = sclp_reading_state_reading; - __sclp_make_read_req(); - /* Add request to head of queue */ - list_add(&sclp_read_req.list, &sclp_req_queue); - } + sclp_activation_state == sclp_activation_state_active) + __sclp_queue_read_req(); spin_unlock(&sclp_lock); sclp_process_queue(); } @@ -374,6 +402,7 @@ sclp_sync_wait(void) unsigned long flags; unsigned long cr0, cr0_sync; u64 timeout; + int irq_context; /* We'll be disabling timer interrupts, so we need a custom timeout * mechanism */ @@ -386,7 +415,9 @@ sclp_sync_wait(void) } local_irq_save(flags); /* Prevent bottom half from executing once we force interrupts open */ - local_bh_disable(); + irq_context = in_interrupt(); + if (!irq_context) + local_bh_disable(); /* Enable service-signal interruption, disable timer interrupts */ trace_hardirqs_on(); __ctl_store(cr0, 0, 0); @@ -402,19 +433,19 @@ sclp_sync_wait(void) get_clock() > timeout && del_timer(&sclp_request_timer)) sclp_request_timer.function(sclp_request_timer.data); - barrier(); cpu_relax(); } local_irq_disable(); __ctl_load(cr0, 0, 0); - _local_bh_enable(); + if (!irq_context) + _local_bh_enable(); local_irq_restore(flags); } EXPORT_SYMBOL(sclp_sync_wait); /* Dispatch changes in send and receive mask to registered listeners. 
*/ -static inline void +static void sclp_dispatch_state_change(void) { struct list_head *l; @@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask) sccb = (struct init_sccb *) sclp_init_sccb; clear_page(sccb); memset(&sclp_init_req, 0, sizeof(struct sclp_req)); - sclp_init_req.command = SCLP_CMDW_WRITEMASK; + sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK; sclp_init_req.status = SCLP_REQ_FILLED; sclp_init_req.start_count = 0; sclp_init_req.callback = NULL; @@ -800,7 +831,7 @@ sclp_check_interface(void) for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { __sclp_make_init_req(0, 0); sccb = (struct init_sccb *) sclp_init_req.sccb; - rc = service_call(sclp_init_req.command, sccb); + rc = sclp_service_call(sclp_init_req.command, sccb); if (rc == -EIO) break; sclp_init_req.status = SCLP_REQ_RUNNING; diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 2c71d6ee7b5..7d29ab45a6e 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -12,7 +12,7 @@ #include <linux/types.h> #include <linux/list.h> - +#include <asm/sclp.h> #include <asm/ebcdic.h> /* maximum number of pages concerning our own memory management */ @@ -49,9 +49,11 @@ typedef unsigned int sclp_cmdw_t; -#define SCLP_CMDW_READDATA 0x00770005 -#define SCLP_CMDW_WRITEDATA 0x00760005 -#define SCLP_CMDW_WRITEMASK 0x00780005 +#define SCLP_CMDW_READ_EVENT_DATA 0x00770005 +#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 +#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 +#define SCLP_CMDW_READ_SCP_INFO 0x00020001 +#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 #define GDS_ID_MDSMU 0x1310 #define GDS_ID_MDSRouteInfo 0x1311 @@ -66,13 +68,6 @@ typedef unsigned int sclp_cmdw_t; typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ -struct sccb_header { - u16 length; - u8 function_code; - u8 control_mask[3]; - u16 response_code; -} __attribute__((packed)); - struct gds_subvector { u8 length; u8 key; @@ -131,6 +126,7 @@ void sclp_unregister(struct sclp_register *reg); int sclp_remove_processed(struct sccb_header *sccb); int sclp_deactivate(void); int sclp_reactivate(void); +int sclp_service_call(sclp_cmdw_t command, void *sccb); /* useful inlines */ diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index 86864f64171..ead1043d788 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -66,7 +66,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); } -static inline void +static void sclp_conbuf_emit(void) { struct sclp_buffer* buffer; diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 4f873ae148b..65aa2c85737 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c @@ -169,7 +169,7 @@ cpi_prepare_req(void) } /* prepare request data structure presented to SCLP driver */ - req->command = SCLP_CMDW_WRITEDATA; + req->command = SCLP_CMDW_WRITE_EVENT_DATA; req->sccb = sccb; req->status = SCLP_REQ_FILLED; req->callback = cpi_callback; diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c new file mode 100644 index 00000000000..7bcbe643b08 --- /dev/null +++ b/drivers/s390/char/sclp_info.c @@ -0,0 +1,57 @@ +/* + * drivers/s390/char/sclp_info.c + * + * Copyright IBM Corp. 
2007 + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> + */ + +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <asm/sclp.h> +#include "sclp.h" + +struct sclp_readinfo_sccb s390_readinfo_sccb; + +void __init sclp_readinfo_early(void) +{ + sclp_cmdw_t command; + struct sccb_header *sccb; + int ret; + + __ctl_set_bit(0, 9); /* enable service signal subclass mask */ + + sccb = &s390_readinfo_sccb.header; + command = SCLP_CMDW_READ_SCP_INFO_FORCED; + while (1) { + u16 response; + + memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb)); + sccb->length = sizeof(s390_readinfo_sccb); + sccb->control_mask[2] = 0x80; + + ret = sclp_service_call(command, &s390_readinfo_sccb); + + if (ret == -EIO) + goto out; + if (ret == -EBUSY) + continue; + + __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT | + PSW_MASK_WAIT | PSW_DEFAULT_KEY); + local_irq_disable(); + barrier(); + + response = sccb->response_code; + + if (response == 0x10) + break; + + if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO) + break; + + command = SCLP_CMDW_READ_SCP_INFO; + } +out: + __ctl_clear_bit(0, 9); /* disable service signal subclass mask */ +} diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 0c92d3909cc..2486783ea58 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c @@ -460,7 +460,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer, sccb->msg_buf.header.type = EvTyp_PMsgCmd; else return -ENOSYS; - buffer->request.command = SCLP_CMDW_WRITEDATA; + buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; buffer->request.status = SCLP_REQ_FILLED; buffer->request.callback = sclp_writedata_callback; buffer->request.callback_data = buffer; diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 2d173e5c8a0..90536f60bf5 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -721,7 +721,7 @@ static const struct tty_operations sclp_ops = { .ioctl = sclp_tty_ioctl, }; -int __init +static int __init sclp_tty_init(void) { struct tty_driver *driver; diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 723bf4191bf..544f137d70d 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c @@ -207,7 +207,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request) request->sclp_req.status = SCLP_REQ_FAILED; return -EIO; } - request->sclp_req.command = SCLP_CMDW_WRITEDATA; + request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA; request->sclp_req.status = SCLP_REQ_FILLED; request->sclp_req.callback = sclp_vt220_callback; request->sclp_req.callback_data = (void *) request; @@ -669,7 +669,7 @@ static const struct tty_operations sclp_vt220_ops = { /* * Register driver with SCLP and Linux and initialize internal tty structures. */ -int __init +static int __init sclp_vt220_tty_init(void) { struct tty_driver *driver; diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index c9f1c4c8bb1..bb4ff537729 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h @@ -3,7 +3,7 @@ * tape device driver for 3480/3490E/3590 tapes. * * S390 and zSeries version - * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 
2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -99,7 +99,11 @@ enum tape_op { TO_DIS, /* Tape display */ TO_ASSIGN, /* Assign tape to channel path */ TO_UNASSIGN, /* Unassign tape from channel path */ - TO_SIZE /* #entries in tape_op_t */ + TO_CRYPT_ON, /* Enable encrpytion */ + TO_CRYPT_OFF, /* Disable encrpytion */ + TO_KEKL_SET, /* Set KEK label */ + TO_KEKL_QUERY, /* Query KEK label */ + TO_SIZE, /* #entries in tape_op_t */ }; /* Forward declaration */ @@ -112,6 +116,7 @@ enum tape_request_status { TAPE_REQUEST_IN_IO, /* request is currently in IO */ TAPE_REQUEST_DONE, /* request is completed. */ TAPE_REQUEST_CANCEL, /* request should be canceled. */ + TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */ }; /* Tape CCW request */ @@ -164,10 +169,11 @@ struct tape_discipline { * The discipline irq function either returns an error code (<0) which * means that the request has failed with an error or one of the following: */ -#define TAPE_IO_SUCCESS 0 /* request successful */ -#define TAPE_IO_PENDING 1 /* request still running */ -#define TAPE_IO_RETRY 2 /* retry to current request */ -#define TAPE_IO_STOP 3 /* stop the running request */ +#define TAPE_IO_SUCCESS 0 /* request successful */ +#define TAPE_IO_PENDING 1 /* request still running */ +#define TAPE_IO_RETRY 2 /* retry to current request */ +#define TAPE_IO_STOP 3 /* stop the running request */ +#define TAPE_IO_LONG_BUSY 4 /* delay the running request */ /* Char Frontend Data */ struct tape_char_data { @@ -242,6 +248,10 @@ struct tape_device { /* Function to start or stop the next request later. */ struct delayed_work tape_dnr; + + /* Timer for long busy */ + struct timer_list lb_timeout; + }; /* Externals from tape_core.c */ diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 9df912f6318..50f5edab83d 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -2,7 +2,7 @@ * drivers/s390/char/tape_3590.c * tape device discipline for 3590 tapes. * - * Copyright (C) IBM Corp. 2001,2006 + * Copyright IBM Corp. 
2001,2006 * Author(s): Stefan Bader <shbader@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/bio.h> +#include <asm/ebcdic.h> #define TAPE_DBF_AREA tape_3590_dbf @@ -30,7 +31,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA); * - Read Device (buffered) log: BRA * - Read Library log: BRA * - Swap Devices: BRA - * - Long Busy: BRA + * - Long Busy: implemented * - Special Intercept: BRA * - Read Alternate: implemented *******************************************************************/ @@ -94,6 +95,332 @@ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { [0xae] = "Subsystem environmental alert", }; +static int crypt_supported(struct tape_device *device) +{ + return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device)); +} + +static int crypt_enabled(struct tape_device *device) +{ + return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device)); +} + +static void ext_to_int_kekl(struct tape390_kekl *in, + struct tape3592_kekl *out) +{ + int i; + + memset(out, 0, sizeof(*out)); + if (in->type == TAPE390_KEKL_TYPE_HASH) + out->flags |= 0x40; + if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH) + out->flags |= 0x80; + strncpy(out->label, in->label, 64); + for (i = strlen(in->label); i < sizeof(out->label); i++) + out->label[i] = ' '; + ASCEBC(out->label, sizeof(out->label)); +} + +static void int_to_ext_kekl(struct tape3592_kekl *in, + struct tape390_kekl *out) +{ + memset(out, 0, sizeof(*out)); + if(in->flags & 0x40) + out->type = TAPE390_KEKL_TYPE_HASH; + else + out->type = TAPE390_KEKL_TYPE_LABEL; + if(in->flags & 0x80) + out->type_on_tape = TAPE390_KEKL_TYPE_HASH; + else + out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; + memcpy(out->label, in->label, sizeof(in->label)); + EBCASC(out->label, sizeof(in->label)); + strstrip(out->label); +} + +static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in, + struct tape390_kekl_pair *out) +{ + if (in->count == 0) { + out->kekl[0].type = TAPE390_KEKL_TYPE_NONE; + out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE; + out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; + out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; + } else if (in->count == 1) { + int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); + out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; + out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; + } else if (in->count == 2) { + int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); + int_to_ext_kekl(&in->kekl[1], &out->kekl[1]); + } else { + printk("Invalid KEKL number: %d\n", in->count); + BUG(); + } +} + +static int check_ext_kekl(struct tape390_kekl *kekl) +{ + if (kekl->type == TAPE390_KEKL_TYPE_NONE) + goto invalid; + if (kekl->type > TAPE390_KEKL_TYPE_HASH) + goto invalid; + if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE) + goto invalid; + if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH) + goto invalid; + if ((kekl->type == TAPE390_KEKL_TYPE_HASH) && + (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL)) + goto invalid; + + return 0; +invalid: + return -EINVAL; +} + +static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls) +{ + if (check_ext_kekl(&kekls->kekl[0])) + goto invalid; + if (check_ext_kekl(&kekls->kekl[1])) + goto invalid; + + return 0; +invalid: + return -EINVAL; +} + +/* + * Query KEKLs + */ +static int tape_3592_kekl_query(struct tape_device *device, + struct tape390_kekl_pair *ext_kekls) +{ + struct tape_request *request; + struct tape3592_kekl_query_order *order; + struct tape3592_kekl_query_data *int_kekls; + int rc; + + 
DBF_EVENT(6, "tape3592_kekl_query\n"); + int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA); + if (!int_kekls) + return -ENOMEM; + request = tape_alloc_request(2, sizeof(*order)); + if (IS_ERR(request)) { + rc = PTR_ERR(request); + goto fail_malloc; + } + order = request->cpdata; + memset(order,0,sizeof(*order)); + order->code = 0xe2; + order->max_count = 2; + request->op = TO_KEKL_QUERY; + tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); + tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls), + int_kekls); + rc = tape_do_io(device, request); + if (rc) + goto fail_request; + int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls); + + rc = 0; +fail_request: + tape_free_request(request); +fail_malloc: + kfree(int_kekls); + return rc; +} + +/* + * IOCTL: Query KEKLs + */ +static int tape_3592_ioctl_kekl_query(struct tape_device *device, + unsigned long arg) +{ + int rc; + struct tape390_kekl_pair *ext_kekls; + + DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n"); + if (!crypt_supported(device)) + return -ENOSYS; + if (!crypt_enabled(device)) + return -EUNATCH; + ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); + if (!ext_kekls) + return -ENOMEM; + rc = tape_3592_kekl_query(device, ext_kekls); + if (rc != 0) + goto fail; + if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) { + rc = -EFAULT; + goto fail; + } + rc = 0; +fail: + kfree(ext_kekls); + return rc; +} + +static int tape_3590_mttell(struct tape_device *device, int mt_count); + +/* + * Set KEKLs + */ +static int tape_3592_kekl_set(struct tape_device *device, + struct tape390_kekl_pair *ext_kekls) +{ + struct tape_request *request; + struct tape3592_kekl_set_order *order; + + DBF_EVENT(6, "tape3592_kekl_set\n"); + if (check_ext_kekl_pair(ext_kekls)) { + DBF_EVENT(6, "invalid kekls\n"); + return -EINVAL; + } + if (tape_3590_mttell(device, 0) != 0) + return -EBADSLT; + request = tape_alloc_request(1, sizeof(*order)); + if (IS_ERR(request)) + return PTR_ERR(request); + order = request->cpdata; + memset(order, 0, sizeof(*order)); + order->code = 0xe3; + order->kekls.count = 2; + ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]); + ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]); + request->op = TO_KEKL_SET; + tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); + + return tape_do_io_free(device, request); +} + +/* + * IOCTL: Set KEKLs + */ +static int tape_3592_ioctl_kekl_set(struct tape_device *device, + unsigned long arg) +{ + int rc; + struct tape390_kekl_pair *ext_kekls; + + DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n"); + if (!crypt_supported(device)) + return -ENOSYS; + if (!crypt_enabled(device)) + return -EUNATCH; + ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); + if (!ext_kekls) + return -ENOMEM; + if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) { + rc = -EFAULT; + goto out; + } + rc = tape_3592_kekl_set(device, ext_kekls); +out: + kfree(ext_kekls); + return rc; +} + +/* + * Enable encryption + */ +static int tape_3592_enable_crypt(struct tape_device *device) +{ + struct tape_request *request; + char *data; + + DBF_EVENT(6, "tape_3592_enable_crypt\n"); + if (!crypt_supported(device)) + return -ENOSYS; + request = tape_alloc_request(2, 72); + if (IS_ERR(request)) + return PTR_ERR(request); + data = request->cpdata; + memset(data,0,72); + + data[0] = 0x05; + data[36 + 0] = 0x03; + data[36 + 1] = 0x03; + data[36 + 4] = 0x40; + data[36 + 6] = 0x01; + data[36 + 14] = 0x2f; + data[36 + 18] = 0xc3; + data[36 + 35] = 
0x72; + request->op = TO_CRYPT_ON; + tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); + tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); + return tape_do_io_free(device, request); +} + +/* + * Disable encryption + */ +static int tape_3592_disable_crypt(struct tape_device *device) +{ + struct tape_request *request; + char *data; + + DBF_EVENT(6, "tape_3592_disable_crypt\n"); + if (!crypt_supported(device)) + return -ENOSYS; + request = tape_alloc_request(2, 72); + if (IS_ERR(request)) + return PTR_ERR(request); + data = request->cpdata; + memset(data,0,72); + + data[0] = 0x05; + data[36 + 0] = 0x03; + data[36 + 1] = 0x03; + data[36 + 35] = 0x32; + + request->op = TO_CRYPT_OFF; + tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); + tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); + + return tape_do_io_free(device, request); +} + +/* + * IOCTL: Set encryption status + */ +static int tape_3592_ioctl_crypt_set(struct tape_device *device, + unsigned long arg) +{ + struct tape390_crypt_info info; + + DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n"); + if (!crypt_supported(device)) + return -ENOSYS; + if (copy_from_user(&info, (char __user *)arg, sizeof(info))) + return -EFAULT; + if (info.status & ~TAPE390_CRYPT_ON_MASK) + return -EINVAL; + if (info.status & TAPE390_CRYPT_ON_MASK) + return tape_3592_enable_crypt(device); + else + return tape_3592_disable_crypt(device); +} + +static int tape_3590_sense_medium(struct tape_device *device); + +/* + * IOCTL: Query enryption status + */ +static int tape_3592_ioctl_crypt_query(struct tape_device *device, + unsigned long arg) +{ + DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n"); + if (!crypt_supported(device)) + return -ENOSYS; + tape_3590_sense_medium(device); + if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device), + sizeof(TAPE_3590_CRYPT_INFO(device)))) + return -EFAULT; + else + return 0; +} + /* * 3590 IOCTL Overload */ @@ -109,6 +436,14 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) return tape_std_display(device, &disp); } + case TAPE390_KEKL_SET: + return tape_3592_ioctl_kekl_set(device, arg); + case TAPE390_KEKL_QUERY: + return tape_3592_ioctl_kekl_query(device, arg); + case TAPE390_CRYPT_SET: + return tape_3592_ioctl_crypt_set(device, arg); + case TAPE390_CRYPT_QUERY: + return tape_3592_ioctl_crypt_query(device, arg); default: return -EINVAL; /* no additional ioctls */ } @@ -248,6 +583,12 @@ tape_3590_work_handler(struct work_struct *work) case TO_READ_ATTMSG: tape_3590_read_attmsg(p->device); break; + case TO_CRYPT_ON: + tape_3592_enable_crypt(p->device); + break; + case TO_CRYPT_OFF: + tape_3592_disable_crypt(p->device); + break; default: DBF_EVENT(3, "T3590: work handler undefined for " "operation 0x%02x\n", p->op); @@ -365,6 +706,33 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request) } #endif +static void tape_3590_med_state_set(struct tape_device *device, + struct tape_3590_med_sense *sense) +{ + struct tape390_crypt_info *c_info; + + c_info = &TAPE_3590_CRYPT_INFO(device); + + if (sense->masst == MSENSE_UNASSOCIATED) { + tape_med_state_set(device, MS_UNLOADED); + TAPE_3590_CRYPT_INFO(device).medium_status = 0; + return; + } + if (sense->masst != MSENSE_ASSOCIATED_MOUNT) { + PRINT_ERR("Unknown medium state: %x\n", sense->masst); + return; + } + tape_med_state_set(device, MS_LOADED); + c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK; + if (sense->flags & MSENSE_CRYPT_MASK) { + PRINT_INFO("Medium is encrypted (%04x)\n", 
sense->flags); + c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK; + } else { + DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags); + c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK; + } +} + /* * The done handler is called at device/channel end and wakes up the sleeping * process @@ -372,9 +740,10 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request) static int tape_3590_done(struct tape_device *device, struct tape_request *request) { - struct tape_3590_med_sense *sense; + struct tape_3590_disc_data *disc_data; DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); + disc_data = device->discdata; switch (request->op) { case TO_BSB: @@ -394,13 +763,20 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) break; case TO_RUN: tape_med_state_set(device, MS_UNLOADED); + tape_3590_schedule_work(device, TO_CRYPT_OFF); break; case TO_MSEN: - sense = (struct tape_3590_med_sense *) request->cpdata; - if (sense->masst == MSENSE_UNASSOCIATED) - tape_med_state_set(device, MS_UNLOADED); - if (sense->masst == MSENSE_ASSOCIATED_MOUNT) - tape_med_state_set(device, MS_LOADED); + tape_3590_med_state_set(device, request->cpdata); + break; + case TO_CRYPT_ON: + TAPE_3590_CRYPT_INFO(device).status + |= TAPE390_CRYPT_ON_MASK; + *(device->modeset_byte) |= 0x03; + break; + case TO_CRYPT_OFF: + TAPE_3590_CRYPT_INFO(device).status + &= ~TAPE390_CRYPT_ON_MASK; + *(device->modeset_byte) &= ~0x03; break; case TO_RBI: /* RBI seems to succeed even without medium loaded. */ case TO_NOP: /* Same to NOP. */ @@ -409,8 +785,9 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) case TO_DIS: case TO_ASSIGN: case TO_UNASSIGN: - break; case TO_SIZE: + case TO_KEKL_SET: + case TO_KEKL_QUERY: break; } return TAPE_IO_SUCCESS; @@ -540,10 +917,8 @@ static int tape_3590_erp_long_busy(struct tape_device *device, struct tape_request *request, struct irb *irb) { - /* FIXME: how about WAITING for a minute ? */ - PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", - device->cdev->dev.bus_id); - return tape_3590_erp_basic(device, request, irb, -EBUSY); + DBF_EVENT(6, "Device is busy\n"); + return TAPE_IO_LONG_BUSY; } /* @@ -951,6 +1326,34 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) device->cdev->dev.bus_id, sense->mc); } +static int tape_3590_crypt_error(struct tape_device *device, + struct tape_request *request, struct irb *irb) +{ + u8 cu_rc, ekm_rc1; + u16 ekm_rc2; + u32 drv_rc; + char *bus_id, *sense; + + sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; + bus_id = device->cdev->dev.bus_id; + cu_rc = sense[0]; + drv_rc = *((u32*) &sense[5]) & 0xffffff; + ekm_rc1 = sense[9]; + ekm_rc2 = *((u16*) &sense[10]); + if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) + /* key not defined on EKM */ + return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED); + if ((cu_rc == 1) || (cu_rc == 2)) + /* No connection to EKM */ + return tape_3590_erp_basic(device, request, irb, -ENOTCONN); + + PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id); + PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc, + drv_rc, ekm_rc1, ekm_rc2); + + return tape_3590_erp_basic(device, request, irb, -ENOKEY); +} + /* * 3590 error Recovery routine: * If possible, it tries to recover from the error. 
If this is not possible, @@ -979,6 +1382,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, sense = (struct tape_3590_sense *) irb->ecw; + DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc); + /* * First check all RC-QRCs where we want to do something special * - "break": basic error recovery is done @@ -999,6 +1404,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, case 0x2231: tape_3590_print_era_msg(device, irb); return tape_3590_erp_special_interrupt(device, request, irb); + case 0x2240: + return tape_3590_crypt_error(device, request, irb); case 0x3010: DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", @@ -1020,6 +1427,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, DBF_EVENT(2, "(%08x): Rewind Unload complete\n", device->cdev_id); tape_med_state_set(device, MS_UNLOADED); + tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, 0); case 0x4010: @@ -1030,9 +1438,15 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, PRINT_WARN("(%s): Tape operation when medium not loaded\n", device->cdev->dev.bus_id); tape_med_state_set(device, MS_UNLOADED); + tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); case 0x4012: /* Device Long Busy */ + /* XXX: Also use long busy handling here? */ + DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id); tape_3590_print_era_msg(device, irb); + return tape_3590_erp_basic(device, request, irb, -EBUSY); + case 0x4014: + DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id); return tape_3590_erp_long_busy(device, request, irb); case 0x5010: @@ -1064,6 +1478,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, case 0x5120: case 0x1120: tape_med_state_set(device, MS_UNLOADED); + tape_3590_schedule_work(device, TO_CRYPT_OFF); return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); case 0x6020: @@ -1142,21 +1557,47 @@ tape_3590_setup_device(struct tape_device *device) { int rc; struct tape_3590_disc_data *data; + char *rdc_data; DBF_EVENT(6, "3590 device setup\n"); - data = kmalloc(sizeof(struct tape_3590_disc_data), - GFP_KERNEL | GFP_DMA); + data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA); if (data == NULL) return -ENOMEM; data->read_back_op = READ_PREVIOUS; device->discdata = data; - if ((rc = tape_std_assign(device)) == 0) { - /* Try to find out if medium is loaded */ - if ((rc = tape_3590_sense_medium(device)) != 0) - DBF_LH(3, "3590 medium sense returned %d\n", rc); + rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA); + if (!rdc_data) { + rc = -ENOMEM; + goto fail_kmalloc; + } + rc = read_dev_chars(device->cdev, (void**)&rdc_data, 64); + if (rc) { + DBF_LH(3, "Read device characteristics failed!\n"); + goto fail_kmalloc; + } + rc = tape_std_assign(device); + if (rc) + goto fail_rdc_data; + if (rdc_data[31] == 0x13) { + PRINT_INFO("Device has crypto support\n"); + data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK; + tape_3592_disable_crypt(device); + } else { + DBF_EVENT(6, "Device has NO crypto support\n"); } + /* Try to find out if medium is loaded */ + rc = tape_3590_sense_medium(device); + if (rc) { + DBF_LH(3, "3590 medium sense returned %d\n", rc); + goto fail_rdc_data; + } + return 0; +fail_rdc_data: + kfree(rdc_data); +fail_kmalloc: + kfree(data); return rc; } diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h index 
cf274b9445a..aa5138807af 100644 --- a/drivers/s390/char/tape_3590.h +++ b/drivers/s390/char/tape_3590.h @@ -2,7 +2,7 @@ * drivers/s390/char/tape_3590.h * tape device discipline for 3590 tapes. * - * Copyright (C) IBM Corp. 2001,2006 + * Copyright IBM Corp. 2001,2006 * Author(s): Stefan Bader <shbader@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> @@ -38,16 +38,22 @@ #define MSENSE_UNASSOCIATED 0x00 #define MSENSE_ASSOCIATED_MOUNT 0x01 #define MSENSE_ASSOCIATED_UMOUNT 0x02 +#define MSENSE_CRYPT_MASK 0x00000010 #define TAPE_3590_MAX_MSG 0xb0 /* Datatypes */ struct tape_3590_disc_data { - unsigned char modeset_byte; + struct tape390_crypt_info crypt_info; int read_back_op; }; +#define TAPE_3590_CRYPT_INFO(device) \ + ((struct tape_3590_disc_data*)(device->discdata))->crypt_info +#define TAPE_3590_READ_BACK_OP(device) \ + ((struct tape_3590_disc_data*)(device->discdata))->read_back_op + struct tape_3590_sense { unsigned int command_rej:1; @@ -118,7 +124,48 @@ struct tape_3590_sense { struct tape_3590_med_sense { unsigned int macst:4; unsigned int masst:4; - char pad[127]; + char pad1[7]; + unsigned int flags; + char pad2[116]; +} __attribute__ ((packed)); + +/* Datastructures for 3592 encryption support */ + +struct tape3592_kekl { + __u8 flags; + char label[64]; +} __attribute__ ((packed)); + +struct tape3592_kekl_pair { + __u8 count; + struct tape3592_kekl kekl[2]; +} __attribute__ ((packed)); + +struct tape3592_kekl_query_data { + __u16 len; + __u8 fmt; + __u8 mc; + __u32 id; + __u8 flags; + struct tape3592_kekl_pair kekls; + char reserved[116]; +} __attribute__ ((packed)); + +struct tape3592_kekl_query_order { + __u8 code; + __u8 flags; + char reserved1[2]; + __u8 max_count; + char reserved2[35]; +} __attribute__ ((packed)); + +struct tape3592_kekl_set_order { + __u8 code; + __u8 flags; + char reserved1[2]; + __u8 op; + struct tape3592_kekl_pair kekls; + char reserved2[120]; } __attribute__ ((packed)); #endif /* _TAPE_3590_H */ diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index c8a89b3b87d..dd0ecaed592 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -73,7 +73,7 @@ tapeblock_trigger_requeue(struct tape_device *device) /* * Post finished request. */ -static inline void +static void tapeblock_end_request(struct request *req, int uptodate) { if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) @@ -108,7 +108,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data) /* * Feed the tape device CCW queue with requests supplied in a list. */ -static inline int +static int tapeblock_start_request(struct tape_device *device, struct request *req) { struct tape_request * ccw_req; diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 31198c8f271..9faea04e11e 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -3,7 +3,7 @@ * character device frontend for tape device driver * * S390 and zSeries version - * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> @@ -89,22 +89,7 @@ tapechar_cleanup_device(struct tape_device *device) device->nt = NULL; } -/* - * Terminate write command (we write two TMs and skip backward over last) - * This ensures that the tape is always correctly terminated. 
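
The tape_3590.h hunk above introduces packed structures for the 3592 key-encrypting-key label (KEKL) orders. A minimal user-space mirror of those definitions, shown below, is handy for sanity-checking the wire layout; it is hypothetical (uint8_t stands in for __u8, and main() exists only for illustration), but the field layout is copied from the hunk, so the printed sizes of 65, 131 and 256 bytes follow directly from the packed attribute.

/*
 * Hypothetical user-space mirror of the packed KEKL structures added to
 * tape_3590.h above, for checking the on-wire sizes.
 */
#include <stdio.h>
#include <stdint.h>

struct tape3592_kekl {
	uint8_t flags;
	char label[64];
} __attribute__ ((packed));

struct tape3592_kekl_pair {
	uint8_t count;
	struct tape3592_kekl kekl[2];
} __attribute__ ((packed));

struct tape3592_kekl_set_order {
	uint8_t code;
	uint8_t flags;
	char reserved1[2];
	uint8_t op;
	struct tape3592_kekl_pair kekls;
	char reserved2[120];
} __attribute__ ((packed));

int main(void)
{
	/* 65, 131 and 256 bytes respectively with the packed attribute */
	printf("kekl=%zu pair=%zu set_order=%zu\n",
	       sizeof(struct tape3592_kekl),
	       sizeof(struct tape3592_kekl_pair),
	       sizeof(struct tape3592_kekl_set_order));
	return 0;
}
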
- * When the user writes afterwards a new file, he will overwrite the - * second TM and therefore one TM will remain to separate the - * two files on the tape... - */ -static inline void -tapechar_terminate_write(struct tape_device *device) -{ - if (tape_mtop(device, MTWEOF, 1) == 0 && - tape_mtop(device, MTWEOF, 1) == 0) - tape_mtop(device, MTBSR, 1); -} - -static inline int +static int tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) { struct idal_buffer *new; @@ -137,7 +122,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) /* * Tape device read function */ -ssize_t +static ssize_t tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) { struct tape_device *device; @@ -201,7 +186,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) /* * Tape device write function */ -ssize_t +static ssize_t tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) { struct tape_device *device; @@ -291,7 +276,7 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t /* * Character frontend tape device open function. */ -int +static int tapechar_open (struct inode *inode, struct file *filp) { struct tape_device *device; @@ -326,7 +311,7 @@ tapechar_open (struct inode *inode, struct file *filp) * Character frontend tape device release function. */ -int +static int tapechar_release(struct inode *inode, struct file *filp) { struct tape_device *device; diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index c6c2e918b99..e2a8a1a04ba 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -3,7 +3,7 @@ * basic function of the tape device driver * * S390 and zSeries version - * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright IBM Corp. 2001,2006 * Author(s): Carsten Otte <cotte@de.ibm.com> * Michael Holzheu <holzheu@de.ibm.com> * Tuan Ngo-Anh <ngoanh@de.ibm.com> @@ -26,9 +26,11 @@ #include "tape_std.h" #define PRINTK_HEADER "TAPE_CORE: " +#define LONG_BUSY_TIMEOUT 180 /* seconds */ static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); static void tape_delayed_next_request(struct work_struct *); +static void tape_long_busy_timeout(unsigned long data); /* * One list to contain all tape devices of all disciplines, so @@ -69,10 +71,12 @@ const char *tape_op_verbose[TO_SIZE] = [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", [TO_READ_ATTMSG] = "RAT", [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", - [TO_UNASSIGN] = "UAS" + [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON", + [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS", + [TO_KEKL_QUERY] = "KLQ", }; -static inline int +static int busid_to_int(char *bus_id) { int dec; @@ -252,7 +256,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) /* * Stop running ccw. Has to be called with the device lock held. */ -static inline int +static int __tape_cancel_io(struct tape_device *device, struct tape_request *request) { int retries; @@ -346,6 +350,9 @@ tape_generic_online(struct tape_device *device, return -EINVAL; } + init_timer(&device->lb_timeout); + device->lb_timeout.function = tape_long_busy_timeout; + /* Let the discipline have a go at the device. 
*/ device->discipline = discipline; if (!try_module_get(discipline->owner)) { @@ -385,7 +392,7 @@ out: return rc; } -static inline void +static void tape_cleanup_device(struct tape_device *device) { tapeblock_cleanup_device(device); @@ -563,7 +570,7 @@ tape_generic_probe(struct ccw_device *cdev) return ret; } -static inline void +static void __tape_discard_requests(struct tape_device *device) { struct tape_request * request; @@ -703,7 +710,7 @@ tape_free_request (struct tape_request * request) kfree(request); } -static inline int +static int __tape_start_io(struct tape_device *device, struct tape_request *request) { int rc; @@ -733,7 +740,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request) return rc; } -static inline void +static void __tape_start_next_request(struct tape_device *device) { struct list_head *l, *n; @@ -801,7 +808,23 @@ tape_delayed_next_request(struct work_struct *work) spin_unlock_irq(get_ccwdev_lock(device->cdev)); } -static inline void +static void tape_long_busy_timeout(unsigned long data) +{ + struct tape_request *request; + struct tape_device *device; + + device = (struct tape_device *) data; + spin_lock_irq(get_ccwdev_lock(device->cdev)); + request = list_entry(device->req_queue.next, struct tape_request, list); + if (request->status != TAPE_REQUEST_LONG_BUSY) + BUG(); + DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); + __tape_start_next_request(device); + device->lb_timeout.data = (unsigned long) tape_put_device(device); + spin_unlock_irq(get_ccwdev_lock(device->cdev)); +} + +static void __tape_end_request( struct tape_device * device, struct tape_request * request, @@ -878,7 +901,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, * and starts it if the tape is idle. Has to be called with * the device lock held. */ -static inline int +static int __tape_start_request(struct tape_device *device, struct tape_request *request) { int rc; @@ -1094,7 +1117,22 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) /* May be an unsolicited irq */ if(request != NULL) request->rescnt = irb->scsw.count; - + else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) && + !list_empty(&device->req_queue)) { + /* Not Ready to Ready after long busy ? 
*/ + struct tape_request *req; + req = list_entry(device->req_queue.next, + struct tape_request, list); + if (req->status == TAPE_REQUEST_LONG_BUSY) { + DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); + if (del_timer(&device->lb_timeout)) { + device->lb_timeout.data = (unsigned long) + tape_put_device(device); + __tape_start_next_request(device); + } + return; + } + } if (irb->scsw.dstat != 0x0c) { /* Set the 'ONLINE' flag depending on sense byte 1 */ if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) @@ -1142,6 +1180,15 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) break; case TAPE_IO_PENDING: break; + case TAPE_IO_LONG_BUSY: + device->lb_timeout.data = + (unsigned long)tape_get_device_reference(device); + device->lb_timeout.expires = jiffies + + LONG_BUSY_TIMEOUT * HZ; + DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); + add_timer(&device->lb_timeout); + request->status = TAPE_REQUEST_LONG_BUSY; + break; case TAPE_IO_RETRY: rc = __tape_start_io(device, request); if (rc) diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 09844621edc..bc33068b9ce 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -36,7 +36,7 @@ struct tty_driver *tty3270_driver; static int tty3270_max_index; -struct raw3270_fn tty3270_fn; +static struct raw3270_fn tty3270_fn; struct tty3270_cell { unsigned char character; @@ -119,8 +119,7 @@ static void tty3270_update(struct tty3270 *); /* * Setup timeout for a device. On timeout trigger an update. */ -void -tty3270_set_timer(struct tty3270 *tp, int expires) +static void tty3270_set_timer(struct tty3270 *tp, int expires) { if (expires == 0) { if (timer_pending(&tp->timer) && del_timer(&tp->timer)) @@ -841,7 +840,7 @@ tty3270_del_views(void) } } -struct raw3270_fn tty3270_fn = { +static struct raw3270_fn tty3270_fn = { .activate = tty3270_activate, .deactivate = tty3270_deactivate, .intv = (void *) tty3270_irq, @@ -1754,8 +1753,7 @@ static const struct tty_operations tty3270_ops = { .set_termios = tty3270_set_termios }; -void -tty3270_notifier(int index, int active) +static void tty3270_notifier(int index, int active) { if (active) tty_register_device(tty3270_driver, index, NULL); @@ -1767,8 +1765,7 @@ tty3270_notifier(int index, int active) * 3270 tty registration code called from tty_init(). * Most kernel services (incl. kmalloc) are available at this poimt. 
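
Stepping back briefly to the tape_core.c long-busy changes above: they rely on the timer API of that kernel generation (before timer_setup() existed), arming a timer_list when the device reports long busy and deleting it again if the not-ready-to-ready interrupt arrives first. The module sketch below is not taken from the patch; demo_dev and all demo_* names are hypothetical, and it only illustrates that era's init_timer()/add_timer()/del_timer_sync() idiom under those assumptions.

/*
 * Hypothetical sketch of the pre-timer_setup() timer idiom used by the
 * long-busy handling in tape_core.c above.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_LONG_BUSY_TIMEOUT	180	/* seconds, as in tape_core.c */

struct demo_dev {
	struct timer_list lb_timeout;
};

static struct demo_dev demo;

static void demo_long_busy_timeout(unsigned long data)
{
	struct demo_dev *dev = (struct demo_dev *) data;

	/* the driver would restart the request that hit long busy here */
	printk(KERN_INFO "long busy timer fired for %p\n", dev);
}

static int __init demo_init(void)
{
	init_timer(&demo.lb_timeout);
	demo.lb_timeout.function = demo_long_busy_timeout;
	demo.lb_timeout.data = (unsigned long) &demo;
	demo.lb_timeout.expires = jiffies + DEMO_LONG_BUSY_TIMEOUT * HZ;
	add_timer(&demo.lb_timeout);
	return 0;
}

static void __exit demo_exit(void)
{
	/* cancel the timer if it has not fired yet */
	del_timer_sync(&demo.lb_timeout);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
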
*/ -int __init -tty3270_init(void) +static int __init tty3270_init(void) { struct tty_driver *driver; int ret; diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 6cb23040954..4f894dc2373 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -128,9 +128,8 @@ static iucv_interrupt_ops_t vmlogrdr_iucvops = { .MessagePending = vmlogrdr_iucv_MessagePending, }; - -DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); -DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); +static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); +static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); /* * pointer to system service private structure diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 12c2d6b746e..aa65df4dfce 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -43,7 +43,7 @@ typedef enum {add, free} range_action; * Function: blacklist_range * (Un-)blacklist the devices from-to */ -static inline void +static void blacklist_range (range_action action, unsigned int from, unsigned int to, unsigned int ssid) { @@ -69,7 +69,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to, * Get devno/busid from given string. * Shamelessly grabbed from dasd_devmap.c. */ -static inline int +static int blacklist_busid(char **str, int *id0, int *ssid, int *devno) { int val, old_style; @@ -123,10 +123,10 @@ confused: return 1; } -static inline int +static int blacklist_parse_parameters (char *str, range_action action) { - unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; + int from, to, from_id0, to_id0, from_ssid, to_ssid; while (*str != 0 && *str != '\n') { range_action ra = action; @@ -227,7 +227,7 @@ is_blacklisted (int ssid, int devno) * Function: blacklist_parse_proc_parameters * parse the stuff which is piped to /proc/cio_ignore */ -static inline void +static void blacklist_parse_proc_parameters (char *buf) { if (strncmp (buf, "free ", 5) == 0) { diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 38954f5cd14..d48e3ca4752 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -53,7 +53,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer, static struct bus_type ccwgroup_bus_type; -static inline void +static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) { int i; @@ -104,7 +104,7 @@ ccwgroup_release (struct device *dev) kfree(gdev); } -static inline int +static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) { char str[8]; @@ -424,7 +424,7 @@ ccwgroup_probe_ccwdev(struct ccw_device *cdev) return 0; } -static inline struct ccwgroup_device * +static struct ccwgroup_device * __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) { struct ccwgroup_device *gdev; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index cbab8d2ce5c..6f05a44e381 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -93,7 +93,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) u16 sch; /* subchannel */ u8 chpid[8]; /* chpids 0-7 */ u16 fla[8]; /* full link addresses 0-7 */ - } *ssd_area; + } __attribute__ ((packed)) *ssd_area; ssd_area = page; @@ -277,7 +277,7 @@ out_unreg: return 0; } -static inline void +static void s390_set_chpid_offline( __u8 chpid) { char dbf_txt[15]; @@ -338,7 +338,7 @@ s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) return 0x80 >> chp; } -static inline int +static int s390_process_res_acc_new_sch(struct subchannel_id schid) { struct schib 
schib; @@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data) u32 andesc[28]; /* incident-specific information */ u32 isinfo[28]; - } *lir; + } __attribute__ ((packed)) *lir; lir = data; if (!(lir->iq&0x80)) @@ -461,154 +461,146 @@ __get_chpid_from_lir(void *data) return (u16) (lir->indesc[0]&0x000000ff); } -int -chsc_process_crw(void) +struct chsc_sei_area { + struct chsc_header request; + u32 reserved1; + u32 reserved2; + u32 reserved3; + struct chsc_header response; + u32 reserved4; + u8 flags; + u8 vf; /* validity flags */ + u8 rs; /* reporting source */ + u8 cc; /* content code */ + u16 fla; /* full link address */ + u16 rsid; /* reporting source id */ + u32 reserved5; + u32 reserved6; + u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */ + /* ccdf has to be big enough for a link-incident record */ +} __attribute__ ((packed)); + +static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) +{ + int chpid; + + CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", + sei_area->rs, sei_area->rsid); + if (sei_area->rs != 4) + return 0; + chpid = __get_chpid_from_lir(sei_area->ccdf); + if (chpid < 0) + CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); + else + s390_set_chpid_offline(chpid); + + return 0; +} + +static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) { - int chpid, ret; struct res_acc_data res_data; - struct { - struct chsc_header request; - u32 reserved1; - u32 reserved2; - u32 reserved3; - struct chsc_header response; - u32 reserved4; - u8 flags; - u8 vf; /* validity flags */ - u8 rs; /* reporting source */ - u8 cc; /* content code */ - u16 fla; /* full link address */ - u16 rsid; /* reporting source id */ - u32 reserved5; - u32 reserved6; - u32 ccdf[96]; /* content-code dependent field */ - /* ccdf has to be big enough for a link-incident record */ - } *sei_area; + struct device *dev; + int status; + int rc; + + CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " + "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); + if (sei_area->rs != 4) + return 0; + /* allocate a new channel path structure, if needed */ + status = get_chp_status(sei_area->rsid); + if (status < 0) + new_channel_path(sei_area->rsid); + else if (!status) + return 0; + dev = get_device(&css[0]->chps[sei_area->rsid]->dev); + memset(&res_data, 0, sizeof(struct res_acc_data)); + res_data.chp = to_channelpath(dev); + if ((sei_area->vf & 0xc0) != 0) { + res_data.fla = sei_area->fla; + if ((sei_area->vf & 0xc0) == 0xc0) + /* full link address */ + res_data.fla_mask = 0xffff; + else + /* link address */ + res_data.fla_mask = 0xff00; + } + rc = s390_process_res_acc(&res_data); + put_device(dev); + + return rc; +} + +static int chsc_process_sei(struct chsc_sei_area *sei_area) +{ + int rc; + + /* Check if we might have lost some information. */ + if (sei_area->flags & 0x40) + CIO_CRW_EVENT(2, "chsc: event overflow\n"); + /* which kind of information was stored? 
*/ + rc = 0; + switch (sei_area->cc) { + case 1: /* link incident*/ + rc = chsc_process_sei_link_incident(sei_area); + break; + case 2: /* i/o resource accessibiliy */ + rc = chsc_process_sei_res_acc(sei_area); + break; + default: /* other stuff */ + CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", + sei_area->cc); + break; + } + + return rc; +} + +int chsc_process_crw(void) +{ + struct chsc_sei_area *sei_area; + int ret; + int rc; if (!sei_page) return 0; - /* - * build the chsc request block for store event information - * and do the call - * This function is only called by the machine check handler thread, - * so we don't need locking for the sei_page. - */ + /* Access to sei_page is serialized through machine check handler + * thread, so no need for locking. */ sei_area = sei_page; CIO_TRACE_EVENT( 2, "prcss"); ret = 0; do { - int ccode, status; - struct device *dev; memset(sei_area, 0, sizeof(*sei_area)); - memset(&res_data, 0, sizeof(struct res_acc_data)); sei_area->request.length = 0x0010; sei_area->request.code = 0x000e; + if (chsc(sei_area)) + break; - ccode = chsc(sei_area); - if (ccode > 0) - return 0; - - switch (sei_area->response.code) { - /* for debug purposes, check for problems */ - case 0x0001: - CIO_CRW_EVENT(4, "chsc_process_crw: event information " - "successfully stored\n"); - break; /* everything ok */ - case 0x0002: - CIO_CRW_EVENT(2, - "chsc_process_crw: invalid command!\n"); - return 0; - case 0x0003: - CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc " - "request block!\n"); - return 0; - case 0x0005: - CIO_CRW_EVENT(2, "chsc_process_crw: no event " - "information stored\n"); - return 0; - default: - CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n", + if (sei_area->response.code == 0x0001) { + CIO_CRW_EVENT(4, "chsc: sei successful\n"); + rc = chsc_process_sei(sei_area); + if (rc) + ret = rc; + } else { + CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", sei_area->response.code); - return 0; - } - - /* Check if we might have lost some information. */ - if (sei_area->flags & 0x40) - CIO_CRW_EVENT(2, "chsc_process_crw: Event information " - "has been lost due to overflow!\n"); - - if (sei_area->rs != 4) { - CIO_CRW_EVENT(2, "chsc_process_crw: reporting source " - "(%04X) isn't a chpid!\n", - sei_area->rsid); - continue; - } - - /* which kind of information was stored? 
*/ - switch (sei_area->cc) { - case 1: /* link incident*/ - CIO_CRW_EVENT(4, "chsc_process_crw: " - "channel subsystem reports link incident," - " reporting source is chpid %x\n", - sei_area->rsid); - chpid = __get_chpid_from_lir(sei_area->ccdf); - if (chpid < 0) - CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n", - __FUNCTION__); - else - s390_set_chpid_offline(chpid); - break; - - case 2: /* i/o resource accessibiliy */ - CIO_CRW_EVENT(4, "chsc_process_crw: " - "channel subsystem reports some I/O " - "devices may have become accessible\n"); - pr_debug("Data received after sei: \n"); - pr_debug("Validity flags: %x\n", sei_area->vf); - - /* allocate a new channel path structure, if needed */ - status = get_chp_status(sei_area->rsid); - if (status < 0) - new_channel_path(sei_area->rsid); - else if (!status) - break; - dev = get_device(&css[0]->chps[sei_area->rsid]->dev); - res_data.chp = to_channelpath(dev); - pr_debug("chpid: %x", sei_area->rsid); - if ((sei_area->vf & 0xc0) != 0) { - res_data.fla = sei_area->fla; - if ((sei_area->vf & 0xc0) == 0xc0) { - pr_debug(" full link addr: %x", - sei_area->fla); - res_data.fla_mask = 0xffff; - } else { - pr_debug(" link addr: %x", - sei_area->fla); - res_data.fla_mask = 0xff00; - } - } - ret = s390_process_res_acc(&res_data); - pr_debug("\n\n"); - put_device(dev); - break; - - default: /* other stuff */ - CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n", - sei_area->cc); + ret = 0; break; } } while (sei_area->flags & 0x80); + return ret; } -static inline int +static int __chp_add_new_sch(struct subchannel_id schid) { struct schib schib; int ret; - if (stsch(schid, &schib)) + if (stsch_err(schid, &schib)) /* We're through */ return need_rescan ? -EAGAIN : -ENXIO; @@ -709,7 +701,7 @@ chp_process_crw(int chpid, int on) return chp_add(chpid); } -static inline int check_for_io_on_path(struct subchannel *sch, int index) +static int check_for_io_on_path(struct subchannel *sch, int index) { int cc; @@ -741,7 +733,7 @@ static void terminate_internal_io(struct subchannel *sch) sch->driver->termination(&sch->dev); } -static inline void +static void __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) { int chp, old_lpm; @@ -967,8 +959,8 @@ static struct bin_attribute chp_measurement_attr = { static void chsc_remove_chp_cmg_attr(struct channel_path *chp) { - sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); - sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); + device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); + device_remove_bin_file(&chp->dev, &chp_measurement_attr); } static int @@ -976,14 +968,12 @@ chsc_add_chp_cmg_attr(struct channel_path *chp) { int ret; - ret = sysfs_create_bin_file(&chp->dev.kobj, - &chp_measurement_chars_attr); + ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); if (ret) return ret; - ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); + ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); if (ret) - sysfs_remove_bin_file(&chp->dev.kobj, - &chp_measurement_chars_attr); + device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); return ret; } @@ -1042,7 +1032,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) u32 : 4; u32 fmt : 4; u32 : 16; - } *secm_area; + } __attribute__ ((packed)) *secm_area; int ret, ccode; secm_area = page; @@ -1253,7 +1243,7 @@ chsc_determine_channel_path_description(int chpid, struct chsc_header response; u32 zeroes2; struct channel_path_desc desc; - } *scpd_area; + } 
__attribute__ ((packed)) *scpd_area; scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scpd_area) @@ -1350,7 +1340,7 @@ chsc_get_channel_measurement_chars(struct channel_path *chp) u32 cmg : 8; u32 zeroes3; u32 data[NR_MEASUREMENT_CHARS]; - } *scmc_area; + } __attribute__ ((packed)) *scmc_area; scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scmc_area) @@ -1517,7 +1507,7 @@ chsc_enable_facility(int operation_code) u32 reserved5:4; u32 format2:4; u32 reserved6:24; - } *sda_area; + } __attribute__ ((packed)) *sda_area; sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); if (!sda_area) @@ -1569,7 +1559,7 @@ chsc_determine_css_characteristics(void) u32 reserved4; u32 general_char[510]; u32 chsc_char[518]; - } *scsc_area; + } __attribute__ ((packed)) *scsc_area; scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!scsc_area) { diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index a259245780a..0fb2b024208 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h @@ -10,17 +10,17 @@ struct chsc_header { u16 length; u16 code; -}; +} __attribute__ ((packed)); #define NR_MEASUREMENT_CHARS 5 struct cmg_chars { u32 values[NR_MEASUREMENT_CHARS]; -}; +} __attribute__ ((packed)); #define NR_MEASUREMENT_ENTRIES 8 struct cmg_entry { u32 values[NR_MEASUREMENT_ENTRIES]; -}; +} __attribute__ ((packed)); struct channel_path_desc { u8 flags; @@ -31,7 +31,7 @@ struct channel_path_desc { u8 zeroes; u8 chla; u8 chpp; -}; +} __attribute__ ((packed)); struct channel_path { int id; @@ -47,6 +47,9 @@ struct channel_path { extern void s390_process_css( void ); extern void chsc_validate_chpids(struct subchannel *); extern void chpid_is_actually_online(int); +extern int css_get_ssd_info(struct subchannel *); +extern int chsc_process_crw(void); +extern int chp_process_crw(int, int); struct css_general_char { u64 : 41; diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index ae1bf231d08..b3a56dc5f68 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -122,7 +122,7 @@ cio_get_options (struct subchannel *sch) * Use tpi to get a pending interrupt, call the interrupt handler and * return a pointer to the subchannel structure. */ -static inline int +static int cio_tpi(void) { struct tpi_info *tpi_info; @@ -152,7 +152,7 @@ cio_tpi(void) return 1; } -static inline int +static int cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) { char dbf_text[15]; @@ -585,7 +585,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) * This device must not be known to Linux. So we simply * say that there is no device and return ENODEV. */ - CIO_MSG_EVENT(0, "Blacklisted device detected " + CIO_MSG_EVENT(4, "Blacklisted device detected " "at devno %04X, subchannel set %x\n", sch->schib.pmcw.dev, sch->schid.ssid); err = -ENODEV; @@ -646,7 +646,7 @@ do_IRQ (struct pt_regs *regs) * Make sure that the i/o interrupt did not "overtake" * the last HZ timer interrupt. */ - account_ticks(); + account_ticks(S390_lowcore.int_clock); /* * Get interrupt information from lowcore */ @@ -832,7 +832,7 @@ cio_get_console_subchannel(void) } #endif -static inline int +static int __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) { int retry, cc; @@ -850,7 +850,20 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) return -EBUSY; /* uhm... 
*/ } -static inline int +/* we can't use the normal udelay here, since it enables external interrupts */ + +static void udelay_reset(unsigned long usecs) +{ + uint64_t start_cc, end_cc; + + asm volatile ("STCK %0" : "=m" (start_cc)); + do { + cpu_relax(); + asm volatile ("STCK %0" : "=m" (end_cc)); + } while (((end_cc - start_cc)/4096) < usecs); +} + +static int __clear_subchannel_easy(struct subchannel_id schid) { int retry; @@ -865,7 +878,7 @@ __clear_subchannel_easy(struct subchannel_id schid) if (schid_equal(&ti.schid, &schid)) return 0; } - udelay(100); + udelay_reset(100); } return -EBUSY; } @@ -882,11 +895,11 @@ static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) int rc; pgm_check_occured = 0; - s390_reset_pgm_handler = cio_reset_pgm_check_handler; + s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; rc = stsch(schid, addr); - s390_reset_pgm_handler = NULL; + s390_base_pgm_handler_fn = NULL; - /* The program check handler could have changed pgm_check_occured */ + /* The program check handler could have changed pgm_check_occured. */ barrier(); if (pgm_check_occured) @@ -944,7 +957,7 @@ static void css_reset(void) /* Reset subchannels. */ for_each_subchannel(__shutdown_subchannel_easy, NULL); /* Reset channel paths. */ - s390_reset_mcck_handler = s390_reset_chpids_mcck_handler; + s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; /* Enable channel report machine checks. */ __ctl_set_bit(14, 28); /* Temporarily reenable machine checks. */ @@ -969,7 +982,7 @@ static void css_reset(void) local_mcck_disable(); /* Disable channel report machine checks. */ __ctl_clear_bit(14, 28); - s390_reset_mcck_handler = NULL; + s390_base_mcck_handler_fn = NULL; } static struct reset_call css_reset_call = { diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 828b2d334f0..90b22faabbf 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -519,8 +519,8 @@ struct cmb { /* insert a single device into the cmb_area list * called with cmb_area.lock held from alloc_cmb */ -static inline int alloc_cmb_single (struct ccw_device *cdev, - struct cmb_data *cmb_data) +static int alloc_cmb_single(struct ccw_device *cdev, + struct cmb_data *cmb_data) { struct cmb *cmb; struct ccw_device_private *node; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 9d6c0244686..fe0ace7aece 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -30,7 +30,7 @@ struct channel_subsystem *css[__MAX_CSSID + 1]; int css_characteristics_avail = 0; -inline int +int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) { struct subchannel_id schid; @@ -108,9 +108,6 @@ css_subchannel_release(struct device *dev) } } -extern int css_get_ssd_info(struct subchannel *sch); - - int css_sch_device_register(struct subchannel *sch) { int ret; @@ -187,7 +184,7 @@ get_subchannel_by_schid(struct subchannel_id schid) return dev ? to_subchannel(dev) : NULL; } -static inline int css_get_subchannel_status(struct subchannel *sch) +static int css_get_subchannel_status(struct subchannel *sch) { struct schib schib; @@ -299,7 +296,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) /* Will be done on the slow path. */ return -EAGAIN; } - if (stsch(schid, &schib) || !schib.pmcw.dnv) { + if (stsch_err(schid, &schib) || !schib.pmcw.dnv) { /* Unusable - ignore. 
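
The cio.c hunk above swaps udelay() for udelay_reset(), which busy-waits on the TOD clock (STCK) because the reset path must not enable external interrupts. The user-space analogue below is hypothetical and only mirrors that polling pattern; CLOCK_MONOTONIC stands in for the TOD clock, and busy_udelay() is an invented name, not part of the patch.

/*
 * Hypothetical user-space analogue of udelay_reset() above: delay by
 * polling a raw clock instead of relying on interrupt-driven primitives.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t) ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void busy_udelay(unsigned long usecs)
{
	uint64_t start = now_ns();

	while (now_ns() - start < usecs * 1000ULL)
		;	/* spin; the kernel version calls cpu_relax() here */
}

int main(void)
{
	uint64_t t0 = now_ns();

	busy_udelay(100);	/* the delay used by __clear_subchannel_easy */
	printf("waited ~%llu us\n",
	       (unsigned long long) ((now_ns() - t0) / 1000));
	return 0;
}
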
*/ return 0; } @@ -417,7 +414,7 @@ static void reprobe_all(struct work_struct *unused) need_reprobe); } -DECLARE_WORK(css_reprobe_work, reprobe_all); +static DECLARE_WORK(css_reprobe_work, reprobe_all); /* Schedule reprobing of all unregistered subchannels. */ void css_schedule_reprobe(void) @@ -578,7 +575,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); -static inline int __init setup_css(int nr) +static int __init setup_css(int nr) { u32 tod_high; int ret; diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 3464c5b875c..ca2bab932a8 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -143,6 +143,8 @@ extern void css_sch_device_unregister(struct subchannel *); extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); +extern int css_process_crw(int, int); +extern void css_reiterate_subchannels(void); #define __MAX_SUBCHANNEL 65535 #define __MAX_SSID 3 diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 803579053c2..e322111fb36 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -138,7 +138,6 @@ struct bus_type ccw_bus_type; static int io_subchannel_probe (struct subchannel *); static int io_subchannel_remove (struct subchannel *); -void io_subchannel_irq (struct device *); static int io_subchannel_notify(struct device *, int); static void io_subchannel_verify(struct device *); static void io_subchannel_ioterm(struct device *); @@ -235,11 +234,8 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf) ssize_t ret = 0; int chp; - if (ssd) - for (chp = 0; chp < 8; chp++) - ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); - else - ret += sprintf (buf, "n/a"); + for (chp = 0; chp < 8; chp++) + ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); ret += sprintf (buf+ret, "\n"); return min((ssize_t)PAGE_SIZE, ret); } @@ -552,13 +548,13 @@ static struct attribute_group ccwdev_attr_group = { .attrs = ccwdev_attrs, }; -static inline int +static int device_add_files (struct device *dev) { return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); } -static inline void +static void device_remove_files(struct device *dev) { sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 29db6341d63..b66338b7657 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -74,6 +74,7 @@ extern struct workqueue_struct *ccw_device_notify_work; extern wait_queue_head_t ccw_device_init_wq; extern atomic_t ccw_device_init_count; +void io_subchannel_irq (struct device *pdev); void io_subchannel_recog_done(struct ccw_device *cdev); int ccw_device_cancel_halt_clear(struct ccw_device *); @@ -118,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *); /* qdio needs this. 
*/ void ccw_device_set_timeout(struct ccw_device *, int); extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); +extern struct bus_type ccw_bus_type; /* Channel measurement facility related */ void retry_set_schib(struct ccw_device *cdev); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index eed14572fc3..51238e7555b 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -206,7 +206,7 @@ ccw_device_handle_oper(struct ccw_device *cdev) * been varied online on the SE so we have to find out by magic (i. e. driving * the channel subsystem to device selection and updating our path masks). */ -static inline void +static void __recover_lost_chpids(struct subchannel *sch, int old_lpm) { int mask, i; @@ -387,7 +387,7 @@ ccw_device_done(struct ccw_device *cdev, int state) put_device (&cdev->dev); } -static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) +static int cmp_pgid(struct pgid *p1, struct pgid *p2) { char *c1; char *c2; @@ -842,6 +842,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) call_handler_unsol: if (cdev->handler) cdev->handler (cdev, 0, irb); + if (cdev->private->flags.doverify) + ccw_device_online_verify(cdev, 0); return; } /* Accumulate status and find out if a basic sense is needed. */ @@ -892,7 +894,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) /* * Got an interrupt for a basic sense. */ -void +static void ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) { struct irb *irb; diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index d269607336e..d7b25b8f71d 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -302,7 +302,7 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) wake_up(&cdev->private->wait_q); } -static inline int +static int __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) { int ret; diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index bdcf930f7be..6b1caea622e 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -25,7 +25,7 @@ * Check for any kind of channel or interface control check but don't * issue the message for the console device */ -static inline void +static void ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) { if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | @@ -72,7 +72,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) /* * Copy valid bits from the extended control word to device irb. */ -static inline void +static void ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) { /* @@ -94,7 +94,7 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) /* * Check if extended status word is valid. */ -static inline int +static int ccw_device_accumulate_esw_valid(struct irb *irb) { if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) @@ -109,7 +109,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb) /* * Copy valid bits from the extended status word to device irb. 
*/ -static inline void +static void ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) { struct irb *cdev_irb; diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 6fd1940842e..d726cd5777d 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -66,7 +66,6 @@ MODULE_LICENSE("GPL"); /******************** HERE WE GO ***********************************/ static const char version[] = "QDIO base support version 2"; -extern struct bus_type ccw_bus_type; static int qdio_performance_stats = 0; static int proc_perf_file_registration; @@ -138,7 +137,7 @@ qdio_release_q(struct qdio_q *q) } /*check ccq */ -static inline int +static int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) { char dbf_text[15]; @@ -153,7 +152,7 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq) return -EIO; } /* EQBS: extract buffer states */ -static inline int +static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, unsigned int *start, unsigned int *cnt) { @@ -188,7 +187,7 @@ again: } /* SQBS: set buffer states */ -static inline int +static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, unsigned int *start, unsigned int *cnt) { @@ -315,7 +314,7 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit) * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns * an access exception */ -static inline int +static int qdio_siga_output(struct qdio_q *q) { int cc; @@ -349,7 +348,7 @@ qdio_siga_output(struct qdio_q *q) return cc; } -static inline int +static int qdio_siga_input(struct qdio_q *q) { int cc; @@ -421,7 +420,7 @@ tiqdio_sched_tl(void) tasklet_hi_schedule(&tiqdio_tasklet); } -static inline void +static void qdio_mark_tiq(struct qdio_q *q) { unsigned long flags; @@ -471,7 +470,7 @@ qdio_mark_q(struct qdio_q *q) tasklet_schedule(&q->tasklet); } -static inline int +static int qdio_stop_polling(struct qdio_q *q) { #ifdef QDIO_USE_PROCESSING_STATE @@ -525,7 +524,7 @@ qdio_stop_polling(struct qdio_q *q) * sophisticated locking outside of unmark_q, so that we don't need to * disable the interrupts :-) */ -static inline void +static void qdio_unmark_q(struct qdio_q *q) { unsigned long flags; @@ -691,7 +690,7 @@ qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q) return q->first_to_check; } -static inline int +static int qdio_get_outbound_buffer_frontier(struct qdio_q *q) { struct qdio_irq *irq; @@ -774,7 +773,7 @@ out: } /* all buffers are processed */ -static inline int +static int qdio_is_outbound_q_done(struct qdio_q *q) { int no_used; @@ -796,7 +795,7 @@ qdio_is_outbound_q_done(struct qdio_q *q) return (no_used==0); } -static inline int +static int qdio_has_outbound_q_moved(struct qdio_q *q) { int i; @@ -816,7 +815,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q) } } -static inline void +static void qdio_kick_outbound_q(struct qdio_q *q) { int result; @@ -905,7 +904,7 @@ qdio_kick_outbound_q(struct qdio_q *q) } } -static inline void +static void qdio_kick_outbound_handler(struct qdio_q *q) { int start, end, real_end, count; @@ -942,7 +941,7 @@ qdio_kick_outbound_handler(struct qdio_q *q) q->error_status_flags=0; } -static inline void +static void __qdio_outbound_processing(struct qdio_q *q) { int siga_attempts; @@ -1002,7 +1001,7 @@ qdio_outbound_processing(struct qdio_q *q) /************************* INBOUND ROUTINES *******************************/ -static inline int +static int qdio_get_inbound_buffer_frontier(struct qdio_q *q) { struct qdio_irq *irq; @@ -1133,7 +1132,7 @@ out: return q->first_to_check; } -static inline int +static 
int qdio_has_inbound_q_moved(struct qdio_q *q) { int i; @@ -1167,7 +1166,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q) } /* means, no more buffers to be filled */ -static inline int +static int tiqdio_is_inbound_q_done(struct qdio_q *q) { int no_used; @@ -1228,7 +1227,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q) return 0; } -static inline int +static int qdio_is_inbound_q_done(struct qdio_q *q) { int no_used; @@ -1296,7 +1295,7 @@ qdio_is_inbound_q_done(struct qdio_q *q) } } -static inline void +static void qdio_kick_inbound_handler(struct qdio_q *q) { int count, start, end, real_end, i; @@ -1343,7 +1342,7 @@ qdio_kick_inbound_handler(struct qdio_q *q) } } -static inline void +static void __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) { struct qdio_irq *irq_ptr; @@ -1442,7 +1441,7 @@ tiqdio_inbound_processing(struct qdio_q *q) __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); } -static inline void +static void __qdio_inbound_processing(struct qdio_q *q) { int q_laps=0; @@ -1493,7 +1492,7 @@ qdio_inbound_processing(struct qdio_q *q) /************************* MAIN ROUTINES *******************************/ #ifdef QDIO_USE_PROCESSING_STATE -static inline int +static int tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) { if (!q) { @@ -1545,7 +1544,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) } #endif /* QDIO_USE_PROCESSING_STATE */ -static inline void +static void tiqdio_inbound_checks(void) { struct qdio_q *q; @@ -1949,7 +1948,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) mb(); } -static inline void +static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) { char dbf_text[15]; @@ -1966,7 +1965,7 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) } -static inline void +static void qdio_handle_pci(struct qdio_irq *irq_ptr) { int i; @@ -2002,7 +2001,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr) static void qdio_establish_handle_irq(struct ccw_device*, int, int); -static inline void +static void qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, int cstat, int dstat) { @@ -2229,7 +2228,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, return cc; } -static inline void +static void qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, unsigned long token) { @@ -2740,7 +2739,7 @@ qdio_free(struct ccw_device *cdev) return 0; } -static inline void +static void qdio_allocate_do_dbf(struct qdio_initialize *init_data) { char dbf_text[20]; /* if a printf printed out more than 8 chars */ @@ -2773,7 +2772,7 @@ qdio_allocate_do_dbf(struct qdio_initialize *init_data) QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); } -static inline void +static void qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) { irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; @@ -2792,7 +2791,7 @@ qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; } -static inline void +static void qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, int j, int iqfmt) { @@ -2813,7 +2812,7 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, } -static inline void +static void qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) { int i; @@ -2839,7 +2838,7 @@ qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) } } -static inline void +static void qdio_initialize_set_siga_flags_output(struct 
qdio_irq *irq_ptr) { int i; @@ -2865,7 +2864,7 @@ qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) } } -static inline int +static int qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, int dstat) { @@ -3014,7 +3013,7 @@ qdio_allocate(struct qdio_initialize *init_data) return 0; } -int qdio_fill_irq(struct qdio_initialize *init_data) +static int qdio_fill_irq(struct qdio_initialize *init_data) { int i; char dbf_text[15]; @@ -3367,7 +3366,7 @@ qdio_activate(struct ccw_device *cdev, int flags) } /* buffers filled forwards again to make Rick happy */ -static inline void +static void qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) { @@ -3386,7 +3385,7 @@ qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, } } -static inline void +static void qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) { @@ -3407,7 +3406,7 @@ qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, } } -static inline void +static void do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) @@ -3443,7 +3442,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, qdio_mark_q(q); } -static inline void +static void do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, unsigned int qidx, unsigned int count, struct qdio_buffer *buffers) diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 81b5899f401..c7d1355237b 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -465,7 +465,7 @@ static int ap_device_probe(struct device *dev) * Flush all requests from the request/pending queue of an AP device. * @ap_dev: pointer to the AP device. */ -static inline void __ap_flush_queue(struct ap_device *ap_dev) +static void __ap_flush_queue(struct ap_device *ap_dev) { struct ap_message *ap_msg, *next; @@ -587,7 +587,7 @@ static struct bus_attribute *const ap_bus_attrs[] = { /** * Pick one of the 16 ap domains. */ -static inline int ap_select_domain(void) +static int ap_select_domain(void) { int queue_depth, device_type, count, max_count, best_domain; int rc, i, j; @@ -825,7 +825,7 @@ static inline void ap_schedule_poll_timer(void) * required, bit 2^1 is set if the poll timer needs to get armed * Returns 0 if the device is still present, -ENODEV if not. */ -static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) +static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) { struct ap_queue_status status; struct ap_message *ap_msg; @@ -872,7 +872,7 @@ static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) * required, bit 2^1 is set if the poll timer needs to get armed * Returns 0 if the device is still present, -ENODEV if not. 
*/ -static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) +static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) { struct ap_queue_status status; struct ap_message *ap_msg; diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 1edc10a7a6f..b9e59bc9435 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -791,7 +791,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd, return rc; } -long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, +static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (cmd == ICARSAMODEXPO) @@ -833,8 +833,8 @@ static struct miscdevice zcrypt_misc_device = { */ static struct proc_dir_entry *zcrypt_entry; -static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, - unsigned int len) +static int sprintcl(unsigned char *outaddr, unsigned char *addr, + unsigned int len) { int hl, i; @@ -845,8 +845,8 @@ static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, return hl; } -static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, - unsigned int len) +static int sprintrw(unsigned char *outaddr, unsigned char *addr, + unsigned int len) { int hl, inl, c, cx; @@ -865,8 +865,8 @@ static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, return hl; } -static inline int sprinthx(unsigned char *title, unsigned char *outaddr, - unsigned char *addr, unsigned int len) +static int sprinthx(unsigned char *title, unsigned char *outaddr, + unsigned char *addr, unsigned int len) { int hl, inl, r, rx; @@ -885,8 +885,8 @@ static inline int sprinthx(unsigned char *title, unsigned char *outaddr, return hl; } -static inline int sprinthx4(unsigned char *title, unsigned char *outaddr, - unsigned int *array, unsigned int len) +static int sprinthx4(unsigned char *title, unsigned char *outaddr, + unsigned int *array, unsigned int len) { int hl, r; @@ -943,7 +943,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, zcrypt_qdepth_mask(workarea); len += sprinthx("Waiting work element counts", resp_buff+len, workarea, AP_DEVICES); - zcrypt_perdev_reqcnt((unsigned int *) workarea); + zcrypt_perdev_reqcnt((int *) workarea); len += sprinthx4("Per-device successfully completed request counts", resp_buff+len,(unsigned int *) workarea, AP_DEVICES); *eof = 1; diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 32e37014345..818ffe05ac0 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c @@ -191,10 +191,10 @@ static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev, * * Returns 0 on success or -EFAULT. 
*/ -static inline int convert_type84(struct zcrypt_device *zdev, - struct ap_message *reply, - char __user *outputdata, - unsigned int outputdatalength) +static int convert_type84(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) { struct type84_hdr *t84h = reply->message; char *data; diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index b7153c1e15c..252443b6bd1 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -709,7 +709,8 @@ out_free: * PCIXCC/CEX2C device to the request distributor * @xcRB: pointer to the send_cprb request buffer */ -long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) +static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, + struct ica_xcRB *xcRB) { struct ap_message ap_msg; struct response_type resp_type = { diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 95f4e105cb9..7809a79feec 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -121,7 +121,7 @@ MODULE_LICENSE("GPL"); #define DEBUG #endif - char debug_buffer[255]; +static char debug_buffer[255]; /** * Debug Facility Stuff */ @@ -223,16 +223,14 @@ static void claw_timer ( struct chbk * p_ch ); /* Functions */ static int add_claw_reads(struct net_device *dev, struct ccwbk* p_first, struct ccwbk* p_last); -static void inline ccw_check_return_code (struct ccw_device *cdev, - int return_code); -static void inline ccw_check_unit_check (struct chbk * p_ch, - unsigned char sense ); +static void ccw_check_return_code (struct ccw_device *cdev, int return_code); +static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense ); static int find_link(struct net_device *dev, char *host_name, char *ws_name ); static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); static int init_ccw_bk(struct net_device *dev); static void probe_error( struct ccwgroup_device *cgdev); static struct net_device_stats *claw_stats(struct net_device *dev); -static int inline pages_to_order_of_mag(int num_of_pages); +static int pages_to_order_of_mag(int num_of_pages); static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); #ifdef DEBUG static void dumpit (char *buf, int len); @@ -1310,7 +1308,7 @@ claw_timer ( struct chbk * p_ch ) * of magnitude get_free_pages() has an upper order of 9 * *--------------------------------------------------------------------*/ -static int inline +static int pages_to_order_of_mag(int num_of_pages) { int order_of_mag=1; /* assume 2 pages */ @@ -1482,7 +1480,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, * * *-------------------------------------------------------------------*/ -static void inline +static void ccw_check_return_code(struct ccw_device *cdev, int return_code) { #ifdef FUNCTRACE @@ -1529,7 +1527,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code) * ccw_check_unit_check * *--------------------------------------------------------------------*/ -static void inline +static void ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) { struct net_device *dev = p_ch->ndev; diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 03cc263fe0d..5a84fbbc661 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -369,7 +369,7 @@ ctc_dump_skb(struct sk_buff *skb, int offset) * @param ch The channel where this skb has been received. * @param pskb The received skb. 
*/ -static __inline__ void +static void ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) { struct net_device *dev = ch->netdev; @@ -512,7 +512,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) * @param ch The channel, the error belongs to. * @param return_code The error code to inspect. */ -static void inline +static void ccw_check_return_code(struct channel *ch, int return_code, char *msg) { DBF_TEXT(trace, 5, __FUNCTION__); @@ -547,7 +547,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg) * @param ch The channel, the sense code belongs to. * @param sense The sense code to inspect. */ -static void inline +static void ccw_unit_check(struct channel *ch, unsigned char sense) { DBF_TEXT(trace, 5, __FUNCTION__); @@ -603,7 +603,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q) } } -static __inline__ int +static int ctc_checkalloc_buffer(struct channel *ch, int warn) { DBF_TEXT(trace, 5, __FUNCTION__); diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index e965f03a729..76728ae4b84 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c @@ -57,7 +57,7 @@ static struct ccw_device_id cu3088_ids[] = { static struct ccw_driver cu3088_driver; -struct device *cu3088_root_dev; +static struct device *cu3088_root_dev; static ssize_t group_write(struct device_driver *drv, const char *buf, size_t count) diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index e5665b6743a..b97dd15bdb9 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -828,7 +828,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) /** * Emit buffer of a lan comand. */ -void +static void lcs_lancmd_timeout(unsigned long data) { struct lcs_reply *reply, *list_reply, *r; @@ -1360,7 +1360,7 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb) return 0; } -void +static void lcs_schedule_recovery(struct lcs_card *card) { LCS_DBF_TEXT(2, trace, "startrec"); @@ -1990,7 +1990,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char } -DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); +static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); static ssize_t lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index d7d1cc0a5c8..3346088f47e 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -2053,7 +2053,7 @@ out_free_ndev: return ret; } -DRIVER_ATTR(connection, 0200, NULL, conn_write); +static DRIVER_ATTR(connection, 0200, NULL, conn_write); static ssize_t remove_write (struct device_driver *drv, const char *buf, size_t count) @@ -2112,7 +2112,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) return -EINVAL; } -DRIVER_ATTR(remove, 0200, NULL, remove_write); +static DRIVER_ATTR(remove, 0200, NULL, remove_write); static void netiucv_banner(void) diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 6bb558a9a03..7c735e1fe06 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -49,7 +49,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue, return buffers_needed; } -static inline void +static void qeth_eddp_free_context(struct qeth_eddp_context *ctx) { int i; @@ -91,7 +91,7 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) } } -static inline int +static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, struct 
qeth_eddp_context *ctx) { @@ -196,7 +196,7 @@ out: return flush_cnt; } -static inline void +static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, struct qeth_eddp_data *eddp, int data_len) { @@ -256,7 +256,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, ctx->offset += eddp->thl; } -static inline void +static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, __wsum *hcsum) { @@ -302,7 +302,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, } } -static inline void +static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, struct qeth_eddp_data *eddp, int data_len, __wsum hcsum) @@ -349,7 +349,7 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); } -static inline __wsum +static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) { __wsum phcsum; /* pseudo header checksum */ @@ -363,7 +363,7 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); } -static inline __wsum +static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) { __be32 proto; @@ -381,7 +381,7 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) return phcsum; } -static inline struct qeth_eddp_data * +static struct qeth_eddp_data * qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) { struct qeth_eddp_data *eddp; @@ -399,7 +399,7 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) return eddp; } -static inline void +static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, struct qeth_eddp_data *eddp) { @@ -464,7 +464,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, } } -static inline int +static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, struct sk_buff *skb, struct qeth_hdr *qhdr) { @@ -505,7 +505,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, return 0; } -static inline void +static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, int hdr_len) { @@ -529,7 +529,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, (skb_shinfo(skb)->gso_segs + 1); } -static inline struct qeth_eddp_context * +static struct qeth_eddp_context * qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, int hdr_len) { @@ -581,7 +581,7 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, return ctx; } -static inline struct qeth_eddp_context * +static struct qeth_eddp_context * qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *qhdr) { @@ -625,5 +625,3 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb, } return NULL; } - - diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index d2efa5ff125..2257e45594b 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -651,7 +651,7 @@ __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo, return 0; } -static inline int +static int __qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, int same_type) { @@ -795,7 +795,7 @@ qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) return rc; } -static inline void +static void __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) { struct qeth_ipaddr 
*addr, *tmp; @@ -882,7 +882,7 @@ static void qeth_layer2_add_multicast(struct qeth_card *); static void qeth_add_multicast_ipv6(struct qeth_card *); #endif -static inline int +static int qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) { unsigned long flags; @@ -920,7 +920,7 @@ qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) wake_up(&card->wait_q); } -static inline int +static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) { unsigned long flags; @@ -1764,9 +1764,9 @@ out: qeth_release_buffer(channel,iob); } -static inline void +static void qeth_prepare_control_data(struct qeth_card *card, int len, -struct qeth_cmd_buffer *iob) + struct qeth_cmd_buffer *iob) { qeth_setup_ccw(&card->write,iob->data,len); iob->callback = qeth_release_buffer; @@ -2160,7 +2160,7 @@ qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, return 0; } -static inline struct sk_buff * +static struct sk_buff * qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) { struct sk_buff* skb; @@ -2179,7 +2179,7 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) return skb; } -static inline struct sk_buff * +static struct sk_buff * qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, struct qdio_buffer_element **__element, int *__offset, struct qeth_hdr **hdr) @@ -2264,7 +2264,7 @@ no_mem: return NULL; } -static inline __be16 +static __be16 qeth_type_trans(struct sk_buff *skb, struct net_device *dev) { struct qeth_card *card; @@ -2297,7 +2297,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev) return htons(ETH_P_802_2); } -static inline void +static void qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *hdr) { @@ -2351,7 +2351,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, fake_llc->ethertype = ETH_P_IP; } -static inline void +static void qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *hdr) { @@ -2420,7 +2420,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; } -static inline __u16 +static __u16 qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *hdr) { @@ -2476,7 +2476,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, return vlan_id; } -static inline void +static void qeth_process_inbound_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf, int index) { @@ -2528,7 +2528,7 @@ qeth_process_inbound_buffer(struct qeth_card *card, } } -static inline struct qeth_buffer_pool_entry * +static struct qeth_buffer_pool_entry * qeth_get_buffer_pool_entry(struct qeth_card *card) { struct qeth_buffer_pool_entry *entry; @@ -2543,7 +2543,7 @@ qeth_get_buffer_pool_entry(struct qeth_card *card) return NULL; } -static inline void +static void qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) { struct qeth_buffer_pool_entry *pool_entry; @@ -2570,7 +2570,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) buf->state = QETH_QDIO_BUF_EMPTY; } -static inline void +static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf) { @@ -2595,7 +2595,7 @@ qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } -static inline void +static void qeth_queue_input_buffer(struct qeth_card *card, int index) { struct qeth_qdio_q *queue = card->qdio.in_q; @@ 
-2699,7 +2699,7 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status, card->perf_stats.inbound_start_time; } -static inline int +static int qeth_handle_send_error(struct qeth_card *card, struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err, unsigned int siga_err) @@ -2821,7 +2821,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, * Switched to packing state if the number of used buffers on a queue * reaches a certain limit. */ -static inline void +static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) { if (!queue->do_pack) { @@ -2842,7 +2842,7 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) * In that case 1 is returned to inform the caller. If no buffer * has to be flushed, zero is returned. */ -static inline int +static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) { struct qeth_qdio_out_buffer *buffer; @@ -2877,7 +2877,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) * Checks if there is a packing buffer and prepares it to be flushed. * In that case returns 1, otherwise zero. */ -static inline int +static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) { struct qeth_qdio_out_buffer *buffer; @@ -2894,7 +2894,7 @@ qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) return 0; } -static inline void +static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) { int index; @@ -3594,7 +3594,7 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev, } } -static inline int +static int qeth_send_packet(struct qeth_card *, struct sk_buff *); static int @@ -3759,7 +3759,7 @@ qeth_stop(struct net_device *dev) return 0; } -static inline int +static int qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; @@ -3806,7 +3806,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) return cast_type; } -static inline int +static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, int ipv, int cast_type) { @@ -3853,7 +3853,7 @@ qeth_get_ip_version(struct sk_buff *skb) } } -static inline struct qeth_hdr * +static struct qeth_hdr * __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) { #ifdef CONFIG_QETH_VLAN @@ -3882,14 +3882,14 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); } -static inline void +static void __qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) { if (orig_skb != new_skb) dev_kfree_skb_any(new_skb); } -static inline struct sk_buff * +static struct sk_buff * qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr **hdr, int ipv) { @@ -3940,7 +3940,7 @@ qeth_get_qeth_hdr_flags6(int cast_type) return ct | QETH_CAST_UNICAST; } -static inline void +static void qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, struct sk_buff *skb) { @@ -3977,7 +3977,7 @@ qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, } } -static inline void +static void qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, struct sk_buff *skb, int cast_type) { @@ -4068,7 +4068,7 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, } } -static inline void +static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill) { @@ -4112,7 +4112,7 @@ __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, 
*next_element_to_fill = element; } -static inline int +static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, struct sk_buff *skb) @@ -4171,7 +4171,7 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue, return flush_cnt; } -static inline int +static int qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, int elements_needed, @@ -4222,7 +4222,7 @@ out: return -EBUSY; } -static inline int +static int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, int elements_needed, struct qeth_eddp_context *ctx) @@ -4328,7 +4328,7 @@ out: return rc; } -static inline int +static int qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb, int elems) { @@ -4349,7 +4349,7 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr, } -static inline int +static int qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) { int ipv = 0; @@ -4536,7 +4536,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) } -static inline const char * +static const char * qeth_arp_get_error_cause(int *rc) { switch (*rc) { @@ -4597,7 +4597,7 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries) return rc; } -static inline void +static void qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, struct qeth_arp_query_data *qdata, int entry_size, int uentry_size) @@ -5214,7 +5214,7 @@ qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) spin_unlock_irqrestore(&card->vlanlock, flags); } -static inline void +static void qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, unsigned short vid) { @@ -5625,7 +5625,7 @@ qeth_delete_mc_addresses(struct qeth_card *card) spin_unlock_irqrestore(&card->ip_lock, flags); } -static inline void +static void qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) { struct qeth_ipaddr *ipm; @@ -5711,7 +5711,7 @@ qeth_layer2_add_multicast(struct qeth_card *card) } #ifdef CONFIG_QETH_IPV6 -static inline void +static void qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) { struct qeth_ipaddr *ipm; @@ -6022,7 +6022,7 @@ qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd) return rc; } -static inline void +static void qeth_fill_netmask(u8 *netmask, unsigned int len) { int i,j; @@ -6626,7 +6626,7 @@ qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode) return rc; } -static inline int +static int qeth_setadapter_hstr(struct qeth_card *card) { int rc; @@ -6889,7 +6889,7 @@ qeth_send_simple_setassparms(struct qeth_card *card, return rc; } -static inline int +static int qeth_start_ipa_arp_processing(struct qeth_card *card) { int rc; @@ -7529,7 +7529,7 @@ qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, wake_up(&card->wait_q); } -static inline int +static int qeth_threads_running(struct qeth_card *card, unsigned long threads) { unsigned long flags; @@ -8118,7 +8118,7 @@ qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto, spin_unlock_irqrestore(&card->ip_lock, flags); } -static inline void +static void qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) { int i, j; diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c index 5836737ac58..d518419cd0c 100644 --- a/drivers/s390/net/qeth_sys.c +++ b/drivers/s390/net/qeth_sys.c @@ -328,7 +328,7 @@ qeth_dev_bufcnt_store(struct device *dev, struct 
device_attribute *attr, const c static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, qeth_dev_bufcnt_store); -static inline ssize_t +static ssize_t qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, char *buf) { @@ -368,7 +368,7 @@ qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *bu return qeth_dev_route_show(card, &card->options.route4, buf); } -static inline ssize_t +static ssize_t qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, enum qeth_prot_versions prot, const char *buf, size_t count) { @@ -998,7 +998,7 @@ struct device_attribute dev_attr_##_id = { \ .store = _store, \ }; -int +static int qeth_check_layer2(struct qeth_card *card) { if (card->options.layer2) @@ -1100,7 +1100,7 @@ static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644, qeth_dev_ipato_invert4_show, qeth_dev_ipato_invert4_store); -static inline ssize_t +static ssize_t qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1146,7 +1146,7 @@ qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); } -static inline int +static int qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, u8 *addr, int *mask_bits) { @@ -1178,7 +1178,7 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, return 0; } -static inline ssize_t +static ssize_t qeth_dev_ipato_add_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1223,7 +1223,7 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644, qeth_dev_ipato_add4_show, qeth_dev_ipato_add4_store); -static inline ssize_t +static ssize_t qeth_dev_ipato_del_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1361,7 +1361,7 @@ static struct attribute_group qeth_device_ipato_group = { .attrs = (struct attribute **)qeth_ipato_device_attrs, }; -static inline ssize_t +static ssize_t qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1407,7 +1407,7 @@ qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); } -static inline int +static int qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, u8 *addr) { @@ -1418,7 +1418,7 @@ qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, return 0; } -static inline ssize_t +static ssize_t qeth_dev_vipa_add_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1451,7 +1451,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644, qeth_dev_vipa_add4_show, qeth_dev_vipa_add4_store); -static inline ssize_t +static ssize_t qeth_dev_vipa_del_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1542,7 +1542,7 @@ static struct attribute_group qeth_device_vipa_group = { .attrs = (struct attribute **)qeth_vipa_device_attrs, }; -static inline ssize_t +static ssize_t qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1588,7 +1588,7 @@ qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); } -static inline int +static int qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, u8 *addr) { @@ -1599,7 +1599,7 @@ qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, return 0; } -static 
inline ssize_t +static ssize_t qeth_dev_rxip_add_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { @@ -1632,7 +1632,7 @@ static QETH_DEVICE_ATTR(rxip_add4, add4, 0644, qeth_dev_rxip_add4_show, qeth_dev_rxip_add4_store); -static inline ssize_t +static ssize_t qeth_dev_rxip_del_store(const char *buf, size_t count, struct qeth_card *card, enum qeth_prot_versions proto) { diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index e088b5e2871..806bb1a921e 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c @@ -13,22 +13,18 @@ #include <linux/errno.h> #include <linux/workqueue.h> #include <linux/time.h> +#include <linux/device.h> #include <linux/kthread.h> - +#include <asm/etr.h> #include <asm/lowcore.h> - +#include <asm/cio.h> +#include "cio/cio.h" +#include "cio/chsc.h" +#include "cio/css.h" #include "s390mach.h" static struct semaphore m_sem; -extern int css_process_crw(int, int); -extern int chsc_process_crw(void); -extern int chp_process_crw(int, int); -extern void css_reiterate_subchannels(void); - -extern struct workqueue_struct *slow_path_wq; -extern struct work_struct slow_path_work; - static NORET_TYPE void s390_handle_damage(char *msg) { @@ -470,6 +466,19 @@ s390_do_machine_check(struct pt_regs *regs) s390_handle_damage("unable to revalidate registers."); } + if (mci->cd) { + /* Timing facility damage */ + s390_handle_damage("TOD clock damaged"); + } + + if (mci->ed && mci->ec) { + /* External damage */ + if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) + etr_sync_check(); + if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) + etr_switch_to_local(); + } + if (mci->se) /* Storage error uncorrected */ s390_handle_damage("received storage error uncorrected " @@ -508,7 +517,7 @@ static int machine_check_init(void) { init_MUTEX_LOCKED(&m_sem); - ctl_clear_bit(14, 25); /* disable external damage MCH */ + ctl_set_bit(14, 25); /* enable external damage MCH */ ctl_set_bit(14, 27); /* enable system recovery MCH */ #ifdef CONFIG_MACHCHK_WARNING ctl_set_bit(14, 24); /* enable warning MCH */ @@ -529,7 +538,11 @@ arch_initcall(machine_check_init); static int __init machine_check_crw_init (void) { - kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); + struct task_struct *task; + + task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); + if (IS_ERR(task)) + return PTR_ERR(task); ctl_set_bit(14, 28); /* enable channel report MCH */ return 0; } diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h index 7abb42a09ae..d3ca4281a49 100644 --- a/drivers/s390/s390mach.h +++ b/drivers/s390/s390mach.h @@ -102,4 +102,7 @@ static inline int stcrw(struct crw *pcrw ) return ccode; } +#define ED_ETR_SYNC 12 /* External damage ETR sync check */ +#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ + #endif /* __s390mach */ diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 85093b71f9f..39a88526679 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -47,13 +47,12 @@ static int __init zfcp_module_init(void); static void zfcp_ns_gid_pn_handler(unsigned long); /* miscellaneous */ -static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); -static inline void zfcp_sg_list_free(struct zfcp_sg_list *); -static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, - void __user *, size_t); -static inline int zfcp_sg_list_copy_to_user(void __user *, - struct zfcp_sg_list *, size_t); - +static int zfcp_sg_list_alloc(struct 
zfcp_sg_list *, size_t); +static void zfcp_sg_list_free(struct zfcp_sg_list *); +static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, + void __user *, size_t); +static int zfcp_sg_list_copy_to_user(void __user *, + struct zfcp_sg_list *, size_t); static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); #define ZFCP_CFDC_IOC_MAGIC 0xDD @@ -605,7 +604,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, * elements of the scatter-gather list. The maximum size of a single element * in the scatter-gather list is PAGE_SIZE. */ -static inline int +static int zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) { struct scatterlist *sg; @@ -652,7 +651,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) * Memory for each element in the scatter-gather list is freed. * Finally sg_list->sg is freed itself and sg_list->count is reset. */ -static inline void +static void zfcp_sg_list_free(struct zfcp_sg_list *sg_list) { struct scatterlist *sg; @@ -697,7 +696,7 @@ zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count) * @size: number of bytes to be copied * Return: 0 on success, -EFAULT if copy_from_user fails. */ -static inline int +static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, void __user *user_buffer, size_t size) @@ -735,7 +734,7 @@ zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, * @size: number of bytes to be copied * Return: 0 on success, -EFAULT if copy_to_user fails */ -static inline int +static int zfcp_sg_list_copy_to_user(void __user *user_buffer, struct zfcp_sg_list *sg_list, size_t size) @@ -1799,7 +1798,7 @@ static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = { * @code: reason code * @rc_table: table of reason codes and descriptions */ -static inline const char * +static const char * zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) { const char *descr = "unknown reason code"; @@ -1847,7 +1846,7 @@ zfcp_check_ct_response(struct ct_hdr *rjt) * @rjt_par: reject parameter acc. 
to FC-PH/FC-FS * @rc_table: table of reason codes and descriptions */ -static inline void +static void zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, const struct zfcp_rc_entry *rc_table) { diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 0aa3b1ac76a..d8191d115c1 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -31,7 +31,7 @@ MODULE_PARM_DESC(dbfsize, #define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER -static inline int +static int zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) { unsigned long long sec; @@ -106,7 +106,7 @@ zfcp_dbf_view_dump(char *out_buf, const char *label, return len; } -static inline int +static int zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, debug_entry_t * entry, char *out_buf) { @@ -130,7 +130,7 @@ zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, return len; } -inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) +void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_adapter *adapter = fsf_req->adapter; struct fsf_qtcb *qtcb = fsf_req->qtcb; @@ -241,7 +241,7 @@ inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); } -inline void +void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, struct fsf_status_read_buffer *status_buffer) { @@ -295,7 +295,7 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); } -inline void +void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, unsigned int qdio_error, unsigned int siga_error, int sbal_index, int sbal_count) @@ -316,7 +316,7 @@ zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); } -static inline int +static int zfcp_hba_dbf_view_response(char *out_buf, struct zfcp_hba_dbf_record_response *rec) { @@ -403,7 +403,7 @@ zfcp_hba_dbf_view_response(char *out_buf, return len; } -static inline int +static int zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) { int len = 0; @@ -424,7 +424,7 @@ zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) return len; } -static inline int +static int zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) { int len = 0; @@ -469,7 +469,7 @@ zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view, return len; } -struct debug_view zfcp_hba_dbf_view = { +static struct debug_view zfcp_hba_dbf_view = { "structured", NULL, &zfcp_dbf_view_header, @@ -478,7 +478,7 @@ struct debug_view zfcp_hba_dbf_view = { NULL }; -inline void +void _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, void *buffer, int buflen) { @@ -519,7 +519,7 @@ _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); } -inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) +void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_port *port = ct->port; @@ -531,7 +531,7 @@ inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) ct->req->length); } -inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) +void 
zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_port *port = ct->port; @@ -543,7 +543,7 @@ inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) ct->resp->length); } -static inline void +static void _zfcp_san_dbf_event_common_els(const char *tag, int level, struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, u8 ls_code, void *buffer, int buflen) @@ -585,7 +585,7 @@ _zfcp_san_dbf_event_common_els(const char *tag, int level, spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); } -inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) +void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; @@ -597,7 +597,7 @@ inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) els->req->length); } -inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) +void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; @@ -608,7 +608,7 @@ inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) els->resp->length); } -inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) +void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) { struct zfcp_adapter *adapter = fsf_req->adapter; struct fsf_status_read_buffer *status_buffer = @@ -693,7 +693,7 @@ zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view, return len; } -struct debug_view zfcp_san_dbf_view = { +static struct debug_view zfcp_san_dbf_view = { "structured", NULL, &zfcp_dbf_view_header, @@ -702,7 +702,7 @@ struct debug_view zfcp_san_dbf_view = { NULL }; -static inline void +static void _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, struct zfcp_adapter *adapter, struct scsi_cmnd *scsi_cmnd, @@ -786,7 +786,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); } -inline void +void zfcp_scsi_dbf_event_result(const char *tag, int level, struct zfcp_adapter *adapter, struct scsi_cmnd *scsi_cmnd, @@ -796,7 +796,7 @@ zfcp_scsi_dbf_event_result(const char *tag, int level, adapter, scsi_cmnd, fsf_req, 0); } -inline void +void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, struct scsi_cmnd *scsi_cmnd, struct zfcp_fsf_req *new_fsf_req, @@ -806,7 +806,7 @@ zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, adapter, scsi_cmnd, new_fsf_req, old_req_id); } -inline void +void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, struct scsi_cmnd *scsi_cmnd) { @@ -884,7 +884,7 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view, return len; } -struct debug_view zfcp_scsi_dbf_view = { +static struct debug_view zfcp_scsi_dbf_view = { "structured", NULL, &zfcp_dbf_view_header, diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c88babce9bc..88642dec080 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -200,7 +200,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) * returns: 0 - initiated action successfully * <0 - failed to initiate action */ -int +static int zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) { int retval; @@ -295,7 +295,7 @@ zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int 
clear_mask) * zfcp_erp_adisc - send ADISC ELS command * @port: port structure */ -int +static int zfcp_erp_adisc(struct zfcp_port *port) { struct zfcp_adapter *adapter = port->adapter; @@ -380,7 +380,7 @@ zfcp_erp_adisc(struct zfcp_port *port) * * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. */ -void +static void zfcp_erp_adisc_handler(unsigned long data) { struct zfcp_send_els *send_els; @@ -3141,7 +3141,6 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, break; case ZFCP_ERP_ACTION_REOPEN_ADAPTER: if (result != ZFCP_ERP_SUCCEEDED) { - struct zfcp_port *port; list_for_each_entry(port, &adapter->port_list_head, list) if (port->rport && !atomic_test_mask(ZFCP_STATUS_PORT_WKA, diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index b8794d77285..cda0cc095ad 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -119,8 +119,8 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); -extern void set_host_byte(u32 *, char); -extern void set_driver_byte(u32 *, char); +extern void set_host_byte(int *, char); +extern void set_driver_byte(int *, char); extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 067f1519eb0..4b3ae3f22e7 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -4563,7 +4563,7 @@ zfcp_fsf_req_sbal_check(unsigned long *flags, /* * set qtcb pointer in fsf_req and initialize QTCB */ -static inline void +static void zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) { if (likely(fsf_req->qtcb != NULL)) { diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index dbd9f48e863..1e12a78e8ed 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -21,22 +21,22 @@ #include "zfcp_ext.h" -static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); +static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get (struct zfcp_qdio_queue *, int, int); static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp (struct zfcp_fsf_req *, int, int); -static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain +static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain (struct zfcp_fsf_req *, unsigned long); -static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next +static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next (struct zfcp_fsf_req *, unsigned long); -static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); +static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); -static inline void zfcp_qdio_sbale_fill +static void zfcp_qdio_sbale_fill (struct zfcp_fsf_req *, unsigned long, void *, int); -static inline int zfcp_qdio_sbals_from_segment +static int zfcp_qdio_sbals_from_segment (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); -static inline int zfcp_qdio_sbals_from_buffer +static int zfcp_qdio_sbals_from_buffer (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); static qdio_handler_t zfcp_qdio_request_handler; @@ -201,7 +201,7 @@ 
zfcp_qdio_allocate(struct zfcp_adapter *adapter) * returns: error flag * */ -static inline int +static int zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, unsigned int qdio_error, unsigned int siga_error, int first_element, int elements_processed) @@ -462,7 +462,7 @@ zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale) * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for * a struct zfcp_fsf_req */ -inline volatile struct qdio_buffer_element * +volatile struct qdio_buffer_element * zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) { return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, @@ -484,7 +484,7 @@ zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) * zfcp_qdio_sbale_curr - return current SBALE on request_queue for * a struct zfcp_fsf_req */ -inline volatile struct qdio_buffer_element * +volatile struct qdio_buffer_element * zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) { return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, @@ -499,7 +499,7 @@ zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) * * Note: We can assume at least one free SBAL in the request_queue when called. */ -static inline void +static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) { int count = atomic_read(&fsf_req->adapter->request_queue.free_count); @@ -517,7 +517,7 @@ zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) * * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. */ -static inline volatile struct qdio_buffer_element * +static volatile struct qdio_buffer_element * zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) { volatile struct qdio_buffer_element *sbale; @@ -554,7 +554,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) /** * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed */ -static inline volatile struct qdio_buffer_element * +static volatile struct qdio_buffer_element * zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) { if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) @@ -569,7 +569,7 @@ zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue * with zero from */ -static inline int +static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) { struct qdio_buffer **buf = queue->buffer; @@ -603,7 +603,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req) * zfcp_qdio_sbale_fill - set address and lenght in current SBALE * on request_queue */ -static inline void +static void zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, void *addr, int length) { @@ -624,7 +624,7 @@ zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, * Alignment and length of the segment determine how many SBALEs are needed * for the memory segment. 
*/ -static inline int +static int zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, void *start_addr, unsigned long total_length) { @@ -659,7 +659,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, * @sg_count: number of elements in scatter-gather list * @max_sbals: upper bound for number of SBALs to be used */ -inline int +int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, struct scatterlist *sg, int sg_count, int max_sbals) { @@ -707,7 +707,7 @@ out: * @length: length of buffer * @max_sbals: upper bound for number of SBALs to be used */ -static inline int +static int zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, void *buffer, unsigned long length, int max_sbals) { @@ -728,7 +728,7 @@ zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, * @scsi_cmnd: either scatter-gather list or buffer contained herein is used * to fill SBALs */ -inline int +int zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) { diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 452d96f92a1..99db02062c3 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -90,7 +90,7 @@ zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) return fcp_sns_info_ptr; } -fcp_dl_t * +static fcp_dl_t * zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) { int additional_length = fcp_cmd->add_fcp_cdb_length << 2; @@ -124,19 +124,19 @@ zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl) * regarding the specified byte */ static inline void -set_byte(u32 * result, char status, char pos) +set_byte(int *result, char status, char pos) { *result |= status << (pos * 8); } void -set_host_byte(u32 * result, char status) +set_host_byte(int *result, char status) { set_byte(result, status, 2); } void -set_driver_byte(u32 * result, char status) +set_driver_byte(int *result, char status) { set_byte(result, status, 3); } @@ -280,7 +280,7 @@ out: return retval; } -void +static void zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) { struct completion *wait = (struct completion *) scpnt->SCp.ptr; @@ -324,7 +324,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt, * returns: 0 - success, SCSI command enqueued * !0 - failure */ -int +static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, void (*done) (struct scsi_cmnd *)) { @@ -380,7 +380,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id, * will handle late commands. (Usually, the normal completion of late * commands is ignored with respect to the running abort operation.) 
*/ -int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) +static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) { struct Scsi_Host *scsi_host; struct zfcp_adapter *adapter; @@ -445,7 +445,7 @@ int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) return retval; } -int +static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) { int retval; @@ -541,7 +541,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, /** * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset */ -int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) +static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) { struct zfcp_unit *unit; struct zfcp_adapter *adapter; diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 1e788e815ce..090743d2f91 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c @@ -9,8 +9,14 @@ #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/init.h> +#include <linux/delay.h> #include <asm/ebcdic.h> +/* Sigh, math-emu. Don't ask. */ +#include <asm/sfp-util.h> +#include <math-emu/soft-fp.h> +#include <math-emu/single.h> + struct sysinfo_1_1_1 { char reserved_0[32]; char manufacturer[16]; @@ -198,7 +204,7 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) * if the higher order 8 bits are not zero. Printing * a floating point number in the kernel is a no-no, * always print the number as 32 bit unsigned integer. - * The user-space needs to know about the stange + * The user-space needs to know about the strange * encoding of the alternate cpu capability. */ len += sprintf(page + len, "Capability: %u %u\n", @@ -351,3 +357,58 @@ static __init int create_proc_sysinfo(void) __initcall(create_proc_sysinfo); +/* + * CPU capability might have changed. Therefore recalculate loops_per_jiffy. + */ +void s390_adjust_jiffies(void) +{ + struct sysinfo_1_2_2 *info; + const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */ + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + FP_DECL_EX; + unsigned int capability; + + info = (void *) get_zeroed_page(GFP_KERNEL); + if (!info) + return; + + if (stsi(info, 1, 2, 2) != -ENOSYS) { + /* + * Major sigh. The cpu capability encoding is "special". + * If the first 9 bits of info->capability are 0 then it + * is a 32 bit unsigned integer in the range 0 .. 2^23. + * If the first 9 bits are != 0 then it is a 32 bit float. + * In addition a lower value indicates a proportionally + * higher cpu capacity. Bogomips are the other way round. + * To get to a halfway suitable number we divide 1e7 + * by the cpu capability number. Yes, that means a floating + * point division .. math-emu here we come :-) + */ + FP_UNPACK_SP(SA, &fmil); + if ((info->capability >> 23) == 0) + FP_FROM_INT_S(SB, info->capability, 32, int); + else + FP_UNPACK_SP(SB, &info->capability); + FP_DIV_S(SR, SA, SB); + FP_TO_INT_S(capability, SR, 32, 0); + } else + /* + * Really old machine without stsi block for basic + * cpu information. Report 42.0 bogomips. + */ + capability = 42; + loops_per_jiffy = capability * (500000/HZ); + free_page((unsigned long) info); +} + +/* + * calibrate the delay loop + */ +void __init calibrate_delay(void) +{ + s390_adjust_jiffies(); + /* Print the good old Bogomips line .. */ + printk(KERN_DEBUG "Calibrating delay loop (skipped)... " + "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ)) % 100); +} |
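
For reference, here is a minimal user-space sketch of the capability-to-BogoMIPS conversion that the new s390_adjust_jiffies() in the sysinfo.c hunk above performs with the math-emu soft-float macros. Using a plain IEEE-754 float, HZ = 100 and the sample capability value are assumptions for illustration only; inside the kernel the division has to go through FP_UNPACK_SP/FP_DIV_S because the FPU must not be used in kernel context.

```c
/*
 * Hypothetical stand-alone illustration of the conversion done by
 * s390_adjust_jiffies() in the patch.  The kernel uses the math-emu
 * soft-float macros; here an ordinary IEEE-754 float stands in for
 * FP_UNPACK_SP/FP_DIV_S/FP_TO_INT_S.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HZ 100	/* assumed tick rate for the example */

static unsigned long capability_to_lpj(uint32_t capability)
{
	const float fmil = 1.0e7f;	/* 0x4b189680 in the patch */
	float cap;

	if ((capability >> 23) == 0) {
		/* top 9 bits zero: value is a plain unsigned integer */
		cap = (float) capability;
	} else {
		/* otherwise the word already is a 32-bit float */
		memcpy(&cap, &capability, sizeof(cap));
	}
	/* a lower capability value means a faster CPU, hence the division */
	return (unsigned long)(fmil / cap) * (500000 / HZ);
}

int main(void)
{
	uint32_t sample = 450;	/* made-up STSI 1.2.2 capability value */
	unsigned long lpj = capability_to_lpj(sample);

	/* same formatting as the calibrate_delay() printk in the patch */
	printf("loops_per_jiffy=%lu -> %lu.%02lu BogoMIPS\n",
	       lpj, lpj / (500000 / HZ), (lpj / (5000 / HZ)) % 100);
	return 0;
}
```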