author     Cornelia Huck <cohuck@de.ibm.com>       2006-01-06 00:19:22 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-01-06 08:33:51 -0800
commit     f97a56fb768e5fe9cd07c56ca47870136bb5530c
tree       05108317a0cca7aa04cd68f4fcb7b7d3a295ddfa  /drivers/s390/cio/chsc.c
parent     a8237fc4108060402d904bea5e1062e22e731969
[PATCH] s390: introduce for_each_subchannel
for_each_subchannel() is an iterator that calls a function for every possible
subchannel id until the function returns a non-zero value. Convert the existing
open-coded iterating functions to use it. (A sketch of the iterator follows the
diffstat below.)
Signed-off-by: Cornelia Huck <cohuck@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/s390/cio/chsc.c')
-rw-r--r--   drivers/s390/cio/chsc.c   372
1 file changed, 193 insertions(+), 179 deletions(-)
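The for_each_subchannel() iterator itself is added to drivers/s390/cio/css.c by this same patch, which falls outside this chsc.c-limited view. Below is a minimal sketch of its shape, inferred from the commit message and the converted callers in the diff; treat the exact body as an assumption rather than the verbatim css.c hunk:

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		/* The first non-zero return value from the callback
		 * ends the walk and is passed back to the caller. */
		ret = fn(schid, data);
		if (ret)
			break;
	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
	return ret;
}

Each open-coded do { ... } while (schid.sch_no++ < __MAX_SUBCHANNEL) loop in the diff then collapses into a single call such as rc = for_each_subchannel(__chp_add, chps[chpid]);.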
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index aff5d149b72..78e082311f4 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -310,9 +310,14 @@ s390_set_chpid_offline( __u8 chpid)
 	queue_work(slow_path_wq, &slow_path_work);
 }
 
+struct res_acc_data {
+	struct channel_path *chp;
+	u32 fla_mask;
+	u16 fla;
+};
+
 static int
-s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
-			 struct subchannel *sch)
+s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
 {
 	int found;
 	int chp;
@@ -324,8 +329,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
 		 * check if chpid is in information updated by ssd
 		 */
		if (sch->ssd_info.valid &&
-		    sch->ssd_info.chpid[chp] == chpid &&
-		    (sch->ssd_info.fla[chp] & fla_mask) == fla) {
+		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
+		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
+		    == res_data->fla) {
 			found = 1;
 			break;
 		}
@@ -345,18 +351,80 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
 	return 0x80 >> chp;
 }
 
+static inline int
+s390_process_res_acc_new_sch(struct subchannel_id schid)
+{
+	struct schib schib;
+	int ret;
+	/*
+	 * We don't know the device yet, but since a path
+	 * may be available now to the device we'll have
+	 * to do recognition again.
+	 * Since we don't have any idea about which chpid
+	 * that beast may be on we'll have to do a stsch
+	 * on all devices, grr...
+	 */
+	if (stsch(schid, &schib))
+		/* We're through */
+		return need_rescan ? -EAGAIN : -ENXIO;
+
+	/* Put it on the slow path. */
+	ret = css_enqueue_subchannel_slow(schid);
+	if (ret) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
 static int
-s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
+__s390_process_res_acc(struct subchannel_id schid, void *data)
 {
+	int chp_mask, old_lpm;
+	struct res_acc_data *res_data;
 	struct subchannel *sch;
+
+	res_data = (struct res_acc_data *)data;
+	sch = get_subchannel_by_schid(schid);
+	if (!sch)
+		/* Check if a subchannel is newly available. */
+		return s390_process_res_acc_new_sch(schid);
+
+	spin_lock_irq(&sch->lock);
+
+	chp_mask = s390_process_res_acc_sch(res_data, sch);
+
+	if (chp_mask == 0) {
+		spin_unlock_irq(&sch->lock);
+		return 0;
+	}
+	old_lpm = sch->lpm;
+	sch->lpm = ((sch->schib.pmcw.pim &
+		     sch->schib.pmcw.pam &
+		     sch->schib.pmcw.pom)
+		    | chp_mask) & sch->opm;
+	if (!old_lpm && sch->lpm)
+		device_trigger_reprobe(sch);
+	else if (sch->driver && sch->driver->verify)
+		sch->driver->verify(&sch->dev);
+
+	spin_unlock_irq(&sch->lock);
+	put_device(&sch->dev);
+	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
+}
+
+
+static int
+s390_process_res_acc (struct res_acc_data *res_data)
+{
 	int rc;
-	struct subchannel_id schid;
 	char dbf_txt[15];
 
-	sprintf(dbf_txt, "accpr%x", chpid);
+	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
 	CIO_TRACE_EVENT( 2, dbf_txt);
-	if (fla != 0) {
-		sprintf(dbf_txt, "fla%x", fla);
+	if (res_data->fla != 0) {
+		sprintf(dbf_txt, "fla%x", res_data->fla);
 		CIO_TRACE_EVENT( 2, dbf_txt);
 	}
 
@@ -367,71 +435,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
 	 * The more information we have (info), the less scanning
 	 * will we have to do.
 	 */
-
-	if (!get_chp_status(chpid))
-		return 0; /* no need to do the rest */
-
-	rc = 0;
-	init_subchannel_id(&schid);
-	do {
-		int chp_mask, old_lpm;
-
-		sch = get_subchannel_by_schid(schid);
-		if (!sch) {
-			struct schib schib;
-			int ret;
-			/*
-			 * We don't know the device yet, but since a path
-			 * may be available now to the device we'll have
-			 * to do recognition again.
-			 * Since we don't have any idea about which chpid
-			 * that beast may be on we'll have to do a stsch
-			 * on all devices, grr...
-			 */
-			if (stsch(schid, &schib)) {
-				/* We're through */
-				if (need_rescan)
-					rc = -EAGAIN;
-				break;
-			}
-			if (need_rescan) {
-				rc = -EAGAIN;
-				continue;
-			}
-			/* Put it on the slow path. */
-			ret = css_enqueue_subchannel_slow(schid);
-			if (ret) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-			rc = -EAGAIN;
-			continue;
-		}
-
-		spin_lock_irq(&sch->lock);
-
-		chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
-
-		if (chp_mask == 0) {
-
-			spin_unlock_irq(&sch->lock);
-			continue;
-		}
-		old_lpm = sch->lpm;
-		sch->lpm = ((sch->schib.pmcw.pim &
-			     sch->schib.pmcw.pam &
-			     sch->schib.pmcw.pom)
-			    | chp_mask) & sch->opm;
-		if (!old_lpm && sch->lpm)
-			device_trigger_reprobe(sch);
-		else if (sch->driver && sch->driver->verify)
-			sch->driver->verify(&sch->dev);
-
-		spin_unlock_irq(&sch->lock);
-		put_device(&sch->dev);
-		if (fla_mask == 0xffff)
-			break;
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+	rc = for_each_subchannel(__s390_process_res_acc, res_data);
+	if (css_slow_subchannels_exist())
+		rc = -EAGAIN;
+	else if (rc != -EAGAIN)
+		rc = 0;
 	return rc;
 }
 
@@ -469,6 +477,7 @@ int
 chsc_process_crw(void)
 {
 	int chpid, ret;
+	struct res_acc_data res_data;
 	struct {
 		struct chsc_header request;
 		u32 reserved1;
@@ -503,7 +512,7 @@ chsc_process_crw(void)
 	do {
 		int ccode, status;
 		memset(sei_area, 0, sizeof(*sei_area));
-
+		memset(&res_data, 0, sizeof(struct res_acc_data));
 		sei_area->request = (struct chsc_header) {
 			.length = 0x0010,
 			.code = 0x000e,
@@ -576,26 +585,23 @@ chsc_process_crw(void)
 		if (status < 0)
 			new_channel_path(sei_area->rsid);
 		else if (!status)
-			return 0;
-		if ((sei_area->vf & 0x80) == 0) {
-			pr_debug("chpid: %x\n", sei_area->rsid);
-			ret = s390_process_res_acc(sei_area->rsid,
-						   0, 0);
-		} else if ((sei_area->vf & 0xc0) == 0x80) {
-			pr_debug("chpid: %x link addr: %x\n",
-				 sei_area->rsid, sei_area->fla);
-			ret = s390_process_res_acc(sei_area->rsid,
-						   sei_area->fla,
-						   0xff00);
-		} else if ((sei_area->vf & 0xc0) == 0xc0) {
-			pr_debug("chpid: %x full link addr: %x\n",
-				 sei_area->rsid, sei_area->fla);
-			ret = s390_process_res_acc(sei_area->rsid,
-						   sei_area->fla,
-						   0xffff);
+			break;
+		res_data.chp = chps[sei_area->rsid];
+		pr_debug("chpid: %x", sei_area->rsid);
+		if ((sei_area->vf & 0xc0) != 0) {
+			res_data.fla = sei_area->fla;
+			if ((sei_area->vf & 0xc0) == 0xc0) {
+				pr_debug(" full link addr: %x",
					 sei_area->fla);
+				res_data.fla_mask = 0xffff;
+			} else {
+				pr_debug(" link addr: %x",
+					 sei_area->fla);
+				res_data.fla_mask = 0xff00;
+			}
 		}
-		pr_debug("\n");
-
+		ret = s390_process_res_acc(&res_data);
+		pr_debug("\n\n");
 		break;
	default: /* other stuff */
@@ -607,12 +613,70 @@ chsc_process_crw(void)
 	return ret;
 }
 
+static inline int
+__chp_add_new_sch(struct subchannel_id schid)
+{
+	struct schib schib;
+	int ret;
+
+	if (stsch(schid, &schib))
+		/* We're through */
+		return need_rescan ? -EAGAIN : -ENXIO;
+
+	/* Put it on the slow path. */
+	ret = css_enqueue_subchannel_slow(schid);
+	if (ret) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
+
 static int
-chp_add(int chpid)
+__chp_add(struct subchannel_id schid, void *data)
 {
+	int i;
+	struct channel_path *chp;
 	struct subchannel *sch;
-	int ret, rc;
-	struct subchannel_id schid;
+
+	chp = (struct channel_path *)data;
+	sch = get_subchannel_by_schid(schid);
+	if (!sch)
+		/* Check if the subchannel is now available. */
+		return __chp_add_new_sch(schid);
+	spin_lock(&sch->lock);
+	for (i=0; i<8; i++)
+		if (sch->schib.pmcw.chpid[i] == chp->id) {
+			if (stsch(sch->schid, &sch->schib) != 0) {
+				/* Endgame. */
+				spin_unlock(&sch->lock);
+				return -ENXIO;
+			}
+			break;
+		}
+	if (i==8) {
+		spin_unlock(&sch->lock);
+		return 0;
+	}
+	sch->lpm = ((sch->schib.pmcw.pim &
+		     sch->schib.pmcw.pam &
+		     sch->schib.pmcw.pom)
+		    | 0x80 >> i) & sch->opm;
+
+	if (sch->driver && sch->driver->verify)
+		sch->driver->verify(&sch->dev);
+
+	spin_unlock(&sch->lock);
+	put_device(&sch->dev);
+	return 0;
+}
+
+static int
+chp_add(int chpid)
+{
+	int rc;
 	char dbf_txt[15];
 
 	if (!get_chp_status(chpid))
@@ -621,60 +685,11 @@ chp_add(int chpid)
 	sprintf(dbf_txt, "cadd%x", chpid);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
-	rc = 0;
-	init_subchannel_id(&schid);
-	do {
-		int i;
-
-		sch = get_subchannel_by_schid(schid);
-		if (!sch) {
-			struct schib schib;
-
-			if (stsch(schid, &schib)) {
-				/* We're through */
-				if (need_rescan)
-					rc = -EAGAIN;
-				break;
-			}
-			if (need_rescan) {
-				rc = -EAGAIN;
-				continue;
-			}
-			/* Put it on the slow path. */
-			ret = css_enqueue_subchannel_slow(schid);
-			if (ret) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-			rc = -EAGAIN;
-			continue;
-		}
-
-		spin_lock(&sch->lock);
-		for (i=0; i<8; i++)
-			if (sch->schib.pmcw.chpid[i] == chpid) {
-				if (stsch(sch->schid, &sch->schib) != 0) {
-					/* Endgame. */
-					spin_unlock(&sch->lock);
-					return rc;
-				}
-				break;
-			}
-		if (i==8) {
-			spin_unlock(&sch->lock);
-			return rc;
-		}
-		sch->lpm = ((sch->schib.pmcw.pim &
-			     sch->schib.pmcw.pam &
-			     sch->schib.pmcw.pom)
-			    | 0x80 >> i) & sch->opm;
-
-		if (sch->driver && sch->driver->verify)
-			sch->driver->verify(&sch->dev);
-
-		spin_unlock(&sch->lock);
-		put_device(&sch->dev);
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+	rc = for_each_subchannel(__chp_add, chps[chpid]);
+	if (css_slow_subchannels_exist())
+		rc = -EAGAIN;
+	if (rc != -EAGAIN)
+		rc = 0;
 	return rc;
 }
 
@@ -786,6 +801,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data)
 	return 0;
 }
 
+static int
+__s390_vary_chpid_on(struct subchannel_id schid, void *data)
+{
+	struct schib schib;
+	struct subchannel *sch;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		put_device(&sch->dev);
+		return 0;
+	}
+	if (stsch(schid, &schib))
+		/* We're through */
+		return -ENXIO;
+	/* Put it on the slow path. */
+	if (css_enqueue_subchannel_slow(schid)) {
+		css_clear_subchannel_slow_list();
+		need_rescan = 1;
+		return -EAGAIN;
+	}
+	return 0;
+}
+
 /*
  * Function: s390_vary_chpid
  * Varies the specified chpid online or offline
@@ -794,9 +832,7 @@ static int
 s390_vary_chpid( __u8 chpid, int on)
 {
 	char dbf_text[15];
-	int status, ret;
-	struct subchannel_id schid;
-	struct subchannel *sch;
+	int status;
 
 	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
 	CIO_TRACE_EVENT( 2, dbf_text);
@@ -821,31 +857,9 @@ s390_vary_chpid( __u8 chpid, int on)
 	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
 			 s390_subchannel_vary_chpid_on :
 			 s390_subchannel_vary_chpid_off);
-	if (!on)
-		goto out;
-	/* Scan for new devices on varied on path. */
-	init_subchannel_id(&schid);
-	do {
-		struct schib schib;
-
-		if (need_rescan)
-			break;
-		sch = get_subchannel_by_schid(schid);
-		if (sch) {
-			put_device(&sch->dev);
-			continue;
-		}
-		if (stsch(schid, &schib))
-			/* We're through */
-			break;
-		/* Put it on the slow path. */
-		ret = css_enqueue_subchannel_slow(schid);
-		if (ret) {
-			css_clear_subchannel_slow_list();
-			need_rescan = 1;
-		}
-	} while (schid.sch_no++ < __MAX_SUBCHANNEL);
-out:
+	if (on)
+		/* Scan for new devices on varied on path. */
+		for_each_subchannel(__s390_vary_chpid_on, NULL);
 	if (need_rescan || css_slow_subchannels_exist())
 		queue_work(slow_path_wq, &slow_path_work);
 	return 0;
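Note the error convention the new callbacks share: returning 0 continues the scan, -ENXIO ends it when there is nothing left to find, and -EAGAIN ends it while requesting a retry via the slow path, which the wrappers then normalize (if (rc != -EAGAIN) rc = 0;). A hypothetical callback, with an invented name purely for illustration, showing how a caller can stop the walk early:

/* Illustrative only, not part of the patch: record the first
 * subchannel id that answers stsch() and stop the walk. */
static int
__find_first_operational(struct subchannel_id schid, void *data)
{
	struct schib schib;

	/* A non-zero condition code from stsch() means no device here. */
	if (stsch(schid, &schib))
		return 0;	/* keep scanning */
	*(struct subchannel_id *)data = schid;
	return 1;	/* any non-zero value ends for_each_subchannel() */
}

Invoked as for_each_subchannel(__find_first_operational, &schid), this yields 1 once a live subchannel is found, or 0 if the scan runs off the end of the subchannel id space.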