spin_unlock_irq(&sch->lock);
free_page((unsigned long)page);
if (!ret) {
- int j, chpid;
+ int j, chpid, mask;
/* Allocate channel path structures, if needed. */
for (j = 0; j < 8; j++) {
+ mask = 0x80 >> j;
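+ /* Bit j of the path installed mask (PIM) corresponds to chpid[j]. */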
chpid = sch->ssd_info.chpid[j];
- if (chpid && (get_chp_status(chpid) < 0))
+ if ((sch->schib.pmcw.pim & mask) &&
+ (get_chp_status(chpid) < 0))
new_channel_path(chpid);
}
}
sch = to_subchannel(dev);
chpid = data;
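+ /* Find the position of the channel path among this subchannel's installed paths. */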
- for (j = 0; j < 8; j++)
- if (sch->schib.pmcw.chpid[j] == chpid->id)
+ for (j = 0; j < 8; j++) {
+ mask = 0x80 >> j;
+ if ((sch->schib.pmcw.pim & mask) &&
+ (sch->schib.pmcw.chpid[j] == chpid->id))
break;
+ }
if (j >= 8)
return 0;
- mask = 0x80 >> j;
spin_lock_irq(&sch->lock);
stsch(sch->schid, &schib);
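+ /* Clear the subchannel to cancel I/O that may still be running over the path. */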
cc = cio_clear(sch);
if (cc == -ENODEV)
goto out_unreg;
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
/* Call handler. */
if (sch->driver && sch->driver->termination)
sch->driver->termination(&sch->dev);
/* trigger path verification. */
if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
- else if (sch->vpm == mask)
+ else if (sch->lpm == mask)
goto out_unreg;
out_unlock:
spin_unlock_irq(&sch->lock);
struct res_acc_data *res_data;
struct subchannel *sch;
- res_data = (struct res_acc_data *)data;
+ res_data = data;
sch = get_subchannel_by_schid(schid);
if (!sch)
/* Check if a subchannel is newly available. */
u32 isinfo[28];
} *lir;
- lir = (struct lir*) data;
+ lir = data;
if (!(lir->iq&0x80))
/* NULL link incident record */
return -EINVAL;
static int
__chp_add(struct subchannel_id schid, void *data)
{
- int i;
+ int i, mask;
struct channel_path *chp;
struct subchannel *sch;
- chp = (struct channel_path *)data;
+ chp = data;
sch = get_subchannel_by_schid(schid);
if (!sch)
/* Check if the subchannel is now available. */
return __chp_add_new_sch(schid);
spin_lock_irq(&sch->lock);
- for (i=0; i<8; i++)
- if (sch->schib.pmcw.chpid[i] == chp->id) {
+ for (i = 0; i < 8; i++) {
+ mask = 0x80 >> i;
+ if ((sch->schib.pmcw.pim & mask) &&
+ (sch->schib.pmcw.chpid[i] == chp->id)) {
if (stsch(sch->schid, &sch->schib) != 0) {
/* Endgame. */
spin_unlock_irq(&sch->lock);
}
break;
}
+ }
if (i==8) {
spin_unlock_irq(&sch->lock);
return 0;
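+ /* Recompute the logical path mask to include the newly available path. */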
sch->lpm = ((sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom)
- | 0x80 >> i) & sch->opm;
+ | mask) & sch->opm;
if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
return chp_add(chpid);
}
-static inline int
-__check_for_io_and_kill(struct subchannel *sch, int index)
+static inline int check_for_io_on_path(struct subchannel *sch, int index)
{
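+ /* Tell whether I/O is currently in progress on the given path. */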
int cc;
- if (!device_is_online(sch))
- /* cio could be doing I/O. */
- return 0;
cc = stsch(sch->schid, &sch->schib);
if (cc)
return 0;
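+ /* scsw.actl flags ongoing activity; lpum identifies the last path used for it. */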
- if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
- device_set_waiting(sch);
+ if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
return 1;
- }
return 0;
}
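+
+/* Terminate cio-internal I/O on the subchannel and request a retry. */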
+static void terminate_internal_io(struct subchannel *sch)
+{
+ if (cio_clear(sch)) {
+ /* Recheck device in case clear failed. */
+ sch->lpm = 0;
+ if (device_trigger_verify(sch) != 0) {
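+ /* Verification not possible: re-evaluate the subchannel via the slow path. */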
+ if (css_enqueue_subchannel_slow(sch->schid)) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ }
+ }
+ return;
+ }
+ /* Request retry of internal operation. */
+ device_set_intretry(sch);
+ /* Call handler. */
+ if (sch->driver && sch->driver->termination)
+ sch->driver->termination(&sch->dev);
+}
+
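+/* Vary the given channel path on or off for this subchannel. */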
static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
device_trigger_reprobe(sch);
else if (sch->driver && sch->driver->verify)
sch->driver->verify(&sch->dev);
- } else {
- sch->opm &= ~(0x80 >> chp);
- sch->lpm &= ~(0x80 >> chp);
- /*
- * Give running I/O a grace period in which it
- * can successfully terminate, even using the
- * just varied off path. Then kill it.
- */
- if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
+ break;
+ }
+ sch->opm &= ~(0x80 >> chp);
+ sch->lpm &= ~(0x80 >> chp);
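+ /* I/O may still be running over the path just varied off. */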
+ if (check_for_io_on_path(sch, chp)) {
+ if (device_is_online(sch))
+ /* Path verification is done after killing. */
+ device_kill_io(sch);
+ else
+ /* Kill and retry internal I/O. */
+ terminate_internal_io(sch);
+ } else if (!sch->lpm) {
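+ /* The last logical path is gone: verify, or re-evaluate on failure. */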
+ if (device_trigger_verify(sch) != 0) {
if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
- } else if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
- }
+ }
+ } else if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
break;
}
spin_unlock_irqrestore(&sch->lock, flags);
return desc;
}
-static int reset_channel_path(struct channel_path *chp)
-{
- int cc;
-
- cc = rchp(chp->id);
- switch (cc) {
- case 0:
- return 0;
- case 2:
- return -EBUSY;
- default:
- return -ENODEV;
- }
-}
-
-static void reset_channel_paths_css(struct channel_subsystem *css)
-{
- int i;
-
- for (i = 0; i <= __MAX_CHPID; i++) {
- if (css->chps[i])
- reset_channel_path(css->chps[i]);
- }
-}
-
-void cio_reset_channel_paths(void)
-{
- int i;
-
- for (i = 0; i <= __MAX_CSSID; i++) {
- if (css[i] && css[i]->valid)
- reset_channel_paths_css(css[i]);
- }
-}
-
static int __init
chsc_alloc_sei_area(void)
{