/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chpid.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *        process more than one at a time? */
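/*
 * Issue a store-subchannel-description chsc (command 0x0004) for @sch and
 * cache the reported subchannel type and, for I/O and message subchannels,
 * the chpid/full-link-address pairs in sch->ssd_info. Called with the
 * subchannel lock held; @page is a zeroed, DMA-capable scratch page used
 * as the chsc request/response block.
 */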
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
        int ccode, j;

        struct {
                struct chsc_header request;
                u16 reserved1a:10;
                u16 ssid:2;
                u16 reserved1b:4;
                u16 f_sch;        /* first subchannel */
                u16 reserved2;
                u16 l_sch;        /* last subchannel */
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u8 sch_valid : 1;
                u8 dev_valid : 1;
                u8 st        : 3; /* subchannel type */
                u8 zeroes    : 3;
                u8  unit_addr;    /* unit address */
                u16 devno;        /* device number */
                u8 path_mask;
                u8 fla_valid_mask;
                u16 sch;          /* subchannel */
                u8 chpid[8];      /* chpids 0-7 */
                u16 fla[8];       /* full link addresses 0-7 */
        } __attribute__ ((packed)) *ssd_area;

        ssd_area = page;

        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;

        ssd_area->ssid = sch->schid.ssid;
        ssd_area->f_sch = sch->schid.sch_no;
        ssd_area->l_sch = sch->schid.sch_no;

        ccode = chsc(ssd_area);
        if (ccode > 0) {
                pr_debug("chsc returned with ccode = %d\n", ccode);
                return (ccode == 3) ? -ENODEV : -EBUSY;
        }

        switch (ssd_area->response.code) {
        case 0x0001: /* everything ok */
                break;
        case 0x0002:
                CIO_CRW_EVENT(2, "Invalid command!\n");
                return -EINVAL;
        case 0x0003:
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                return -EINVAL;
        case 0x0004:
                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
                return -EOPNOTSUPP;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              ssd_area->response.code);
                return -EIO;
        }

        /*
         * ssd_area->st stores the type of the detected
         * subchannel, with the following definitions:
         *
         * 0: I/O subchannel:     All fields have meaning
         * 1: CHSC subchannel:    Only sch_val, st and sch
         *                        have meaning
         * 2: Message subchannel: All fields except unit_addr
         *                        have meaning
         * 3: ADM subchannel:     Only sch_val, st and sch
         *                        have meaning
         *
         * Other types are currently undefined.
         */
        if (ssd_area->st > 3) { /* uhm, that looks strange... */
                CIO_CRW_EVENT(0, "Strange subchannel type %d"
                              " for sch 0.%x.%04x\n", ssd_area->st,
                              sch->schid.ssid, sch->schid.sch_no);
                /*
                 * There may have been a new subchannel type defined in the
                 * time since this code was written; since we don't know which
                 * fields have meaning and what to do with it we just jump out
                 */
                return 0;
        } else {
                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
                              sch->schid.ssid, sch->schid.sch_no,
                              type[ssd_area->st]);

                sch->ssd_info.valid = 1;
                sch->ssd_info.type = ssd_area->st;
        }

        if (ssd_area->st == 0 || ssd_area->st == 2) {
                for (j = 0; j < 8; j++) {
                        if (!((0x80 >> j) & ssd_area->path_mask &
                              ssd_area->fla_valid_mask))
                                continue;
                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
                        sch->ssd_info.fla[j]   = ssd_area->fla[j];
                }
        }
        return 0;
}

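/*
 * Obtain the subchannel description data for @sch and register channel
 * path structures for any chpids in the path installed mask that are not
 * known yet. The chsc error message is printed only once per boot.
 */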
int
css_get_ssd_info(struct subchannel *sch)
{
        int ret;
        void *page;

        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        spin_lock_irq(sch->lock);
        ret = chsc_get_sch_desc_irq(sch, page);
        if (ret) {
                static int cio_chsc_err_msg;

                if (!cio_chsc_err_msg) {
                        printk(KERN_ERR
                               "chsc_get_sch_descriptions:"
                               " Error %d while doing chsc; "
                               "processing some machine checks may "
                               "not work\n", ret);
                        cio_chsc_err_msg = 1;
                }
        }
        spin_unlock_irq(sch->lock);
        free_page((unsigned long)page);
        if (!ret) {
                int j, mask;
                struct chp_id chpid;

                chp_id_init(&chpid);
                /* Allocate channel path structures, if needed. */
                for (j = 0; j < 8; j++) {
                        mask = 0x80 >> j;
                        chpid.id = sch->ssd_info.chpid[j];
                        if ((sch->schib.pmcw.pim & mask) &&
                            !chp_is_registered(chpid))
                                chp_new(chpid);
                }
        }
        return ret;
}

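/*
 * bus_for_each_dev() callback used by chsc_chp_offline(): remove the given
 * chpid (passed via @data) from a subchannel. Depending on the state of
 * the subchannel this terminates I/O running on the last remaining path,
 * triggers path verification, or schedules the subchannel for
 * reevaluation via the slow path.
 */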
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
        int j;
        int mask;
        struct subchannel *sch;
        struct chp_id *chpid;
        struct schib schib;

        sch = to_subchannel(dev);
        chpid = data;
        for (j = 0; j < 8; j++) {
                mask = 0x80 >> j;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[j] == chpid->id))
                        break;
        }
        if (j >= 8)
                return 0;

        spin_lock_irq(sch->lock);

        stsch(sch->schid, &schib);
        if (!schib.pmcw.dnv)
                goto out_unreg;
        memcpy(&sch->schib, &schib, sizeof(struct schib));
        /* Check for single path devices. */
        if (sch->schib.pmcw.pim == 0x80)
                goto out_unreg;

        if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
            (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
            (sch->schib.pmcw.lpum == mask)) {
                int cc;

                cc = cio_clear(sch);
                if (cc == -ENODEV)
                        goto out_unreg;
                /* Request retry of internal operation. */
                device_set_intretry(sch);
                /* Call handler. */
                if (sch->driver && sch->driver->termination)
                        sch->driver->termination(&sch->dev);
                goto out_unlock;
        }

        /* trigger path verification. */
        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);
        else if (sch->lpm == mask)
                goto out_unreg;
out_unlock:
        spin_unlock_irq(sch->lock);
        return 0;
out_unreg:
        spin_unlock_irq(sch->lock);
        sch->lpm = 0;
        if (css_enqueue_subchannel_slow(sch->schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
        }
        return 0;
}

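/**
 * chsc_chp_offline - process an offline channel path
 * @chpid: channel-path ID
 *
 * Remove the channel path from all subchannels using it and let the
 * slow-path worker reevaluate affected subchannels.
 */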
void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        bus_for_each_dev(&css_bus_type, NULL, &chpid,
                         s390_subchannel_remove_chpid);

        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
}

struct res_acc_data {
        struct chp_id chpid;
        u32 fla_mask;
        u16 fla;
};

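/*
 * Check whether the cached ssd info of @sch contains a path matching the
 * chpid/link address in @res_data. Returns the path mask bit (0x80 >> chp)
 * of the matching path after refreshing the schib, or 0 if no path matches
 * or the schib could not be refreshed.
 */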
static int s390_process_res_acc_sch(struct res_acc_data *res_data,
                                    struct subchannel *sch)
{
        int found;
        int chp;
        int ccode;

        found = 0;
        for (chp = 0; chp <= 7; chp++)
                /*
                 * check if chpid is in information updated by ssd
                 */
                if (sch->ssd_info.valid &&
                    sch->ssd_info.chpid[chp] == res_data->chpid.id &&
                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
                    == res_data->fla) {
                        found = 1;
                        break;
                }

        if (found == 0)
                return 0;

        /*
         * Do a stsch to update our subchannel structure with the
         * new path information and possibly check for logically
         * offline chpids.
         */
        ccode = stsch(sch->schid, &sch->schib);
        if (ccode > 0)
                return 0;

        return 0x80 >> chp;
}

static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
        int chp_mask, old_lpm;
        struct res_acc_data *res_data;
        struct subchannel *sch;

        res_data = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
                return s390_process_res_acc_new_sch(schid);

        spin_lock_irq(sch->lock);

        chp_mask = s390_process_res_acc_sch(res_data, sch);

        if (chp_mask == 0) {
                spin_unlock_irq(sch->lock);
                put_device(&sch->dev);
                return 0;
        }
        old_lpm = sch->lpm;
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | chp_mask) & sch->opm;
        if (!old_lpm && sch->lpm)
                device_trigger_reprobe(sch);
        else if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}

static int
s390_process_res_acc(struct res_acc_data *res_data)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }

        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        rc = for_each_subchannel(__s390_process_res_acc, res_data);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        else if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

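/*
 * Extract the chpid from a link-incident record: the chpid is byte 3 of
 * the first incident-node-descriptor word. Returns -EINVAL for null
 * records, invalid node descriptors and device-type nodes.
 */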
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                /* incident-node descriptor */
                u32 indesc[28];
                /* attached-node descriptor */
                u32 andesc[28];
                /* incident-specific information */
                u32 isinfo[28];
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq&0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0]&0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0]&0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

        return (u16) (lir->indesc[0]&0x000000ff);
}

struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24];        /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }

        return 0;
}

static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;
        int rc;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return 0;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return 0;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        rc = s390_process_res_acc(&res_data);

        return rc;
}

static int chsc_process_sei(struct chsc_sei_area *sei_area)
{
        int rc;

        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40)
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
        /* which kind of information was stored? */
        rc = 0;
        switch (sei_area->cc) {
        case 1: /* link incident */
                rc = chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                rc = chsc_process_sei_res_acc(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }

        return rc;
}

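/*
 * Called from the machine check handler for channel-report words with
 * reporting source chsc: repeatedly issue store-event-information
 * (command 0x000e) on the statically allocated sei_page and process each
 * event until the response no longer flags pending information. Returns
 * the last non-zero result of event processing, e.g. -EAGAIN when
 * affected subchannels were put on the slow path.
 */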
int chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;
        int ret;
        int rc;

        if (!sei_page)
                return 0;
        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        ret = 0;
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        rc = chsc_process_sei(sei_area);
                        if (rc)
                                ret = rc;
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        ret = 0;
                        break;
                }
        } while (sei_area->flags & 0x80);

        return ret;
}

static int
__chp_add_new_sch(struct subchannel_id schid)
{
        struct schib schib;
        int ret;

        if (stsch_err(schid, &schib))
                /* We're through */
                return need_rescan ? -EAGAIN : -ENXIO;

        /* Put it on the slow path. */
        ret = css_enqueue_subchannel_slow(schid);
        if (ret) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

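/*
 * for_each_subchannel() callback used by chsc_chp_online(): if @schid
 * uses the newly available chpid (passed via @data), refresh the schib,
 * add the path to the subchannel's logical path mask and trigger path
 * verification. Unknown subchannels are put on the slow path for
 * evaluation.
 */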
static int
__chp_add(struct subchannel_id schid, void *data)
{
        int i, mask;
        struct chp_id *chpid;
        struct subchannel *sch;

        chpid = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
                return __chp_add_new_sch(schid);
        spin_lock_irq(sch->lock);
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if ((sch->schib.pmcw.pim & mask) &&
                    (sch->schib.pmcw.chpid[i] == chpid->id)) {
                        if (stsch(sch->schid, &sch->schib) != 0) {
                                /* Endgame. */
                                spin_unlock_irq(sch->lock);
                                /* Drop reference from get_subchannel_by_schid. */
                                put_device(&sch->dev);
                                return -ENXIO;
                        }
                        break;
                }
        }
        if (i == 8) {
                spin_unlock_irq(sch->lock);
                return 0;
        }
        sch->lpm = ((sch->schib.pmcw.pim &
                     sch->schib.pmcw.pam &
                     sch->schib.pmcw.pom)
                    | mask) & sch->opm;

        if (sch->driver && sch->driver->verify)
                sch->driver->verify(&sch->dev);

        spin_unlock_irq(sch->lock);
        put_device(&sch->dev);
        return 0;
}

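/**
 * chsc_chp_online - process a channel path that became available
 * @chpid: channel-path ID
 *
 * Scan all subchannels for users of @chpid and trigger path verification
 * on them. Returns -EAGAIN if subchannels still await slow-path
 * evaluation, 0 otherwise.
 */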
int chsc_chp_online(struct chp_id chpid)
{
        int rc;
        char dbf_txt[15];

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) == 0)
                return 0;
        rc = for_each_subchannel(__chp_add, &chpid);
        if (css_slow_subchannels_exist())
                rc = -EAGAIN;
        if (rc != -EAGAIN)
                rc = 0;
        return rc;
}

static int check_for_io_on_path(struct subchannel *sch, int index)
{
        int cc;

        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
        if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
                return 1;
        return 0;
}

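/*
 * Terminate internal (i.e. not device-driver initiated) I/O on @sch by
 * clearing the subchannel and requesting a retry of the operation. If the
 * clear fails, fall back to path verification or the slow path.
 */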
static void terminate_internal_io(struct subchannel *sch)
{
        if (cio_clear(sch)) {
                /* Recheck device in case clear failed. */
                sch->lpm = 0;
                if (device_trigger_verify(sch) != 0) {
                        if (css_enqueue_subchannel_slow(sch->schid)) {
                                css_clear_subchannel_slow_list();
                                need_rescan = 1;
                        }
                }
                return;
        }
        /* Request retry of internal operation. */
        device_set_intretry(sch);
        /* Call handler. */
        if (sch->driver && sch->driver->termination)
                sch->driver->termination(&sch->dev);
}

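/*
 * Adjust the opm/lpm masks of @sch for a chpid that was varied online
 * (@on != 0) or offline (@on == 0). Varying online may trigger a reprobe
 * or path verification; varying offline kills I/O that is active on the
 * removed path and may schedule the subchannel for slow-path evaluation.
 */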
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        int chp, old_lpm;
        unsigned long flags;

        if (!sch->ssd_info.valid)
                return;

        spin_lock_irqsave(sch->lock, flags);
        old_lpm = sch->lpm;
        for (chp = 0; chp < 8; chp++) {
                if (sch->ssd_info.chpid[chp] != chpid.id)
                        continue;

                if (on) {
                        sch->opm |= (0x80 >> chp);
                        sch->lpm |= (0x80 >> chp);
                        if (!old_lpm)
                                device_trigger_reprobe(sch);
                        else if (sch->driver && sch->driver->verify)
                                sch->driver->verify(&sch->dev);
                        break;
                }
                sch->opm &= ~(0x80 >> chp);
                sch->lpm &= ~(0x80 >> chp);
                if (check_for_io_on_path(sch, chp)) {
                        if (device_is_online(sch))
                                /* Path verification is done after killing. */
                                device_kill_io(sch);
                        else
                                /* Kill and retry internal I/O. */
                                terminate_internal_io(sch);
                } else if (!sch->lpm) {
                        if (device_trigger_verify(sch) != 0) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
                                }
                        }
                } else if (sch->driver && sch->driver->verify)
                        sch->driver->verify(&sch->dev);
                break;
        }
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct chp_id *chpid;

        sch = to_subchannel(dev);
        chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;
        struct subchannel *sch;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                put_device(&sch->dev);
                return 0;
        }
        if (stsch_err(schid, &schib))
                /* We're through */
                return -ENXIO;
        /* Put it on the slow path. */
        if (css_enqueue_subchannel_slow(schid)) {
                css_clear_subchannel_slow_list();
                need_rescan = 1;
                return -EAGAIN;
        }
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /*
         * Redo PathVerification on the devices the chpid connects to
         */

        bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
                         s390_subchannel_vary_chpid_on :
                         s390_subchannel_vary_chpid_off);
        if (on)
                /* Scan for new devices on varied on path. */
                for_each_subchannel(__s390_vary_chpid_on, NULL);
        if (need_rescan || css_slow_subchannels_exist())
                queue_work(slow_path_wq, &slow_path_work);
        return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}

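/*
 * Issue a set-channel-monitor chsc (command 0x0016) to start or stop
 * channel measurement for @css, passing the addresses of the two cub
 * pages (css->cub_addr1/2). @page provides the request/response area.
 */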
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0001: /* Success. */
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide secm\n");
                ret = -EOPNOTSUPP;
                break;
        case 0x0102: /* cub addresses incorrect */
                CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
                ret = -EINVAL;
                break;
        case 0x0103: /* key error */
                CIO_CRW_EVENT(2, "Access key error in secm\n");
                ret = -EINVAL;
                break;
        case 0x0105: /* error while starting */
                CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
                ret = -EIO;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              secm_area->response.code);
                ret = -EIO;
        }
        return ret;
}

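/**
 * chsc_secm - enable or disable channel measurement
 * @css: channel subsystem to operate on
 * @enable: non-zero to enable, zero to disable
 *
 * Allocates the cub pages when enabling and adds or removes the
 * measurement-related attributes of all channel paths accordingly.
 */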
int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        mutex_lock(&css->mutex);
        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        mutex_unlock(&css->mutex);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (enable && !css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        mutex_unlock(&css->mutex);
        free_page((unsigned long)secm_area);
        return ret;
}

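/*
 * Fetch the channel-path description for @chpid via a
 * store-channel-path-description chsc (command 0x0002) and copy it to
 * @desc.
 */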
int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scpd_area->response.code) {
        case 0x0001: /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Other invalid block. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided in model. */
                CIO_CRW_EVENT(2, "Model does not provide scpd\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scpd_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}

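/*
 * Retrieve the channel-measurement characteristics of chp->chpid (chsc
 * command 0x0022) and store cmg, the shared state and, for cmg 2 and 3,
 * the cmg-dependent characteristics in @chp. cmg and shared are set to
 * -1 if the chsc reports the data as not valid.
 */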
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (scmc_area->response.code) {
        case 0x0001: /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
                ret = 0;
                break;
        case 0x0003: /* Invalid block. */
        case 0x0007: /* Invalid format. */
        case 0x0008: /* Invalid bit combination. */
                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
                ret = -EINVAL;
                break;
        case 0x0004: /* Command not provided. */
                CIO_CRW_EVENT(2, "Model does not provide scmc\n");
                ret = -EOPNOTSUPP;
                break;
        default:
                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
                              scmc_area->response.code);
                ret = -EIO;
        }
out:
        free_page((unsigned long)scmc_area);
        return ret;
}

static int __init
chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                printk(KERN_WARNING "Can't allocate page for processing of "
                       "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}

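/*
 * Enable a channel-subsystem facility identified by @operation_code via
 * chsc command 0x0031. Returns -EOPNOTSUPP if the command or the facility
 * is not provided by the machine.
 */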
int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }
        switch (sda_area->response.code) {
        case 0x0001: /* everything ok */
                ret = 0;
                break;
        case 0x0003: /* invalid request block */
        case 0x0007:
                ret = -EINVAL;
                break;
        case 0x0004: /* command not provided */
        case 0x0101: /* facility not provided */
                ret = -EOPNOTSUPP;
                break;
        default: /* something went wrong */
                ret = -EIO;
        }
 out:
        free_page((unsigned long)sda_area);
        return ret;
}

subsys_initcall(chsc_alloc_sei_area);

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

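/*
 * Retrieve the general and chsc characteristics of the channel subsystem
 * (store-channel-subsystem-characteristics chsc, command 0x0010) into the
 * exported css_general_characteristics and css_chsc_characteristics.
 */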
int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs due to no memory.\n");
                return -ENOMEM;
        }

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs, cc=%i.\n", result);
                result = -EIO;
                goto exit;
        }

        if (scsc_area->response.code != 1) {
                printk(KERN_WARNING "cio: Was not able to determine "
                       "available CHSCs.\n");
                result = -EIO;
                goto exit;
        }
        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);