/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

/* Map a CHSC response code to a Linux error code (body reconstructed from
 * the response codes checked by the callers in this file). */
static int chsc_error_from_response(int response)
{
        switch (response) {
        case 0x0001:            /* command executed successfully */
                return 0;
        case 0x0002:
        case 0x0003:
        case 0x0006:
        case 0x0007:
        case 0x0008:
        case 0x000a:
                return -EINVAL;
        case 0x0004:            /* command not provided */
                return -EOPNOTSUPP;
        default:
                return -EIO;
        }
}

struct chsc_ssd_area {
        struct chsc_header request;
        u16 :10;
        u16 ssid:2;
        u16 :4;
        u16 f_sch;        /* first subchannel */
        u16 :16;
        u16 l_sch;        /* last subchannel */
        u32 :32;
        struct chsc_header response;
        u32 :32;
        u8 sch_valid : 1;
        u8 dev_valid : 1;
        u8 st        : 3; /* subchannel type */
        u8 zeroes    : 3;
        u8  unit_addr;    /* unit address */
        u16 devno;        /* device number */
        u8 path_mask;
        u8 fla_valid_mask;
        u16 sch;          /* subchannel */
        u8 chpid[8];      /* chpids 0-7 */
        u16 fla[8];       /* full link addresses 0-7 */
} __attribute__ ((packed));

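/*
 * All CHSC commands issued in this file follow the same request/response
 * convention: fill a zeroed DMA page with a request block whose header
 * holds the request length and command code (here 0x0010/0x0004 for the
 * subchannel-description request), execute the chsc instruction on it and
 * then inspect response.code, where 0x0001 indicates success.
 */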
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
        unsigned long page;
        struct chsc_ssd_area *ssd_area;
        int ccode;
        int ret;
        int i;
        int mask;

        page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page)
                return -ENOMEM;
        ssd_area = (struct chsc_ssd_area *) page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
        ssd_area->ssid = schid.ssid;
        ssd_area->f_sch = schid.sch_no;
        ssd_area->l_sch = schid.sch_no;

        ccode = chsc(ssd_area);
        /* Check response. */
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out_free;
        }
        ret = chsc_error_from_response(ssd_area->response.code);
        if (ret != 0) {
                CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              ssd_area->response.code);
                goto out_free;
        }
        if (!ssd_area->sch_valid) {
                ret = -ENODEV;
                goto out_free;
        }
        /* Copy data. */
        ret = 0;
        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
            (ssd_area->st != SUBCHANNEL_TYPE_MSG))
                goto out_free;
        ssd->path_mask = ssd_area->path_mask;
        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd_area->path_mask & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = ssd_area->chpid[i];
                }
                if (ssd_area->fla_valid_mask & mask)
                        ssd->fla[i] = ssd_area->fla[i];
        }
out_free:
        free_page(page);
        return ret;
}

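/*
 * Hypothetical usage sketch (for illustration only; the real caller is
 * the subchannel setup code in css.c):
 *
 *	struct chsc_ssd_info ssd;
 *
 *	if (chsc_get_ssd_info(sch->schid, &ssd) == 0) {
 *		... ssd.path_mask, ssd.chpid[] and ssd.fla[] now
 *		... describe the paths to the subchannel
 *	}
 */
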
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
                        goto out_unreg;
        spin_unlock_irq(sch->lock);
        return 0;

out_unreg:
        sch->lpm = 0;
        spin_unlock_irq(sch->lock);
        css_schedule_eval(sch->schid);
        return 0;
}

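/**
 * chsc_chp_offline - process a channel path that has gone offline
 * @chpid: channel-path ID
 *
 * Notify the drivers of all affected subchannels and schedule the
 * subchannels for re-evaluation on the slow path.
 */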
void chsc_chp_offline(struct chp_id chpid)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
                return;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
        struct schib schib;
        /*
         * We don't know the device yet, but since a path to it may now
         * be available, we have to do recognition again.
         * Since we don't know which chpid the device sits on, we have
         * to do a stsch on all devices, grr...
         */
        if (stsch_err(schid, &schib))
                /* We're through. */
                return -ENXIO;

        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, data, CHP_ONLINE);
        spin_unlock_irq(sch->lock);

        return 0;
}

static void s390_process_res_acc(struct res_acc_data *res_data)
{
        char dbf_txt[15];

        sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
                res_data->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (res_data->fla != 0) {
                sprintf(dbf_txt, "fla%x", res_data->fla);
                CIO_TRACE_EVENT(2, dbf_txt);
        }
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        for_each_subchannel_staged(__s390_process_res_acc,
                                   s390_process_res_acc_new_sch, res_data);
}

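/*
 * Extract the CHPID from a link-incident record (LIR). Per the checks
 * below, the record must be non-NULL and its incident-node descriptor
 * must be valid and must not describe a device-type node; the CHPID is
 * then taken from the low byte of the first descriptor word.
 */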
static int
__get_chpid_from_lir(void *data)
{
        struct lir {
                u8  iq;
                u8  ic;
                u16 sci;
                u32 indesc[28];         /* incident-node descriptor */
                u32 andesc[28];         /* attached-node descriptor */
                u32 isinfo[28];         /* incident-specific information */
        } __attribute__ ((packed)) *lir;

        lir = data;
        if (!(lir->iq & 0x80))
                /* NULL link incident record */
                return -EINVAL;
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
                return -EINVAL;
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
                return -EINVAL;
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care. */
        return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
        struct chsc_header request;
        u32 reserved1;
        u32 reserved2;
        u32 reserved3;
        struct chsc_header response;
        u32 reserved4;
        u8  flags;
        u8  vf;         /* validity flags */
        u8  rs;         /* reporting source */
        u8  cc;         /* content code */
        u16 fla;        /* full link address */
        u16 rsid;       /* reporting source id */
        u32 reserved5;
        u32 reserved6;
        u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
        struct chp_id chpid;
        int id;

        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        id = __get_chpid_from_lir(sei_area->ccdf);
        if (id < 0)
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
        else {
                chp_id_init(&chpid);
                chpid.id = id;
                chsc_chp_offline(chpid);
        }
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
        struct res_acc_data res_data;
        struct chp_id chpid;
        int status;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
                return;
        chp_id_init(&chpid);
        chpid.id = sei_area->rsid;
        /* Allocate a new channel path structure, if needed. */
        status = chp_get_status(chpid);
        if (status < 0)
                chp_new(chpid);
        else if (!status)
                return;
        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
                res_data.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        res_data.fla_mask = 0xffff;
                else
                        /* link address */
                        res_data.fla_mask = 0xff00;
        }
        s390_process_res_acc(&res_data);
}

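/*
 * Layout of the content-code-dependent field (ccdf) for a
 * channel-path-configuration notification (content code 8): a bitmap of
 * the affected CHPIDs plus the requested configure operation.
 */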
struct chp_config_data {
        u8 map[32];
        u8 op;
        u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
        struct chp_config_data *data;
        struct chp_id chpid;
        int num;

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
                return;
        data = (struct chp_config_data *) &(sei_area->ccdf);
        chp_id_init(&chpid);
        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                        continue;
                chpid.id = num;
                printk(KERN_WARNING "cio: processing configure event %d for "
                       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
                switch (data->op) {
                case 0:
                        chp_cfg_schedule(chpid, 1);
                        break;
                case 1:
                        chp_cfg_schedule(chpid, 0);
                        break;
                case 2:
                        chp_cfg_cancel_deconfigure(chpid);
                        break;
                }
        }
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
                css_schedule_eval_all();
        }
        /* Which kind of information was stored? */
        switch (sei_area->cc) {
        case 1: /* link incident */
                chsc_process_sei_link_incident(sei_area);
                break;
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
        case 8: /* channel-path-configuration notification */
                chsc_process_sei_chp_config(sei_area);
                break;
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
                break;
        }
}

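/**
 * chsc_process_crw - process CHSC events signaled by a channel report word
 *
 * Repeatedly issue the store-event-information command (code 0x000e) on
 * sei_page and dispatch each stored event, until the response flags no
 * longer indicate further pending events (the 0x80 bit tested below).
 */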
void chsc_process_crw(void)
{
        struct chsc_sei_area *sei_area;

        if (!sei_page)
                return;
        /* Access to sei_page is serialized through the machine check
         * handler thread, so no need for locking. */
        sei_area = sei_page;

        CIO_TRACE_EVENT(2, "prcss");
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;
                if (chsc(sei_area))
                        break;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        chsc_process_sei(sei_area);
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
                        break;
                }
        } while (sei_area->flags & 0x80);
}

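/**
 * chsc_chp_online - process a channel path that has become available
 * @chpid: channel-path ID
 *
 * If the channel path is known to be operational, notify the drivers of
 * all subchannels that may now have an additional path.
 */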
void chsc_chp_online(struct chp_id chpid)
{
        char dbf_txt[15];
        struct res_acc_data res_data;

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) != 0) {
                memset(&res_data, 0, sizeof(struct res_acc_data));
                res_data.chpid = chpid;
                /* Wait until previous actions have settled. */
                css_wait_for_slow_path();
                for_each_subchannel_staged(__s390_process_res_acc, NULL,
                                           &res_data);
        }
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
{
        unsigned long flags;
        struct res_acc_data res_data;

        memset(&res_data, 0, sizeof(struct res_acc_data));
        res_data.chpid = chpid;
        spin_lock_irqsave(sch->lock, flags);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, &res_data,
                                       on ? CHP_VARY_ON : CHP_VARY_OFF);
        spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);
        return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
        return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                /* We're through. */
                return -ENXIO;
        /* Put it on the slow path. */
        css_schedule_eval(schid);
        return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * Redo PathVerification on the devices the chpid connects to.
         */
        if (on)
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
                                           __s390_vary_chpid_on, &chpid);
        else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
                                           NULL, &chpid);

        return 0;
}

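/*
 * Hypothetical usage sketch (for illustration only): the vary attribute
 * of the channel-path sysfs code in chp.c is the expected caller,
 * invoking chsc_chp_vary(chpid, 1) to vary a path online and
 * chsc_chp_vary(chpid, 0) to vary it offline.
 */
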
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
        int i;

        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
        int i, ret;

        ret = 0;
        for (i = 0; i <= __MAX_CHPID; i++) {
                if (!css->chps[i])
                        continue;
                ret = chp_add_cmg_attr(css->chps[i]);
                if (ret)
                        goto cleanup;
        }
        return ret;
cleanup:
        /* Undo the attributes added so far. */
        for (--i; i >= 0; i--) {
                if (!css->chps[i])
                        continue;
                chp_remove_cmg_attr(css->chps[i]);
        }
        return ret;
}

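/*
 * Issue the set-channel-monitor (secm) command. The cub_addr1/cub_addr2
 * fields carry the addresses of the two measurement buffer pages ("cub"
 * presumably stands for channel-utilization block) allocated by
 * chsc_secm() below; operation code 0 enables and 1 disables channel
 * measurement.
 */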
static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                u32 : 30;
                u32 key : 4;
                u32 : 28;
                u32 zeroes1;
                u32 cub_addr1;
                u32 zeroes2;
                u32 cub_addr2;
                u32 reserved[13];
                struct chsc_header response;
                u32 status : 8;
                u32 : 4;
                u32 fmt : 4;
                u32 : 16;
        } __attribute__ ((packed)) *secm_area;
        int ret, ccode;

        secm_area = page;
        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
        if (ccode > 0)
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        case 0x0102:
        case 0x0103:
                ret = -EINVAL;
                break;
        default:
                ret = chsc_error_from_response(secm_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
                              secm_area->response.code);
        return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
        void *secm_area;
        int ret;

        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!secm_area)
                return -ENOMEM;

        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);
                        return -ENOMEM;
                }
        }
        ret = __chsc_do_secm(css, enable, secm_area);
        if (!ret) {
                css->cm_enabled = enable;
                if (css->cm_enabled) {
                        ret = chsc_add_cmg_attr(css);
                        if (ret) {
                                memset(secm_area, 0, PAGE_SIZE);
                                __chsc_do_secm(css, 0, secm_area);
                                css->cm_enabled = 0;
                        }
                } else
                        chsc_remove_cmg_attr(css);
        }
        if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);
        }
        free_page((unsigned long)secm_area);
        return ret;
}

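/**
 * chsc_determine_channel_path_description - get channel-path description
 * @chpid: channel-path ID
 * @desc: where to store the channel-path description
 *
 * Issue the store-channel-path-description (scpd) command for a single
 * CHPID and copy the returned description to @desc. Returns 0 on success.
 */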
int chsc_determine_channel_path_description(struct chp_id chpid,
                                            struct channel_path_desc *desc)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                struct channel_path_desc desc;
        } __attribute__ ((packed)) *scpd_area;

        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scpd_area)
                return -ENOMEM;

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;

        ccode = chsc(scpd_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scpd_area->response.code);
        if (ret == 0)
                /* Success. */
                memcpy(desc, &scpd_area->desc,
                       sizeof(struct channel_path_desc));
        else
                CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
                              scpd_area->response.code);
out:
        free_page((unsigned long)scpd_area);
        return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
{
        switch (chp->cmg) {
        case 2:
        case 3:
                chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                         GFP_KERNEL);
                if (chp->cmg_chars) {
                        int i, mask;
                        struct cmg_chars *cmg_chars;

                        cmg_chars = chp->cmg_chars;
                        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                                mask = 0x80 >> (i + 3);
                                if (cmcv & mask)
                                        cmg_chars->values[i] = chars->values[i];
                                else
                                        cmg_chars->values[i] = 0;
                        }
                }
                break;
        default:
                /* No cmg-dependent data. */
                break;
        }
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
        int ccode, ret;

        struct {
                struct chsc_header request;
                u32 : 24;
                u32 first_chpid : 8;
                u32 : 24;
                u32 last_chpid : 8;
                u32 zeroes1;
                struct chsc_header response;
                u32 zeroes2;
                u32 not_valid : 1;
                u32 shared : 1;
                u32 : 22;
                u32 chpid : 8;
                u32 cmcv : 5;
                u32 : 11;
                u32 cmgq : 8;
                u32 cmg : 8;
                u32 zeroes3;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scmc_area)
                return -ENOMEM;

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        if (ccode > 0) {
                ret = (ccode == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        ret = chsc_error_from_response(scmc_area->response.code);
        if (ret == 0) {
                /* Success. */
                if (!scmc_area->not_valid) {
                        chp->cmg = scmc_area->cmg;
                        chp->shared = scmc_area->shared;
                        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                                  (struct cmg_chars *)
                                                  &scmc_area->data);
                } else {
                        chp->cmg = -1;
                        chp->shared = -1;
                }
        } else
                CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
                              scmc_area->response.code);
out:
        free_page((unsigned long)scmc_area);
        return ret;
}

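/*
 * The sei_page buffer used by chsc_process_crw() is allocated once at
 * boot; chsc_process_crw() runs from the machine check handler thread
 * and relies on this page always being available.
 */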
int __init chsc_alloc_sei_area(void)
{
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sei_page)
                CIO_MSG_EVENT(0, "Can't allocate page for processing of "
                              "chsc machine checks!\n");
        return (sei_page ? 0 : -ENOMEM);
}

void __init
chsc_free_sei_area(void)
{
        /* sei_page comes from get_zeroed_page(), so free_page(), not kfree(). */
        free_page((unsigned long)sei_page);
}

int __init
chsc_enable_facility(int operation_code)
{
        int ret;
        struct {
                struct chsc_header request;
                u8 reserved1:4;
                u8 format:4;
                u8 reserved2;
                u16 operation_code;
                u32 reserved3;
                u32 reserved4;
                u32 operation_data_area[252];
                struct chsc_header response;
                u32 reserved5:4;
                u32 format2:4;
                u32 reserved6:24;
        } __attribute__ ((packed)) *sda_area;

        sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sda_area)
                return -ENOMEM;
        sda_area->request.length = 0x0400;
        sda_area->request.code = 0x0031;
        sda_area->operation_code = operation_code;

        ret = chsc(sda_area);
        if (ret > 0) {
                ret = (ret == 3) ? -ENODEV : -EBUSY;
                goto out;
        }

        switch (sda_area->response.code) {
        case 0x0101:
                ret = -EOPNOTSUPP;
                break;
        default:
                ret = chsc_error_from_response(sda_area->response.code);
        }
        if (ret != 0)
                CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
                              operation_code, sda_area->response.code);
out:
        free_page((unsigned long)sda_area);
        return ret;
}

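/*
 * Hypothetical usage sketch (for illustration only): early setup code
 * enables an optional facility by passing its operation code, e.g.
 *
 *	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
 *
 * where CHSC_SDA_OC_MSS is assumed to be the operation code constant for
 * the multiple-subchannel-set facility.
 */
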
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
        int result;
        struct {
                struct chsc_header request;
                u32 reserved1;
                u32 reserved2;
                u32 reserved3;
                struct chsc_header response;
                u32 reserved4;
                u32 general_char[510];
                u32 chsc_char[518];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!scsc_area)
                return -ENOMEM;

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        if (result) {
                result = (result == 3) ? -ENODEV : -EBUSY;
                goto exit;
        }

        result = chsc_error_from_response(scsc_area->response.code);
        if (result == 0) {
                memcpy(&css_general_characteristics, scsc_area->general_char,
                       sizeof(css_general_characteristics));
                memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
                       sizeof(css_chsc_characteristics));
        } else
                CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
                              scsc_area->response.code);
exit:
        free_page((unsigned long)scsc_area);
        return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);