/*
 * drivers/s390/cio/cio.c
 *  S/390 common I/O routines -- low level i/o calls
 *
 *    Copyright IBM Corp. 1999,2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <asm/cpu.h>
#include <asm/fcx.h>
#include "cio.h"
#include "css.h"
#include "chsc.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "chp.h"
#include "../s390mach.h"
debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;
/*
 * Function: cio_debug_init
 * Initializes three debug logs for common I/O:
 * - cio_msg logs generic cio messages
 * - cio_trace logs the calling of different functions
 * - cio_crw logs machine check related cio messages
 */
static int __init cio_debug_init(void)
{
	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
	if (!cio_debug_msg_id)
		goto out_unregister;
	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
	debug_set_level(cio_debug_msg_id, 2);
	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
	if (!cio_debug_trace_id)
		goto out_unregister;
	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(cio_debug_trace_id, 2);
	cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
	if (!cio_debug_crw_id)
		goto out_unregister;
	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
	debug_set_level(cio_debug_crw_id, 4);
	return 0;

out_unregister:
	if (cio_debug_msg_id)
		debug_unregister(cio_debug_msg_id);
	if (cio_debug_trace_id)
		debug_unregister(cio_debug_trace_id);
	if (cio_debug_crw_id)
		debug_unregister(cio_debug_crw_id);
	printk(KERN_WARNING "cio: could not initialize debugging\n");
	return -1;
}

arch_initcall (cio_debug_init);
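
/*
 * Added note (not in the original source): the rest of this file logs
 * through the wrapper macros from cio_debug.h, e.g.
 *
 *	CIO_MSG_EVENT(2, "printf-style message: %d\n", value);
 *	CIO_TRACE_EVENT(4, "tag");
 *
 * The records can be inspected at runtime through the s390 debug
 * feature (debugfs, under s390dbf/cio_msg etc.).
 */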
int
cio_set_options (struct subchannel *sch, int flags)
{
	sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
	sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
	sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
	return 0;
}
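
/*
 * Illustrative usage (not in the original source): a driver that must
 * not have its channel programs prefetched would set the option before
 * starting I/O:
 *
 *	cio_set_options(sch, DOIO_DENY_PREFETCH);
 *	ret = cio_start(sch, cpa, lpm);
 */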
/* FIXME: who wants to use this? */
int
cio_get_options (struct subchannel *sch)
{
	int flags;

	flags = 0;
	if (sch->options.suspend)
		flags |= DOIO_ALLOW_SUSPEND;
	if (sch->options.prefetch)
		flags |= DOIO_DENY_PREFETCH;
	if (sch->options.inter)
		flags |= DOIO_SUPPRESS_INTER;
	return flags;
}
/*
 * Use tpi to get a pending interrupt, call the interrupt handler and
 * return a pointer to the subchannel structure.
 */
static int cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	if (tpi(NULL) != 1)
		return 0;
	irb = (struct irb *) __LC_IRB;
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) != 0)
		/* Not status pending or not operational. */
		return 1;
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch)
		return 1;
	local_bh_disable();
	irq_enter();
	spin_lock(sch->lock);
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	spin_unlock(sch->lock);
	irq_exit();
	_local_bh_enable();
	return 1;
}
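
/*
 * Added note (not in the original source): cio_tpi() is the polling
 * backend used by wait_cons_dev() below, which busy-waits for console
 * interrupts while interrupt delivery is restricted to the console ISC.
 */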
static int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;
	stsch (sch->schid, &sch->schib);
	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
		      sch->schid.sch_no);
	sprintf(dbf_text, "no%s", sch->dev.bus_id);
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
	return (sch->lpm ? -EACCES : -ENODEV);
}
int cio_start_key (struct subchannel *sch,	/* subchannel structure */
		   struct ccw1 * cpa,		/* logical channel prog addr */
		   __u8 lpm,			/* logical path mask */
		   __u8 key)			/* storage key */
{
	char dbf_txt[15];
	int ccode;
	union orb *orb;

	CIO_TRACE_EVENT(4, "stIO");
	CIO_TRACE_EVENT(4, sch->dev.bus_id);

	orb = &to_io_private(sch)->orb;
	memset(orb, 0, sizeof(union orb));
	/* sch is always under 2G. */
	orb->cmd.intparm = (u32)(addr_t)sch;
	orb->cmd.fmt = 1;
	orb->cmd.pfch = sch->options.prefetch == 0;
	orb->cmd.spnd = sch->options.suspend;
	orb->cmd.ssic = sch->options.suspend && sch->options.inter;
	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
	/*
	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
	 */
	orb->cmd.c64 = 1;
	orb->cmd.i2k = 0;
#endif
	orb->cmd.key = key >> 4;
	/* issue "Start Subchannel" */
	orb->cmd.cpa = (__u32) __pa(cpa);
	ccode = ssch(sch->schid, orb);

	/* process condition code */
	sprintf(dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT(4, dbf_txt);

	switch (ccode) {
	case 0:
		/*
		 * initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:	/* device/path not operational */
		return cio_start_handle_notoper(sch, lpm);
	}
}
int
cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
	return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
}
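
/*
 * Illustrative sketch (not in the original source): a minimal caller
 * would build a single-CCW channel program and start it on an enabled
 * subchannel; "my_sch" and "buf" are hypothetical.
 *
 *	struct ccw1 ccw;
 *
 *	ccw.cmd_code = CCW_CMD_SENSE_ID;
 *	ccw.flags = CCW_FLAG_SLI;
 *	ccw.count = sizeof(buf);
 *	ccw.cda = (__u32) __pa(buf);
 *	ret = cio_start(my_sch, &ccw, 0);
 *
 * Completion is signalled asynchronously via the subchannel driver's
 * irq callback (see do_IRQ() below).
 */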
/*
 * resume suspended I/O operation
 */
int
cio_resume (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	CIO_TRACE_EVENT (4, "resIO");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);
	ccode = rsch (sch->schid);
	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (4, dbf_txt);
	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
		return 0;
	case 1:
		return -EBUSY;
	case 2:
		return -EINVAL;
	default:
		/*
		 * useless to wait for request completion
		 * as device is no longer operational!
		 */
		return -ENODEV;
	}
}
/*
 * halt I/O operation
 */
int
cio_halt(struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	if (!sch)
		return -ENODEV;
	CIO_TRACE_EVENT (2, "haltIO");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);
	/*
	 * Issue "Halt subchannel" and process condition code
	 */
	ccode = hsch (sch->schid);
	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);
	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:	/* device not operational */
		return -ENODEV;
	}
}
/*
 * Clear I/O operation
 */
int
cio_clear(struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	if (!sch)
		return -ENODEV;
	CIO_TRACE_EVENT (2, "clearIO");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);
	/*
	 * Issue "Clear subchannel" and process condition code
	 */
	ccode = csch (sch->schid);
	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);
	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
		return 0;
	default:	/* device not operational */
		return -ENODEV;
	}
}
/*
 * Function: cio_cancel
 * Issues a "Cancel Subchannel" on the specified subchannel
 * Note: We don't need any fancy intparms and flags here
 *	 since xsch is executed synchronously.
 * Only for common I/O internal use as for now.
 */
int
cio_cancel (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	if (!sch)
		return -ENODEV;
	CIO_TRACE_EVENT (2, "cancelIO");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);
	ccode = xsch (sch->schid);
	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);
	switch (ccode) {
	case 0:		/* success */
		/* Update information in scsw. */
		stsch (sch->schid, &sch->schib);
		return 0;
	case 1:		/* status pending */
		return -EBUSY;
	case 2:		/* not applicable */
		return -EINVAL;
	default:	/* not oper */
		return -ENODEV;
	}
}
/*
 * Function: cio_modify
 * Issues a "Modify Subchannel" on the specified subchannel
 */
int
cio_modify (struct subchannel *sch)
{
	int ccode, retry, ret;

	ret = 0;
	for (retry = 0; retry < 5; retry++) {
		ccode = msch_err (sch->schid, &sch->schib);
		if (ccode < 0)	/* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			return 0;
		case 1:	/* status pending */
			return -EBUSY;
		case 2:	/* busy */
			udelay (100);	/* allow for recovery */
			ret = -EBUSY;
			break;
		case 3: /* not operational */
			return -ENODEV;
		}
	}
	return ret;
}
/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "ensch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode)
		return -ENODEV;
	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 1;
		sch->schib.pmcw.isc = sch->isc;
		sch->schib.pmcw.intparm = intparm;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EIO)
			/*
			 * Got a program check in cio_modify. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->schib.pmcw.csense = 0;
		if (ret == 0) {
			stsch (sch->schid, &sch->schib);
			if (sch->schib.pmcw.ena)
				break;
		}
		if (ret == -EBUSY) {
			struct irb irb;

			if (tsch(sch->schid, &irb) != 0)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);
/**
 * cio_disable_subchannel - disable a subchannel.
 * @sch: subchannel to disable
 */
int cio_disable_subchannel(struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "dissch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	if (sch_is_pseudo_sch(sch))
		return 0;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode == 3)		/* Not operational. */
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		/*
		 * the disable function must not be called while there are
		 * requests pending for completion!
		 */
		return -EBUSY;
	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 0;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EBUSY)
			/*
			 * The subchannel is busy or status pending.
			 * We'll disable when the next interrupt is
			 * delivered via the state machine.
			 */
			break;
		if (ret == 0) {
			stsch (sch->schid, &sch->schib);
			if (!sch->schib.pmcw.ena)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
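
/*
 * Illustrative calling pattern (not in the original source), error
 * handling omitted; "sch" is assumed to be a validated subchannel:
 *
 *	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
 *	...issue cio_start()/cio_halt()/cio_clear(), handle interrupts...
 *	ret = cio_disable_subchannel(sch);
 */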
int cio_create_sch_lock(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;
	spin_lock_init(sch->lock);
	return 0;
}
static int cio_check_devno_blacklisted(struct subchannel *sch)
{
	if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
		/*
		 * This device must not be known to Linux. So we simply
		 * say that there is no device and return ENODEV.
		 */
		CIO_MSG_EVENT(6, "Blacklisted device detected "
			      "at devno %04X, subchannel set %x\n",
			      sch->schib.pmcw.dev, sch->schid.ssid);
		return -ENODEV;
	}
	return 0;
}
static int cio_validate_io_subchannel(struct subchannel *sch)
{
	/* Initialization for io subchannels. */
	if (!css_sch_is_valid(&sch->schib))
		return -ENODEV;
	/* Devno is valid. */
	return cio_check_devno_blacklisted(sch);
}
static int cio_validate_msg_subchannel(struct subchannel *sch)
{
	/* Initialization for message subchannels. */
	if (!css_sch_is_valid(&sch->schib))
		return -ENODEV;
	/* Devno is valid. */
	return cio_check_devno_blacklisted(sch);
}
/**
 * cio_validate_subchannel - basic validation of subchannel
 * @sch: subchannel structure to be filled out
 * @schid: subchannel id
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   0 on success
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for invalid subchannels or blacklisted devices
 *   -EIO for subchannels in an invalid subchannel set
 */
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	sprintf(dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT(4, dbf_txt);

	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

	sch->schid = schid;
	if (cio_is_console(schid)) {
		sch->lock = cio_get_console_lock();
	} else {
		err = cio_create_sch_lock(sch);
		if (err)
			goto out;
	}
	mutex_init(&sch->reg_mutex);
	/* Set a name for the subchannel */
	snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
		  schid.sch_no);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch_err (schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

	switch (sch->st) {
	case SUBCHANNEL_TYPE_IO:
		err = cio_validate_io_subchannel(sch);
		break;
	case SUBCHANNEL_TYPE_MSG:
		err = cio_validate_msg_subchannel(sch);
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      sch->schid.ssid, sch->schid.sch_no, sch->st);
	return 0;
out:
	if (!cio_is_console(schid))
		kfree(sch->lock);
	sch->lock = NULL;
	return err;
}
/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 */
void
do_IRQ (struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	s390_idle_check();
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	irb = (struct irb *) __LC_IRB;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
		}
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;

void *cio_get_console_priv(void)
{
	return &console_priv;
}
/*
 * busy wait for the next interrupt on the console
 */
void wait_cons_dev(void)
	__releases(console_subchannel.lock)
	__acquires(console_subchannel.lock)
{
	unsigned long cr6      __attribute__ ((aligned (8)));
	unsigned long save_cr6 __attribute__ ((aligned (8)));

	/*
	 * before entering the spinlock we may already have
	 * processed the interrupt on a different CPU...
	 */
	if (!console_subchannel_in_use)
		return;

	/* disable all but the console isc */
	__ctl_store (save_cr6, 6, 6);
	cr6 = 1UL << (31 - CONSOLE_ISC);
	__ctl_load (cr6, 6, 6);

	do {
		spin_unlock(console_subchannel.lock);
		if (!cio_tpi())
			cpu_relax();
		spin_lock(console_subchannel.lock);
	} while (console_subchannel.schib.scsw.cmd.actl != 0);
	/*
	 * restore previous isc value
	 */
	__ctl_load (save_cr6, 6, 6);
}
static int
cio_test_for_console(struct subchannel_id schid, void *data)
{
	if (stsch_err(schid, &console_subchannel.schib) != 0)
		return -ENXIO;
	if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
	    console_subchannel.schib.pmcw.dnv &&
	    (console_subchannel.schib.pmcw.dev == console_devno)) {
		console_irq = schid.sch_no;
		return 1; /* found */
	}
	return 0;
}
static int
cio_get_console_sch_no(void)
{
	struct subchannel_id schid;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch(schid, &console_subchannel.schib) != 0 ||
		    (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
		    !console_subchannel.schib.pmcw.dnv)
			return -1;
		console_devno = console_subchannel.schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
		if (console_irq == -1)
			return -1;
	} else {
		/* unlike in 2.4, we cannot autoprobe here, since
		 * the channel subsystem is not fully initialized.
		 * With some luck, the HWC console can take over */
		printk(KERN_WARNING "cio: No ccw console found!\n");
		return -1;
	}
	return console_irq;
}
struct subchannel *
cio_probe_console(void)
{
	int sch_no, ret;
	struct subchannel_id schid;

	if (xchg(&console_subchannel_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch_no = cio_get_console_sch_no();
	if (sch_no == -1) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}
	memset(&console_subchannel, 0, sizeof(struct subchannel));
	init_subchannel_id(&schid);
	schid.sch_no = sch_no;
	ret = cio_validate_subchannel(&console_subchannel, schid);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}

	/*
	 * enable console I/O-interrupt subclass
	 */
	isc_register(CONSOLE_ISC);
	console_subchannel.schib.pmcw.isc = CONSOLE_ISC;
	console_subchannel.schib.pmcw.intparm =
		(u32)(addr_t)&console_subchannel;
	ret = cio_modify(&console_subchannel);
	if (ret) {
		isc_unregister(CONSOLE_ISC);
		console_subchannel_in_use = 0;
		return ERR_PTR(ret);
	}
	return &console_subchannel;
}
void
cio_release_console(void)
{
	console_subchannel.schib.pmcw.intparm = 0;
	cio_modify(&console_subchannel);
	isc_unregister(CONSOLE_ISC);
	console_subchannel_in_use = 0;
}
/* Bah... hack to catch console special sausages. */
int
cio_is_console(struct subchannel_id schid)
{
	if (!console_subchannel_in_use)
		return 0;
	return schid_equal(&schid, &console_subchannel.schid);
}
struct subchannel *
cio_get_console_subchannel(void)
{
	if (!console_subchannel_in_use)
		return NULL;
	return &console_subchannel;
}

#endif /* CONFIG_CCW_CONSOLE */
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
	int retry, cc;

	cc = 0;
	for (retry = 0; retry < 3; retry++) {
		schib->pmcw.ena = 0;
		cc = msch(schid, schib);
		if (cc)
			return (cc == 3) ? -ENODEV : -EBUSY;
		stsch(schid, schib);
		if (!schib->pmcw.ena)
			return 0;
	}
	return -EBUSY; /* uhm... */
}
/* we can't use the normal udelay here, since it enables external interrupts */

static void udelay_reset(unsigned long usecs)
{
	uint64_t start_cc, end_cc;

	asm volatile ("STCK %0" : "=m" (start_cc));
	do {
		cpu_relax();
		asm volatile ("STCK %0" : "=m" (end_cc));
	} while (((end_cc - start_cc)/4096) < usecs);
}
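
/*
 * Added note (not in the original source): the TOD clock increments
 * bit 51 once per microsecond, so 4096 (2^12) TOD units equal 1 usec;
 * dividing the STCK delta by 4096 therefore yields elapsed
 * microseconds.
 */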
static int
__clear_io_subchannel_easy(struct subchannel_id schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry = 0; retry < 20; retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			tsch(ti.schid, (struct irb *)__LC_IRB);
			if (schid_equal(&ti.schid, &schid))
				return 0;
		}
		udelay_reset(100);
	}
	return -EBUSY;
}
static void __clear_chsc_subchannel_easy(void)
{
	/* It seems we can only wait for a bit here :/ */
	udelay_reset(100);
}
static int pgm_check_occured;

static void cio_reset_pgm_check_handler(void)
{
	pgm_check_occured = 1;
}

static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
{
	int rc;

	pgm_check_occured = 0;
	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
	rc = stsch(schid, addr);
	s390_base_pgm_handler_fn = NULL;

	/* The program check handler could have changed pgm_check_occured. */
	barrier();

	if (pgm_check_occured)
		return -EIO;
	else
		return rc;
}
static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;
	switch(__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		switch (schib.pmcw.st) {
		case SUBCHANNEL_TYPE_IO:
			if (__clear_io_subchannel_easy(schid))
				goto out; /* give up... */
			break;
		case SUBCHANNEL_TYPE_CHSC:
			__clear_chsc_subchannel_easy();
			break;
		default:
			/* No default clear strategy */
			break;
		}
		stsch(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
out:
	return 0;
}
static atomic_t chpid_reset_count;

static void s390_reset_chpids_mcck_handler(void)
{
	struct crw crw;
	struct mci *mci;

	/* Check for pending channel report word. */
	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
	if (!mci->cp)
		return;
	/* Process channel report words. */
	while (stcrw(&crw) == 0) {
		/* Check for responses to RCHP. */
		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
			atomic_dec(&chpid_reset_count);
	}
}
#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy, NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
	timeout = get_clock() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_clock() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}
static struct reset_call css_reset_call = {
	.fn = css_reset,
};

static int __init init_css_reset_call(void)
{
	atomic_set(&chpid_reset_count, 0);
	register_reset_call(&css_reset_call);
	return 0;
}

arch_initcall(init_css_reset_call);
struct sch_match_id {
	struct subchannel_id schid;
	struct ccw_dev_id devid;
	int rc;
};
static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct sch_match_id *match_id = data;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
	    (schib.pmcw.dev == match_id->devid.devno) &&
	    (schid.ssid == match_id->devid.ssid)) {
		match_id->schid = schid;
		match_id->rc = 0;
		return 1;
	}
	return 0;
}
static int reipl_find_schid(struct ccw_dev_id *devid,
			    struct subchannel_id *schid)
{
	struct sch_match_id match_id;

	match_id.devid = *devid;
	match_id.rc = -ENODEV;
	for_each_subchannel(__reipl_subchannel_match, &match_id);
	if (match_id.rc == 0)
		*schid = match_id.schid;
	return match_id.rc;
}
extern void do_reipl_asm(__u32 schid);

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
	struct subchannel_id schid;

	s390_reset_system();
	if (reipl_find_schid(devid, &schid) != 0)
		panic("IPL Device not found\n");
	do_reipl_asm(*((__u32*)&schid));
}
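
/*
 * Illustrative usage (not in the original source): the IPL code fills
 * in the ccw_dev_id of the boot device and calls reipl_ccw_dev(); the
 * devno below is hypothetical. On success the call does not return,
 * since do_reipl_asm() restarts the system.
 *
 *	struct ccw_dev_id devid = { .ssid = 0, .devno = 0x1234 };
 *
 *	reipl_ccw_dev(&devid);
 */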
int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
	struct subchannel_id schid;
	struct schib schib;

	schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
	if (!schid.one)
		return -ENODEV;
	if (stsch(schid, &schib))
		return -ENODEV;
	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
		return -ENODEV;
	if (!schib.pmcw.dnv)
		return -ENODEV;
	iplinfo->devno = schib.pmcw.dev;
	iplinfo->is_qdio = schib.pmcw.qf;
	return 0;
}
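
/*
 * Illustrative usage (not in the original source), error handling
 * omitted:
 *
 *	struct cio_iplinfo iplinfo;
 *
 *	if (cio_get_iplinfo(&iplinfo) == 0)
 *		printk(KERN_INFO "booted from device 0x%04x\n",
 *		       iplinfo.devno);
 */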
/**
 * cio_tm_start_key - perform start function
 * @sch: subchannel on which to perform the start function
 * @tcw: transport-command word to be started
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given subchannel. Return zero on success, non-zero
 * otherwise.
 */
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
{
	int cc;
	union orb *orb = &to_io_private(sch)->orb;

	memset(orb, 0, sizeof(union orb));
	orb->tm.intparm = (u32) (addr_t) sch;
	orb->tm.key = key >> 4;
	orb->tm.b = 1;
	orb->tm.lpm = lpm ? lpm : sch->lpm;
	orb->tm.tcw = (u32) (addr_t) tcw;
	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		return 0;
	case 1:
	case 2:
		return -EBUSY;
	default:
		return cio_start_handle_notoper(sch, lpm);
	}
}
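
/*
 * Illustrative sketch (not in the original source): a transport-mode
 * caller would first build a TCW, e.g. with the helpers declared in
 * asm/fcx.h (tcw_init() and friends, assumed here), and then start it
 * on all available paths with the default storage key:
 *
 *	ret = cio_tm_start_key(sch, tcw, 0, PAGE_DEFAULT_KEY);
 */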
/**
 * cio_tm_intrg - perform interrogate function
 * @sch: subchannel on which to perform the interrogate function
 *
 * If the specified subchannel is running in transport-mode, perform the
 * interrogate function. Return zero on success, non-zero otherwise.
 */
int cio_tm_intrg(struct subchannel *sch)
{
	int cc;

	if (!to_io_private(sch)->orb.tm.b)
		return -EINVAL;
	cc = xsch(sch->schid);