/*
 * CTC / ESCON network driver
 *
 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
 *            Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *            Peter Tiedemann (ptiedem@de.ibm.com)
 * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Documentation used:
 *  - Principles of Operation (IBM doc#: SA22-7201-06)
 *  - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02)
 *  - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535)
 *  - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00)
 *  - ESCON I/O Interface (IBM doc#: SA22-7202-029
 *
 * and the source of the original CTC driver by:
 *  Dieter Wellerdiek (wel@de.ibm.com)
 *  Martin Schwidefsky (schwidefsky@de.ibm.com)
 *  Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *  Jochen Röhrig (roehrig@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/kernel.h>
43 #include <linux/slab.h>
44 #include <linux/errno.h>
45 #include <linux/types.h>
46 #include <linux/interrupt.h>
47 #include <linux/timer.h>
48 #include <linux/bitops.h>
50 #include <linux/signal.h>
51 #include <linux/string.h>
54 #include <linux/if_arp.h>
55 #include <linux/tcp.h>
56 #include <linux/skbuff.h>
57 #include <linux/ctype.h>
61 #include <asm/ccwdev.h>
62 #include <asm/ccwgroup.h>
63 #include <asm/uaccess.h>
65 #include <asm/idals.h>
73 MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
74 MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
75 MODULE_LICENSE("GPL");
/*
 * NOTE(review): this region is a damaged extraction of the device and
 * channel statemachine declarations.  The leading numerals on each line
 * are line numbers from the original file; the enum keywords, braces,
 * most enumerators and the string-table bodies are missing.  Comments
 * below describe only what the surviving lines show -- reconstruct from
 * the pristine driver source before compiling.
 */
/* Fragment: interface (device) statemachine states. */
77 * States of the interface statemachine.
81 DEV_STATE_STARTWAIT_RXTX,
82 DEV_STATE_STARTWAIT_RX,
83 DEV_STATE_STARTWAIT_TX,
84 DEV_STATE_STOPWAIT_RXTX,
85 DEV_STATE_STOPWAIT_RX,
86 DEV_STATE_STOPWAIT_TX,
89 * MUST be always the last element!!
/* Fragment: printable names for the device states (array body missing). */
94 static const char *dev_state_names[] = {
/* Fragment: interface statemachine events and their name table. */
106 * Events of the interface statemachine.
117 * MUST be always the last element!!
122 static const char *dev_event_names[] = {
/* Fragment: channel statemachine events (I/O return codes, unit checks,
 * subchannel checks, machine checks, IRQ, timer, upper-level commands). */
133 * Events of the channel statemachine
137 * Events, representing return code of
138 * I/O operations (ccw_device_start, ccw_device_halt et al.)
151 * Events, representing unit-check
155 CH_EVENT_UC_TXTIMEOUT,
156 CH_EVENT_UC_TXPARITY,
158 CH_EVENT_UC_RXPARITY,
163 * Events, representing subchannel-check
168 * Events, representing machine checks
174 * Event, representing normal IRQ
180 * Event, representing timer expiry.
185 * Events, representing commands from upper levels.
191 * MUST be always the last element!!
/* Fragment: channel statemachine states. */
197 * States of the channel statemachine.
201 * Channel not assigned to any device,
202 * initial state, direction invalid
207 * Channel assigned but not operating
226 * MUST be always the last element!!
/* Module-wide log level; CTC_LOGLEVEL_DEFAULT is defined elsewhere. */
231 static int loglevel = CTC_LOGLEVEL_DEFAULT;
234 * Linked list of all detected channels.
236 static struct channel *channels = NULL;
/* Fragment of print_banner(): one-shot guard + banner printk. */
244 static int printed = 0;
249 printk(KERN_INFO "CTC driver initialized\n");
/* Doc-comment fragment belonging to get_channel_type() below. */
254 * Return type of a detected device.
256 static enum channel_types
257 get_channel_type(struct ccw_device_id *id)
259 enum channel_types type = (enum channel_types) id->driver_info;
261 if (type == channel_type_ficon)
262 type = channel_type_escon;
/*
 * NOTE(review): damaged extraction of the channel event and state name
 * tables -- leading numerals are original line numbers and several array
 * entries plus the closing braces are missing.  Entry order must match
 * the CH_EVENT_* / CH_STATE_* enums above; verify against the pristine
 * source.
 */
267 static const char *ch_event_names[] = {
268 "ccw_device success",
272 "ccw_device unknown",
274 "Status ATTN & BUSY",
278 "Unit check remote reset",
279 "Unit check remote system reset",
280 "Unit check TX timeout",
281 "Unit check TX parity",
282 "Unit check Hardware failure",
283 "Unit check RX parity",
285 "Unit check Unknown",
287 "SubChannel check Unknown",
289 "Machine check failure",
290 "Machine check operational",
/* Channel state name table (body missing in this extraction). */
301 static const char *ch_state_names[] = {
/*
 * NOTE(review): damaged extraction of ctc_dump_skb() -- the function
 * header, local declarations (bl, i), early return, and the #ifdef DEBUG
 * frame around the two definitions are missing.  Do not compile as-is.
 */
322 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
324 * @param skb The sk_buff to dump.
325 * @param offset Offset relative to skb-data, where to start the dump.
328 ctc_dump_skb(struct sk_buff *skb, int offset)
330 unsigned char *p = skb->data;
332 struct ll_header *header;
/* Only dump when debug logging is enabled. */
335 if (!(loglevel & CTC_LOGLEVEL_DEBUG))
340 header = (struct ll_header *) p;
343 printk(KERN_DEBUG "dump:\n");
344 printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl);
346 printk(KERN_DEBUG "h->length=%d %04x\n", header->length,
348 printk(KERN_DEBUG "h->type=%04x\n", header->type);
349 printk(KERN_DEBUG "h->unused=%04x\n", header->unused);
352 printk(KERN_DEBUG "data: ");
/* Hex dump of the payload bytes, 16 per line. */
353 for (i = 0; i < bl; i++)
354 printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>");
/* Presumably the empty non-debug stub of the same function -- confirm. */
359 ctc_dump_skb(struct sk_buff *skb, int offset)
/*
 * NOTE(review): damaged extraction of ctc_unpack_skb() -- leading
 * numerals are original line numbers; the function frame, the while
 * loop over contained packets, several `goto`/`return` lines and closing
 * braces are missing.  The surviving lines show: parse the 2-byte block
 * length, iterate over ll_header-framed packets, validate type/length,
 * copy each packet into a fresh skb and pass it up (netif_rx presumably
 * in a missing line -- confirm), updating rx statistics throughout.
 */
365 * Unpack a just received skb and hand it over to
368 * @param ch The channel where this skb has been received.
369 * @param pskb The received skb.
372 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
374 struct net_device *dev = ch->netdev;
375 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
/* First 2 bytes of the block are the total block length. */
376 __u16 len = *((__u16 *) pskb->data);
378 DBF_TEXT(trace, 4, __FUNCTION__);
379 skb_put(pskb, 2 + LL_HEADER_LENGTH);
382 pskb->ip_summed = CHECKSUM_UNNECESSARY;
385 struct ll_header *header = (struct ll_header *) pskb->data;
387 skb_pull(pskb, LL_HEADER_LENGTH);
/* Strict S/390 protocol mode only allows IP packets. */
388 if ((ch->protocol == CTC_PROTO_S390) &&
389 (header->type != ETH_P_IP)) {
392 if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
395 * Check packet type only if we stick strictly
396 * to S/390's protocol of OS390. This only
397 * supports IP. Otherwise allow any packet
401 "%s Illegal packet type 0x%04x received, dropping\n",
402 dev->name, header->type);
403 ch->logflags |= LOG_FLAG_ILLEGALPKT;
408 ctc_dump_skb(pskb, -6);
410 privptr->stats.rx_dropped++;
411 privptr->stats.rx_frame_errors++;
414 pskb->protocol = ntohs(header->type);
/* Reject packets whose header length is not larger than the header. */
415 if (header->length <= LL_HEADER_LENGTH) {
417 if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
420 "%s Illegal packet size %d "
421 "received (MTU=%d blocklen=%d), "
422 "dropping\n", dev->name, header->length,
424 ch->logflags |= LOG_FLAG_ILLEGALSIZE;
429 ctc_dump_skb(pskb, -6);
431 privptr->stats.rx_dropped++;
432 privptr->stats.rx_length_errors++;
435 header->length -= LL_HEADER_LENGTH;
436 len -= LL_HEADER_LENGTH;
/* Reject packets claiming more data than was actually received. */
437 if ((header->length > skb_tailroom(pskb)) ||
438 (header->length > len)) {
440 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
443 "%s Illegal packet size %d "
444 "(beyond the end of received data), "
445 "dropping\n", dev->name, header->length);
446 ch->logflags |= LOG_FLAG_OVERRUN;
451 ctc_dump_skb(pskb, -6);
453 privptr->stats.rx_dropped++;
454 privptr->stats.rx_length_errors++;
457 skb_put(pskb, header->length);
458 pskb->mac.raw = pskb->data;
459 len -= header->length;
/* Copy the packet into its own skb for the network stack. */
460 skb = dev_alloc_skb(pskb->len);
463 if (!(ch->logflags & LOG_FLAG_NOMEM)) {
466 "%s Out of memory in ctc_unpack_skb\n",
468 ch->logflags |= LOG_FLAG_NOMEM;
472 privptr->stats.rx_dropped++;
475 memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
476 skb->mac.raw = skb->data;
477 skb->dev = pskb->dev;
478 skb->protocol = pskb->protocol;
479 pskb->ip_summed = CHECKSUM_UNNECESSARY;
482 * Successful rx; reset logflags
485 dev->last_rx = jiffies;
486 privptr->stats.rx_packets++;
487 privptr->stats.rx_bytes += skb->len;
/* Advance to the next packet inside the block. */
489 skb_pull(pskb, header->length);
490 if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
492 if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
495 "%s Overrun in ctc_unpack_skb\n",
497 ch->logflags |= LOG_FLAG_OVERRUN;
503 skb_put(pskb, LL_HEADER_LENGTH);
509 * Check return code of a preceeding ccw_device call, halt_IO etc...
511 * @param ch The channel, the error belongs to.
512 * @param return_code The error code to inspect.
515 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517 DBF_TEXT(trace, 5, __FUNCTION__);
518 switch (return_code) {
520 fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
523 ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg);
524 fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch);
527 ctc_pr_emerg("%s (%s): Invalid device called for IO\n",
529 fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch);
532 ctc_pr_emerg("%s (%s): Status pending... \n",
534 fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch);
537 ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
538 ch->id, msg, return_code);
539 fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch);
544 * Check sense of a unit check.
546 * @param ch The channel, the sense code belongs to.
547 * @param sense The sense code to inspect.
550 ccw_unit_check(struct channel *ch, unsigned char sense)
552 DBF_TEXT(trace, 5, __FUNCTION__);
553 if (sense & SNS0_INTERVENTION_REQ) {
555 ctc_pr_debug("%s: Interface disc. or Sel. reset "
556 "(remote)\n", ch->id);
557 fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
559 ctc_pr_debug("%s: System reset (remote)\n", ch->id);
560 fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch);
562 } else if (sense & SNS0_EQUIPMENT_CHECK) {
563 if (sense & SNS0_BUS_OUT_CHECK) {
564 ctc_pr_warn("%s: Hardware malfunction (remote)\n",
566 fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch);
568 ctc_pr_warn("%s: Read-data parity error (remote)\n",
570 fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch);
572 } else if (sense & SNS0_BUS_OUT_CHECK) {
574 ctc_pr_warn("%s: Data-streaming timeout)\n", ch->id);
575 fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch);
577 ctc_pr_warn("%s: Data-transfer parity error\n", ch->id);
578 fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch);
580 } else if (sense & SNS0_CMD_REJECT) {
581 ctc_pr_warn("%s: Command reject\n", ch->id);
582 } else if (sense == 0) {
583 ctc_pr_debug("%s: Unit check ZERO\n", ch->id);
584 fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch);
586 ctc_pr_warn("%s: Unit Check with sense code: %02x\n",
588 fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch);
593 ctc_purge_skb_queue(struct sk_buff_head *q)
597 DBF_TEXT(trace, 5, __FUNCTION__);
599 while ((skb = skb_dequeue(q))) {
600 atomic_dec(&skb->users);
601 dev_kfree_skb_irq(skb);
606 ctc_checkalloc_buffer(struct channel *ch, int warn)
608 DBF_TEXT(trace, 5, __FUNCTION__);
609 if ((ch->trans_skb == NULL) ||
610 (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
611 if (ch->trans_skb != NULL)
612 dev_kfree_skb(ch->trans_skb);
613 clear_normalized_cda(&ch->ccw[1]);
614 ch->trans_skb = __dev_alloc_skb(ch->max_bufsize,
615 GFP_ATOMIC | GFP_DMA);
616 if (ch->trans_skb == NULL) {
619 "%s: Couldn't alloc %s trans_skb\n",
621 (CHANNEL_DIRECTION(ch->flags) == READ) ?
625 ch->ccw[1].count = ch->max_bufsize;
626 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
627 dev_kfree_skb(ch->trans_skb);
628 ch->trans_skb = NULL;
631 "%s: set_normalized_cda for %s "
632 "trans_skb failed, dropping packets\n",
634 (CHANNEL_DIRECTION(ch->flags) == READ) ?
638 ch->ccw[1].count = 0;
639 ch->trans_skb_data = ch->trans_skb->data;
640 ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
646 * Dummy NOP action for statemachines
649 fsm_action_nop(fsm_instance * fi, int event, void *arg)
/*****************************************************************************
 * Actions for channel - statemachines.
 *****************************************************************************/
/*
 * NOTE(review): damaged extraction of ch_action_txdone() -- leading
 * numerals are original line numbers; local declarations (skb, i, rc),
 * loop/branch braces and some statements are missing.  Surviving lines
 * show: account TX timing/statistics, free sent skbs from io_queue,
 * then either chain-transmit the collect_queue contents through
 * trans_skb or go idle.
 */
658 * Normal data has been send. Free the corresponding
659 * skb (it's in io_queue), reset dev->tbusy and
660 * revert to idle state.
662 * @param fi An instance of a channel statemachine.
663 * @param event The event, just happened.
664 * @param arg Generic pointer, casted from channel * upon call.
667 ch_action_txdone(fsm_instance * fi, int event, void *arg)
669 struct channel *ch = (struct channel *) arg;
670 struct net_device *dev = ch->netdev;
671 struct ctc_priv *privptr = dev->priv;
675 unsigned long duration;
676 struct timespec done_stamp = xtime;
678 DBF_TEXT(trace, 4, __FUNCTION__);
/* Track the longest observed TX duration in microseconds. */
681 (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
682 (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
683 if (duration > ch->prof.tx_time)
684 ch->prof.tx_time = duration;
686 if (ch->irb->scsw.count != 0)
687 ctc_pr_debug("%s: TX not complete, remaining %d bytes\n",
688 dev->name, ch->irb->scsw.count);
689 fsm_deltimer(&ch->timer);
/* Account and free every skb that has just been transmitted. */
690 while ((skb = skb_dequeue(&ch->io_queue))) {
691 privptr->stats.tx_packets++;
692 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
694 privptr->stats.tx_bytes += 2;
697 atomic_dec(&skb->users);
698 dev_kfree_skb_irq(skb);
700 spin_lock(&ch->collect_lock);
701 clear_normalized_cda(&ch->ccw[4]);
/* More data collected while transmitting: chain the next block. */
702 if (ch->collect_len > 0) {
705 if (ctc_checkalloc_buffer(ch, 1)) {
706 spin_unlock(&ch->collect_lock);
709 ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data;
710 ch->trans_skb->len = 0;
711 if (ch->prof.maxmulti < (ch->collect_len + 2))
712 ch->prof.maxmulti = ch->collect_len + 2;
713 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
714 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
/* Block starts with a 2-byte total length. */
715 *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
717 while ((skb = skb_dequeue(&ch->collect_queue))) {
718 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
720 privptr->stats.tx_packets++;
721 privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
722 atomic_dec(&skb->users);
723 dev_kfree_skb_irq(skb);
727 spin_unlock(&ch->collect_lock);
728 ch->ccw[1].count = ch->trans_skb->len;
729 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
730 ch->prof.send_stamp = xtime;
731 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
732 (unsigned long) ch, 0xff, 0);
733 ch->prof.doios_multi++;
/* Presumably inside `if (rc != 0)` error handling -- confirm. */
735 privptr->stats.tx_dropped += i;
736 privptr->stats.tx_errors += i;
737 fsm_deltimer(&ch->timer);
738 ccw_check_return_code(ch, rc, "chained TX");
741 spin_unlock(&ch->collect_lock);
742 fsm_newstate(fi, CH_STATE_TXIDLE);
748 * Initial data is sent.
749 * Notify device statemachine that we are up and
752 * @param fi An instance of a channel statemachine.
753 * @param event The event, just happened.
754 * @param arg Generic pointer, casted from channel * upon call.
757 ch_action_txidle(fsm_instance * fi, int event, void *arg)
759 struct channel *ch = (struct channel *) arg;
761 DBF_TEXT(trace, 4, __FUNCTION__);
762 fsm_deltimer(&ch->timer);
763 fsm_newstate(fi, CH_STATE_TXIDLE);
764 fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
/*
 * NOTE(review): damaged extraction of ch_action_rx() -- leading numerals
 * are original line numbers; local declarations (check_len, rc), early
 * returns, the `if (len < 8)` guard and several braces are missing.
 * Surviving lines show: validate received length against the embedded
 * block length, unpack via ctc_unpack_skb(), reset trans_skb and start
 * the next read.
 */
769 * Got normal data, check for sanity, queue it up, allocate new buffer
770 * trigger bottom half, and initiate next read.
772 * @param fi An instance of a channel statemachine.
773 * @param event The event, just happened.
774 * @param arg Generic pointer, casted from channel * upon call.
777 ch_action_rx(fsm_instance * fi, int event, void *arg)
779 struct channel *ch = (struct channel *) arg;
780 struct net_device *dev = ch->netdev;
781 struct ctc_priv *privptr = dev->priv;
/* Bytes actually received = buffer size minus SCSW residual count. */
782 int len = ch->max_bufsize - ch->irb->scsw.count;
783 struct sk_buff *skb = ch->trans_skb;
784 __u16 block_len = *((__u16 *) skb->data);
788 DBF_TEXT(trace, 4, __FUNCTION__);
789 fsm_deltimer(&ch->timer);
791 ctc_pr_debug("%s: got packet with length %d < 8\n",
793 privptr->stats.rx_dropped++;
794 privptr->stats.rx_length_errors++;
797 if (len > ch->max_bufsize) {
798 ctc_pr_debug("%s: got packet with length %d > %d\n",
799 dev->name, len, ch->max_bufsize);
800 privptr->stats.rx_dropped++;
801 privptr->stats.rx_length_errors++;
806 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
808 switch (ch->protocol) {
810 case CTC_PROTO_OS390:
/* OS/390 may append 2 garbage bytes -- tolerate them. */
811 check_len = block_len + 2;
814 check_len = block_len;
817 if ((len < block_len) || (len > check_len)) {
818 ctc_pr_debug("%s: got block length %d != rx length %d\n",
819 dev->name, block_len, len);
821 ctc_dump_skb(skb, 0);
823 *((__u16 *) skb->data) = len;
824 privptr->stats.rx_dropped++;
825 privptr->stats.rx_length_errors++;
830 *((__u16 *) skb->data) = block_len;
831 ctc_unpack_skb(ch, skb);
/* Reset trans_skb for the next read. */
834 skb->data = skb->tail = ch->trans_skb_data;
836 if (ctc_checkalloc_buffer(ch, 1))
838 ch->ccw[1].count = ch->max_bufsize;
839 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
841 ccw_check_return_code(ch, rc, "normal RX");
/* Forward declaration used by ch_action_firstio() below. */
844 static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
/*
 * NOTE(review): damaged extraction of ch_action_firstio() -- leading
 * numerals are original line numbers; `int rc;`, several braces,
 * `return` statements and the else-branches are missing.  Surviving
 * lines show the initial handshake: send/expect a 2-byte
 * CTC_INITIAL_BLOCKLEN, with OS/390 and VM-compatibility special cases.
 */
847 * Initialize connection by sending a __u16 of value 0.
849 * @param fi An instance of a channel statemachine.
850 * @param event The event, just happened.
851 * @param arg Generic pointer, casted from channel * upon call.
854 ch_action_firstio(fsm_instance * fi, int event, void *arg)
856 struct channel *ch = (struct channel *) arg;
859 DBF_TEXT(trace, 4, __FUNCTION__);
861 if (fsm_getstate(fi) == CH_STATE_TXIDLE)
862 ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
863 fsm_deltimer(&ch->timer);
864 if (ctc_checkalloc_buffer(ch, 1))
/* OS/390 peers skip the real handshake transfer. */
866 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
867 (ch->protocol == CTC_PROTO_OS390)) {
868 /* OS/390 resp. z/OS */
869 if (CHANNEL_DIRECTION(ch->flags) == READ) {
870 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
871 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
873 ch_action_rxidle(fi, event, arg);
875 struct net_device *dev = ch->netdev;
876 fsm_newstate(fi, CH_STATE_TXIDLE);
877 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
878 DEV_EVENT_TXUP, dev);
884 * Don´t setup a timer for receiving the initial RX frame
885 * if in compatibility mode, since VM TCP delays the initial
886 * frame until it has some data to send.
888 if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
889 (ch->protocol != CTC_PROTO_S390))
890 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
892 *((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
893 ch->ccw[1].count = 2; /* Transfer only length */
895 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
896 ? CH_STATE_RXINIT : CH_STATE_TXINIT);
897 rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
/* Presumably inside `if (rc != 0)` error handling -- confirm. */
899 fsm_deltimer(&ch->timer);
900 fsm_newstate(fi, CH_STATE_SETUPWAIT);
901 ccw_check_return_code(ch, rc, "init IO");
904 * If in compatibility mode since we don´t setup a timer, we
905 * also signal RX channel up immediately. This enables us
906 * to send packets early which in turn usually triggers some
907 * reply from VM TCP which brings up the RX channel to it´s
910 if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
911 (ch->protocol == CTC_PROTO_S390)) {
912 struct net_device *dev = ch->netdev;
913 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
/*
 * NOTE(review): damaged extraction of ch_action_rxidle() -- leading
 * numerals are original line numbers; `__u16 buflen; int rc;`, braces
 * and return paths are missing.  Surviving lines show: validate the
 * initial 2-byte block length, start the first real read and signal
 * DEV_EVENT_RXUP, otherwise retry the handshake via ch_action_firstio().
 */
919 * Got initial data, check it. If OK,
920 * notify device statemachine that we are up and
923 * @param fi An instance of a channel statemachine.
924 * @param event The event, just happened.
925 * @param arg Generic pointer, casted from channel * upon call.
928 ch_action_rxidle(fsm_instance * fi, int event, void *arg)
930 struct channel *ch = (struct channel *) arg;
931 struct net_device *dev = ch->netdev;
935 DBF_TEXT(trace, 4, __FUNCTION__);
936 fsm_deltimer(&ch->timer);
937 buflen = *((__u16 *) ch->trans_skb->data);
939 ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
941 if (buflen >= CTC_INITIAL_BLOCKLEN) {
942 if (ctc_checkalloc_buffer(ch, 1))
944 ch->ccw[1].count = ch->max_bufsize;
945 fsm_newstate(fi, CH_STATE_RXIDLE);
946 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
947 (unsigned long) ch, 0xff, 0);
/* Presumably inside `if (rc != 0)` fallback -- confirm. */
949 fsm_newstate(fi, CH_STATE_RXINIT);
950 ccw_check_return_code(ch, rc, "initial RX");
952 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
953 DEV_EVENT_RXUP, dev);
955 ctc_pr_debug("%s: Initial RX count %d not %d\n",
956 dev->name, buflen, CTC_INITIAL_BLOCKLEN);
/* Bad initial count: redo the handshake. */
957 ch_action_firstio(fi, event, arg);
962 * Set channel into extended mode.
964 * @param fi An instance of a channel statemachine.
965 * @param event The event, just happened.
966 * @param arg Generic pointer, casted from channel * upon call.
969 ch_action_setmode(fsm_instance * fi, int event, void *arg)
971 struct channel *ch = (struct channel *) arg;
973 unsigned long saveflags;
975 DBF_TEXT(trace, 4, __FUNCTION__);
976 fsm_deltimer(&ch->timer);
977 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
978 fsm_newstate(fi, CH_STATE_SETUPWAIT);
979 saveflags = 0; /* avoids compiler warning with
980 spin_unlock_irqrestore */
981 if (event == CH_EVENT_TIMER) // only for timer not yet locked
982 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
983 rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
984 if (event == CH_EVENT_TIMER)
985 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
987 fsm_deltimer(&ch->timer);
988 fsm_newstate(fi, CH_STATE_STARTWAIT);
989 ccw_check_return_code(ch, rc, "set Mode");
/*
 * NOTE(review): damaged extraction of ch_action_start() -- leading
 * numerals are original line numbers; `int rc;`, the `if (ch == NULL)`
 * guard, `dev = ch->netdev;`, several braces/returns and the CCW[4]/[5]
 * setup lines are missing.  Surviving lines show: free any stale
 * trans_skb, program the CCW chain (prepare/read-or-write/noop, then a
 * mirrored copy at [3..5]), and kick things off with a halt I/O.
 */
997 * @param fi An instance of a channel statemachine.
998 * @param event The event, just happened.
999 * @param arg Generic pointer, casted from channel * upon call.
1002 ch_action_start(fsm_instance * fi, int event, void *arg)
1004 struct channel *ch = (struct channel *) arg;
1005 unsigned long saveflags;
1007 struct net_device *dev;
1009 DBF_TEXT(trace, 4, __FUNCTION__);
/* Presumably inside `if (ch == NULL)` guard -- confirm. */
1011 ctc_pr_warn("ch_action_start ch=NULL\n");
1014 if (ch->netdev == NULL) {
1015 ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
1021 ctc_pr_debug("%s: %s channel start\n", dev->name,
1022 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1025 if (ch->trans_skb != NULL) {
1026 clear_normalized_cda(&ch->ccw[1]);
1027 dev_kfree_skb(ch->trans_skb);
1028 ch->trans_skb = NULL;
/* CCW[1] carries the data transfer: READ or WRITE by direction. */
1030 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1031 ch->ccw[1].cmd_code = CCW_CMD_READ;
1032 ch->ccw[1].flags = CCW_FLAG_SLI;
1033 ch->ccw[1].count = 0;
1035 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
1036 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1037 ch->ccw[1].count = 0;
1039 if (ctc_checkalloc_buffer(ch, 0)) {
1041 "%s: Could not allocate %s trans_skb, delaying "
1042 "allocation until first transfer\n",
1044 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1047 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
1048 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1049 ch->ccw[0].count = 0;
1051 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
1052 ch->ccw[2].flags = CCW_FLAG_SLI;
1053 ch->ccw[2].count = 0;
/* Mirror CCW[0..2] into CCW[3..5] for the retry chain. */
1055 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
1057 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
1059 fsm_newstate(fi, CH_STATE_STARTWAIT);
1060 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1061 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1062 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1063 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
/* Presumably inside `if (rc != 0)` error handling -- confirm. */
1066 fsm_deltimer(&ch->timer);
1067 ccw_check_return_code(ch, rc, "initial HaltIO");
1070 ctc_pr_debug("ctc: %s(): leaving\n", __func__);
1075 * Shutdown a channel.
1077 * @param fi An instance of a channel statemachine.
1078 * @param event The event, just happened.
1079 * @param arg Generic pointer, casted from channel * upon call.
1082 ch_action_haltio(fsm_instance * fi, int event, void *arg)
1084 struct channel *ch = (struct channel *) arg;
1085 unsigned long saveflags;
1089 DBF_TEXT(trace, 3, __FUNCTION__);
1090 fsm_deltimer(&ch->timer);
1091 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1092 saveflags = 0; /* avoids comp warning with
1093 spin_unlock_irqrestore */
1094 if (event == CH_EVENT_STOP) // only for STOP not yet locked
1095 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1096 oldstate = fsm_getstate(fi);
1097 fsm_newstate(fi, CH_STATE_TERM);
1098 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1099 if (event == CH_EVENT_STOP)
1100 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1103 fsm_deltimer(&ch->timer);
1104 fsm_newstate(fi, oldstate);
1106 ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
1111 * A channel has successfully been halted.
1112 * Cleanup it's queue and notify interface statemachine.
1114 * @param fi An instance of a channel statemachine.
1115 * @param event The event, just happened.
1116 * @param arg Generic pointer, casted from channel * upon call.
1119 ch_action_stopped(fsm_instance * fi, int event, void *arg)
1121 struct channel *ch = (struct channel *) arg;
1122 struct net_device *dev = ch->netdev;
1124 DBF_TEXT(trace, 3, __FUNCTION__);
1125 fsm_deltimer(&ch->timer);
1126 fsm_newstate(fi, CH_STATE_STOPPED);
1127 if (ch->trans_skb != NULL) {
1128 clear_normalized_cda(&ch->ccw[1]);
1129 dev_kfree_skb(ch->trans_skb);
1130 ch->trans_skb = NULL;
1132 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1133 skb_queue_purge(&ch->io_queue);
1134 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1135 DEV_EVENT_RXDOWN, dev);
1137 ctc_purge_skb_queue(&ch->io_queue);
1138 spin_lock(&ch->collect_lock);
1139 ctc_purge_skb_queue(&ch->collect_queue);
1140 ch->collect_len = 0;
1141 spin_unlock(&ch->collect_lock);
1142 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1143 DEV_EVENT_TXDOWN, dev);
1148 * A stop command from device statemachine arrived and we are in
1149 * not operational mode. Set state to stopped.
1151 * @param fi An instance of a channel statemachine.
1152 * @param event The event, just happened.
1153 * @param arg Generic pointer, casted from channel * upon call.
1156 ch_action_stop(fsm_instance * fi, int event, void *arg)
1158 fsm_newstate(fi, CH_STATE_STOPPED);
1162 * A machine check for no path, not operational status or gone device has
1164 * Cleanup queue and notify interface statemachine.
1166 * @param fi An instance of a channel statemachine.
1167 * @param event The event, just happened.
1168 * @param arg Generic pointer, casted from channel * upon call.
1171 ch_action_fail(fsm_instance * fi, int event, void *arg)
1173 struct channel *ch = (struct channel *) arg;
1174 struct net_device *dev = ch->netdev;
1176 DBF_TEXT(trace, 3, __FUNCTION__);
1177 fsm_deltimer(&ch->timer);
1178 fsm_newstate(fi, CH_STATE_NOTOP);
1179 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1180 skb_queue_purge(&ch->io_queue);
1181 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1182 DEV_EVENT_RXDOWN, dev);
1184 ctc_purge_skb_queue(&ch->io_queue);
1185 spin_lock(&ch->collect_lock);
1186 ctc_purge_skb_queue(&ch->collect_queue);
1187 ch->collect_len = 0;
1188 spin_unlock(&ch->collect_lock);
1189 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1190 DEV_EVENT_TXDOWN, dev);
1195 * Handle error during setup of channel.
1197 * @param fi An instance of a channel statemachine.
1198 * @param event The event, just happened.
1199 * @param arg Generic pointer, casted from channel * upon call.
1202 ch_action_setuperr(fsm_instance * fi, int event, void *arg)
1204 struct channel *ch = (struct channel *) arg;
1205 struct net_device *dev = ch->netdev;
1207 DBF_TEXT(setup, 3, __FUNCTION__);
1209 * Special case: Got UC_RCRESET on setmode.
1210 * This means that remote side isn't setup. In this case
1211 * simply retry after some 10 secs...
1213 if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
1214 ((event == CH_EVENT_UC_RCRESET) ||
1215 (event == CH_EVENT_UC_RSRESET))) {
1216 fsm_newstate(fi, CH_STATE_STARTRETRY);
1217 fsm_deltimer(&ch->timer);
1218 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1219 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1220 int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1222 ccw_check_return_code(
1223 ch, rc, "HaltIO in ch_action_setuperr");
1228 ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
1229 dev->name, ch_event_names[event],
1230 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
1231 fsm_getstate_str(fi));
1232 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1233 fsm_newstate(fi, CH_STATE_RXERR);
1234 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1235 DEV_EVENT_RXDOWN, dev);
1237 fsm_newstate(fi, CH_STATE_TXERR);
1238 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1239 DEV_EVENT_TXDOWN, dev);
1244 * Restart a channel after an error.
1246 * @param fi An instance of a channel statemachine.
1247 * @param event The event, just happened.
1248 * @param arg Generic pointer, casted from channel * upon call.
1251 ch_action_restart(fsm_instance * fi, int event, void *arg)
1253 unsigned long saveflags;
1257 struct channel *ch = (struct channel *) arg;
1258 struct net_device *dev = ch->netdev;
1260 DBF_TEXT(trace, 3, __FUNCTION__);
1261 fsm_deltimer(&ch->timer);
1262 ctc_pr_debug("%s: %s channel restart\n", dev->name,
1263 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
1264 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
1265 oldstate = fsm_getstate(fi);
1266 fsm_newstate(fi, CH_STATE_STARTWAIT);
1267 saveflags = 0; /* avoids compiler warning with
1268 spin_unlock_irqrestore */
1269 if (event == CH_EVENT_TIMER) // only for timer not yet locked
1270 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1271 rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
1272 if (event == CH_EVENT_TIMER)
1273 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
1276 fsm_deltimer(&ch->timer);
1277 fsm_newstate(fi, oldstate);
1279 ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart");
1284 * Handle error during RX initial handshake (exchange of
1285 * 0-length block header)
1287 * @param fi An instance of a channel statemachine.
1288 * @param event The event, just happened.
1289 * @param arg Generic pointer, casted from channel * upon call.
1292 ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
1294 struct channel *ch = (struct channel *) arg;
1295 struct net_device *dev = ch->netdev;
1297 DBF_TEXT(setup, 3, __FUNCTION__);
1298 if (event == CH_EVENT_TIMER) {
1299 fsm_deltimer(&ch->timer);
1300 ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
1301 if (ch->retry++ < 3)
1302 ch_action_restart(fi, event, arg);
1304 fsm_newstate(fi, CH_STATE_RXERR);
1305 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1306 DEV_EVENT_RXDOWN, dev);
1309 ctc_pr_warn("%s: Error during RX init handshake\n", dev->name);
1313 * Notify device statemachine if we gave up initialization
1316 * @param fi An instance of a channel statemachine.
1317 * @param event The event, just happened.
1318 * @param arg Generic pointer, casted from channel * upon call.
1321 ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
1323 struct channel *ch = (struct channel *) arg;
1324 struct net_device *dev = ch->netdev;
1326 DBF_TEXT(setup, 3, __FUNCTION__);
1327 fsm_newstate(fi, CH_STATE_RXERR);
1328 ctc_pr_warn("%s: RX initialization failed\n", dev->name);
1329 ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
1330 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1334 * Handle RX Unit check remote reset (remote disconnected)
1336 * @param fi An instance of a channel statemachine.
1337 * @param event The event, just happened.
1338 * @param arg Generic pointer, casted from channel * upon call.
1341 ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
1343 struct channel *ch = (struct channel *) arg;
1344 struct channel *ch2;
1345 struct net_device *dev = ch->netdev;
1347 DBF_TEXT(trace, 3, __FUNCTION__);
1348 fsm_deltimer(&ch->timer);
1349 ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
1353 * Notify device statemachine
1355 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev);
1356 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev);
1358 fsm_newstate(fi, CH_STATE_DTERM);
1359 ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE];
1360 fsm_newstate(ch2->fsm, CH_STATE_DTERM);
1362 ccw_device_halt(ch->cdev, (unsigned long) ch);
1363 ccw_device_halt(ch2->cdev, (unsigned long) ch2);
1367 * Handle error during TX channel initialization.
1369 * @param fi An instance of a channel statemachine.
1370 * @param event The event, just happened.
1371 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: TX init handshake failed. On a timer event retry
 * up to 3 times via ch_action_restart; after that (the elided else-arm
 * at the original line numbering gap) go to TXERR and notify the device
 * FSM. Non-timer events only log a warning here.
 */
1374 ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
1376 struct channel *ch = (struct channel *) arg;
1377 struct net_device *dev = ch->netdev;
1379 DBF_TEXT(setup, 2, __FUNCTION__);
1380 if (event == CH_EVENT_TIMER) {
1381 fsm_deltimer(&ch->timer);
1382 ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
1383 if (ch->retry++ < 3)
1384 ch_action_restart(fi, event, arg);
1386 fsm_newstate(fi, CH_STATE_TXERR);
1387 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1388 DEV_EVENT_TXDOWN, dev);
1391 ctc_pr_warn("%s: Error during TX init handshake\n", dev->name);
1395 * Handle TX timeout by retrying operation.
1397 * @param fi An instance of a channel statemachine.
1398 * @param event The event, just happened.
1399 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: retry a failed/timed-out transmit. More than 3
 * retries gives up and restarts the channel; otherwise the head of the
 * io_queue is re-submitted with a freshly built IDAL.
 */
1402 ch_action_txretry(fsm_instance * fi, int event, void *arg)
1404 struct channel *ch = (struct channel *) arg;
1405 struct net_device *dev = ch->netdev;
1406 unsigned long saveflags;
1408 DBF_TEXT(trace, 4, __FUNCTION__);
1409 fsm_deltimer(&ch->timer);
1410 if (ch->retry++ > 3) {
1411 ctc_pr_debug("%s: TX retry failed, restarting channel\n",
1413 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1414 DEV_EVENT_TXDOWN, dev);
1415 ch_action_restart(fi, event, arg);
1417 struct sk_buff *skb;
1419 ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
/* Peek (don't dequeue): the skb stays queued until txdone confirms it. */
1420 if ((skb = skb_peek(&ch->io_queue))) {
/* Rebuild the write CCW's indirect-addressing list for this skb. */
1423 clear_normalized_cda(&ch->ccw[4]);
1424 ch->ccw[4].count = skb->len;
1425 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1427 "%s: IDAL alloc failed, chan restart\n",
1429 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1430 DEV_EVENT_TXDOWN, dev);
1431 ch_action_restart(fi, event, arg);
1434 fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
1435 saveflags = 0; /* avoids compiler warning with
1436 spin_unlock_irqrestore */
/* For TIMER events we run outside the IRQ handler, so we must take the ccwdev lock ourselves. */
1437 if (event == CH_EVENT_TIMER) // only for TIMER not yet locked
1438 spin_lock_irqsave(get_ccwdev_lock(ch->cdev),
1440 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1441 (unsigned long) ch, 0xff, 0);
1442 if (event == CH_EVENT_TIMER)
1443 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
/* Start failed: drop the pending queue and report the return code. */
1446 fsm_deltimer(&ch->timer);
1447 ccw_check_return_code(ch, rc, "TX in ch_action_txretry");
1448 ctc_purge_skb_queue(&ch->io_queue);
1456 * Handle fatal errors during an I/O command.
1458 * @param fi An instance of a channel statemachine.
1459 * @param event The event, just happened.
1460 * @param arg Generic pointer, casted from channel * upon call.
/*
 * Channel FSM action: an I/O request failed fatally (ENODEV etc.).
 * Move to the direction-specific error state and notify the device FSM.
 */
1463 ch_action_iofatal(fsm_instance * fi, int event, void *arg)
1465 struct channel *ch = (struct channel *) arg;
1466 struct net_device *dev = ch->netdev;
1468 DBF_TEXT(trace, 3, __FUNCTION__);
1469 fsm_deltimer(&ch->timer);
1470 if (CHANNEL_DIRECTION(ch->flags) == READ) {
1471 ctc_pr_debug("%s: RX I/O error\n", dev->name);
1472 fsm_newstate(fi, CH_STATE_RXERR);
1473 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1474 DEV_EVENT_RXDOWN, dev);
1476 ctc_pr_debug("%s: TX I/O error\n", dev->name);
1477 fsm_newstate(fi, CH_STATE_TXERR);
1478 fsm_event(((struct ctc_priv *) dev->priv)->fsm,
1479 DEV_EVENT_TXDOWN, dev);
/*
 * Channel FSM action for recoverable I/O errors (EIO): treat like a
 * fatal error, then schedule a device restart in 1 second.
 */
1484 ch_action_reinit(fsm_instance *fi, int event, void *arg)
1486 struct channel *ch = (struct channel *)arg;
1487 struct net_device *dev = ch->netdev;
1488 struct ctc_priv *privptr = dev->priv;
1490 DBF_TEXT(trace, 4, __FUNCTION__);
1491 ch_action_iofatal(fi, event, arg);
1492 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1496 * The statemachine for a channel.
/*
 * Transition table: { state, event, action }. State/event pairs not
 * listed here are handled by the generic FSM code (presumably as
 * no-op/error — the fsm implementation is outside this file).
 */
1498 static const fsm_node ch_fsm[] = {
1499 {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop },
1500 {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start },
1501 {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop },
1502 {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop },
1504 {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop },
1505 {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop },
1506 {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop },
1507 {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop },
1508 {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start },
1510 {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio },
1511 {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop },
1512 {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode },
1513 {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr },
1514 {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1515 {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1516 {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1518 {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio },
1519 {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode },
1520 {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop },
1521 {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail },
1523 {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio },
1524 {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop },
1525 {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio },
1526 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr },
1527 {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr },
1528 {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode },
1529 {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1530 {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit },
1531 {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail },
1533 {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio },
1534 {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop },
1535 {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle },
1536 {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr },
1537 {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr },
1538 {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr },
1539 {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail },
1540 {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1541 {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1542 {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio },
1543 {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1545 {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio },
1546 {CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
1547 {CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
1548 {CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
/* Transition deliberately disabled upstream; kept for reference. */
1549 // {CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
1550 {CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1551 {CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1552 {CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1553 {CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
1555 {CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
1556 {CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
1557 {CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
1558 {CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
1559 {CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
1560 {CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
1561 {CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1562 {CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
1563 {CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
1565 {CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
1566 {CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
1567 {CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
1568 {CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
1569 {CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
1570 {CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1571 {CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
1572 {CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
1574 {CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
1575 {CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
1576 {CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
1577 {CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1578 {CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1579 {CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
1581 {CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
1582 {CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
1583 {CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
1584 {CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
1585 {CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
1586 {CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
1588 {CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
1589 {CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
1590 {CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
1591 {CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
1592 {CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
1593 {CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
1594 {CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
1595 {CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
1596 {CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
1598 {CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
1599 {CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
1600 {CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
1601 {CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
/* Element count of ch_fsm[]; passed to init_fsm() in add_channel(). */
1604 static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1607 * Functions related to setup and device detection.
1608 *****************************************************************************/
/*
 * Ordering predicate for the sorted channel list: parses both ids as
 * hex device numbers and compares them numerically. The loop presumably
 * skips the non-numeric "ch-…" bus-id prefix (5 chars) — the loop body
 * is elided in this excerpt, so confirm against the full source.
 */
1611 less_than(char *id1, char *id2)
1615 for (i = 0; i < 5; i++) {
1619 dev1 = simple_strtoul(id1, &id1, 16);
1620 dev2 = simple_strtoul(id2, &id2, 16);
1622 return (dev1 < dev2);
1626 * Add a new channel to the list of channels.
1627 * Keeps the channel list sorted.
1629 * @param cdev The ccw_device to be added.
1630 * @param type The type class of the new channel.
1632 * @return 0 on success, !0 on error.
1635 add_channel(struct ccw_device *cdev, enum channel_types type)
1637 struct channel **c = &channels;
1640 DBF_TEXT(trace, 2, __FUNCTION__);
/* kzalloc so all flags/counters start at zero (see comment below). */
1641 ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1643 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1646 /* assure all flags and counters are reset */
/* CCW program area must be 31-bit addressable for the channel subsystem, hence GFP_DMA. */
1647 ch->ccw = kzalloc(8 * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1650 ctc_pr_warn("ctc: Out of memory in add_channel\n");
1656 * "static" ccws are used in the following way:
1658 * ccw[0..2] (Channel program for generic I/O):
1660 * 1: read or write (depending on direction) with fixed
1661 * buffer (idal allocated once when buffer is allocated)
1663 * ccw[3..5] (Channel program for direct write of packets)
1665 * 4: write (idal allocated on every write).
1667 * ccw[6..7] (Channel program for initial channel setup):
1668 * 6: set extended mode
1671 * ch->ccw[0..5] are initialized in ch_action_start because
1672 * the channel's direction is yet unknown here.
1674 ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
1675 ch->ccw[6].flags = CCW_FLAG_SLI;
1677 ch->ccw[7].cmd_code = CCW_CMD_NOOP;
1678 ch->ccw[7].flags = CCW_FLAG_SLI;
/* Channel id derived from the ccw device's bus id, e.g. "ch-0.0.1234". */
1681 snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
1683 ch->fsm = init_fsm(ch->id, ch_state_names,
1684 ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
1685 ch_fsm, CH_FSM_LEN, GFP_KERNEL);
1686 if (ch->fsm == NULL) {
1687 ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
1692 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1693 ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1695 ctc_pr_warn("ctc: Out of memory in add_channel\n");
/* Walk the sorted list to the insertion point; reuse an existing entry with the same id. */
1701 while (*c && less_than((*c)->id, ch->id))
1703 if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
1705 "ctc: add_channel: device %s already in list, "
1706 "using old entry\n", (*c)->id);
1714 spin_lock_init(&ch->collect_lock);
1716 fsm_settimer(ch->fsm, &ch->timer);
1717 skb_queue_head_init(&ch->io_queue);
1718 skb_queue_head_init(&ch->collect_queue);
1725 * Release a specific channel in the channel list.
1727 * @param ch Pointer to channel struct to be released.
/* Marks the channel unused and idle; the struct itself stays in the list. */
1730 channel_free(struct channel *ch)
1732 ch->flags &= ~CHANNEL_FLAGS_INUSE;
1733 fsm_newstate(ch->fsm, CH_STATE_IDLE);
1737 * Remove a specific channel in the channel list.
1739 * @param ch Pointer to channel struct to be released.
/* Unlinks the channel from the global list and releases its resources. */
1742 channel_remove(struct channel *ch)
1744 struct channel **c = &channels;
1746 DBF_TEXT(trace, 2, __FUNCTION__);
1754 fsm_deltimer(&ch->timer);
/* Free the write-CCW IDAL and, if present, the fixed trans buffer with its IDAL. */
1756 clear_normalized_cda(&ch->ccw[4]);
1757 if (ch->trans_skb != NULL) {
1758 clear_normalized_cda(&ch->ccw[1]);
1759 dev_kfree_skb(ch->trans_skb);
1771 * Get a specific channel from the channel list.
1773 * @param type Type of channel we are interested in.
1774 * @param id Id of channel we are interested in.
1775 * @param direction Direction we want to use this channel for.
1777 * @return Pointer to a channel or NULL if no matching channel available.
1779 static struct channel
1781 channel_get(enum channel_types type, char *id, int direction)
1783 struct channel *ch = channels;
1785 DBF_TEXT(trace, 3, __FUNCTION__);
1787 ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
1788 __func__, id, type);
/* Linear scan: match on both id and type. */
1791 while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
1793 ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d\n",
1794 __func__, ch, ch->id, ch->type);
/* NOTE(review): "%pq" below looks like a typo for "%p", and the format
 * string is missing its closing ')' — debug output only, but worth fixing. */
1799 ctc_pr_debug("ctc: %s(): ch=0x%pq (id=%s, type=%d\n",
1800 __func__, ch, ch->id, ch->type);
1803 ctc_pr_warn("ctc: %s(): channel with id %s "
1804 "and type %d not found in channel list\n",
1805 __func__, id, type);
/* Claim the channel: mark in-use and record the requested direction. */
1807 if (ch->flags & CHANNEL_FLAGS_INUSE)
1810 ch->flags |= CHANNEL_FLAGS_INUSE;
1811 ch->flags &= ~CHANNEL_FLAGS_RWMASK;
1812 ch->flags |= (direction == WRITE)
1813 ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
1814 fsm_newstate(ch->fsm, CH_STATE_STOPPED);
1821 * Return the channel type by name.
1823 * @param name Name of network interface.
1825 * @return Type class of channel to be used for that interface.
1827 static enum channel_types inline
1828 extract_channel_media(char *name)
1830 enum channel_types ret = channel_type_unknown;
/* Prefix match: "ctc*" -> parallel, "escon*" -> ESCON; anything else stays unknown. */
1833 if (strncmp(name, "ctc", 3) == 0)
1834 ret = channel_type_parallel;
1835 if (strncmp(name, "escon", 5) == 0)
1836 ret = channel_type_escon;
/*
 * Check whether the irb pointer passed to the interrupt handler is an
 * ERR_PTR-encoded error and log it. Returns the error code (non-zero)
 * for an error irb; the caller bails out in that case.
 */
1842 __ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
1847 switch (PTR_ERR(irb)) {
1849 ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
1850 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1851 // CTC_DBF_TEXT_(trace, 2, " rc%d", -EIO);
1854 ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
1855 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1856 // CTC_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
1859 ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
1861 // CTC_DBF_TEXT(trace, 2, "ckirberr");
1862 // CTC_DBF_TEXT(trace, 2, " rc???");
1864 return PTR_ERR(irb);
/*
 * Main IRQ handler: decodes the interruption response block and feeds
 * the appropriate event into the owning channel's statemachine.
 */
1870 * @param cdev The ccw_device the interrupt is for.
1871 * @param intparm interruption parameter.
1872 * @param irb interruption response block.
1875 ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1878 struct net_device *dev;
1879 struct ctc_priv *priv;
1881 DBF_TEXT(trace, 5, __FUNCTION__);
1882 if (__ctc_check_irb_error(cdev, irb))
1885 /* Check for unsolicited interrupts. */
1886 if (!cdev->dev.driver_data) {
1887 ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
1888 cdev->dev.bus_id, irb->scsw.cstat,
1893 priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
1896 /* Try to extract channel from driver data. */
1897 if (priv->channel[READ]->cdev == cdev)
1898 ch = priv->channel[READ];
1899 else if (priv->channel[WRITE]->cdev == cdev)
1900 ch = priv->channel[WRITE];
1902 ctc_pr_err("ctc: Can't determine channel for interrupt, "
1903 "device %s\n", cdev->dev.bus_id);
1907 dev = (struct net_device *) (ch->netdev);
1909 ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
1910 cdev->dev.bus_id, ch);
1915 ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
1916 dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
1919 /* Copy interruption response block. */
/* Keep a private copy: the irb passed in belongs to the cio layer. */
1920 memcpy(ch->irb, irb, sizeof(struct irb));
1922 /* Check for good subchannel return code, otherwise error message */
1923 if (ch->irb->scsw.cstat) {
1924 fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
1925 ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
1926 dev->name, ch->id, ch->irb->scsw.cstat,
1927 ch->irb->scsw.dstat);
1931 /* Check the reason-code of a unit check */
1932 if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
/* ecw[0] carries the sense byte; ccw_unit_check maps it to an FSM event. */
1933 ccw_unit_check(ch, ch->irb->ecw[0]);
1936 if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
1937 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
1938 fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
1940 fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
1943 if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
1944 fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
/* Any final-status indication maps to FINSTAT; everything else is a generic IRQ. */
1947 if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
1948 (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
1949 (ch->irb->scsw.stctl ==
1950 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1951 fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
1953 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1958 * Actions for interface - statemachine.
1959 *****************************************************************************/
1962 * Startup channels by sending CH_EVENT_START to each channel.
1964 * @param fi An instance of an interface statemachine.
1965 * @param event The event, just happened.
1966 * @param arg Generic pointer, casted from struct net_device * upon call.
1969 dev_action_start(fsm_instance * fi, int event, void *arg)
1971 struct net_device *dev = (struct net_device *) arg;
1972 struct ctc_priv *privptr = dev->priv;
1975 DBF_TEXT(setup, 3, __FUNCTION__);
/* Cancel a pending restart and wait for both directions to come up. */
1976 fsm_deltimer(&privptr->restart_timer);
1977 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
1978 for (direction = READ; direction <= WRITE; direction++) {
1979 struct channel *ch = privptr->channel[direction];
1980 fsm_event(ch->fsm, CH_EVENT_START, ch);
1985 * Shutdown channels by sending CH_EVENT_STOP to each channel.
1987 * @param fi An instance of an interface statemachine.
1988 * @param event The event, just happened.
1989 * @param arg Generic pointer, casted from struct net_device * upon call.
1992 dev_action_stop(fsm_instance * fi, int event, void *arg)
1994 struct net_device *dev = (struct net_device *) arg;
1995 struct ctc_priv *privptr = dev->priv;
1998 DBF_TEXT(trace, 3, __FUNCTION__);
/* Wait for both directions to report down before reaching STOPPED. */
1999 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2000 for (direction = READ; direction <= WRITE; direction++) {
2001 struct channel *ch = privptr->channel[direction];
2002 fsm_event(ch->fsm, CH_EVENT_STOP, ch);
/* Device FSM action: stop both channels, then schedule a delayed start. */
2006 dev_action_restart(fsm_instance *fi, int event, void *arg)
2008 struct net_device *dev = (struct net_device *)arg;
2009 struct ctc_priv *privptr = dev->priv;
2011 DBF_TEXT(trace, 3, __FUNCTION__);
2012 ctc_pr_debug("%s: Restarting\n", dev->name);
2013 dev_action_stop(fi, event, arg);
2014 fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
/* Restart after 5 s to give the channels time to terminate cleanly. */
2015 fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC,
2016 DEV_EVENT_START, dev);
2020 * Called from channel statemachine
2021 * when a channel is up and running.
2023 * @param fi An instance of an interface statemachine.
2024 * @param event The event, just happened.
2025 * @param arg Generic pointer, casted from struct net_device * upon call.
/*
 * Device FSM action: one direction came up (event is DEV_EVENT_RXUP or
 * DEV_EVENT_TXUP). Advance through the STARTWAIT states; once both
 * directions are up, enter RUNNING and clear the tx-busy flag.
 */
2028 dev_action_chup(fsm_instance * fi, int event, void *arg)
2030 struct net_device *dev = (struct net_device *) arg;
2032 DBF_TEXT(trace, 3, __FUNCTION__);
2033 switch (fsm_getstate(fi)) {
2034 case DEV_STATE_STARTWAIT_RXTX:
2035 if (event == DEV_EVENT_RXUP)
2036 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX)
2038 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2040 case DEV_STATE_STARTWAIT_RX:
2041 if (event == DEV_EVENT_RXUP) {
2042 fsm_newstate(fi, DEV_STATE_RUNNING);
2043 ctc_pr_info("%s: connected with remote side\n",
2045 ctc_clear_busy(dev);
2048 case DEV_STATE_STARTWAIT_TX:
2049 if (event == DEV_EVENT_TXUP) {
2050 fsm_newstate(fi, DEV_STATE_RUNNING);
2051 ctc_pr_info("%s: connected with remote side\n",
2053 ctc_clear_busy(dev);
2056 case DEV_STATE_STOPWAIT_TX:
2057 if (event == DEV_EVENT_RXUP)
2058 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2060 case DEV_STATE_STOPWAIT_RX:
2061 if (event == DEV_EVENT_TXUP)
2062 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2068 * Called from channel statemachine
2069 * when a channel has been shutdown.
2071 * @param fi An instance of an interface statemachine.
2072 * @param event The event, just happened.
2073 * @param arg Generic pointer, casted from struct net_device * upon call.
/*
 * Device FSM action: one direction went down (DEV_EVENT_RXDOWN or
 * DEV_EVENT_TXDOWN). Mirror image of dev_action_chup: fall back through
 * the STARTWAIT/STOPWAIT states, reaching STOPPED when both are down.
 */
2076 dev_action_chdown(fsm_instance * fi, int event, void *arg)
2079 DBF_TEXT(trace, 3, __FUNCTION__);
2080 switch (fsm_getstate(fi)) {
2081 case DEV_STATE_RUNNING:
2082 if (event == DEV_EVENT_TXDOWN)
2083 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2085 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2087 case DEV_STATE_STARTWAIT_RX:
2088 if (event == DEV_EVENT_TXDOWN)
2089 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2091 case DEV_STATE_STARTWAIT_TX:
2092 if (event == DEV_EVENT_RXDOWN)
2093 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2095 case DEV_STATE_STOPWAIT_RXTX:
2096 if (event == DEV_EVENT_TXDOWN)
2097 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2099 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2101 case DEV_STATE_STOPWAIT_RX:
2102 if (event == DEV_EVENT_RXDOWN)
2103 fsm_newstate(fi, DEV_STATE_STOPPED);
2105 case DEV_STATE_STOPWAIT_TX:
2106 if (event == DEV_EVENT_TXDOWN)
2107 fsm_newstate(fi, DEV_STATE_STOPPED);
/* Transition table for the per-interface (device) statemachine. */
2112 static const fsm_node dev_fsm[] = {
2113 {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start},
2115 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2116 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2117 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2118 {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2120 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2121 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2122 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2123 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2124 {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2126 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2127 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2128 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2129 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2130 {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2132 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2133 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2134 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2135 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2136 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2137 {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2139 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2140 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2141 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2142 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2143 {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2145 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2146 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2147 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2148 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2149 {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2151 {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2152 {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2153 {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2154 {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop },
2155 {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop },
2156 {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
/* Element count of dev_fsm[]; used when creating the device FSM. */
2159 static const int DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
2162 * Transmit a packet.
2163 * This is a helper function for ctc_tx().
2165 * @param ch Channel to be used for sending.
2166 * @param skb Pointer to struct sk_buff of packet to send.
2167 * The linklevel header has already been set up
2170 * @return 0 on success, -ERRNO on failure. (Never fails.)
2173 transmit_skb(struct channel *ch, struct sk_buff *skb)
2175 unsigned long saveflags;
2176 struct ll_header header;
2179 DBF_TEXT(trace, 5, __FUNCTION__);
2180 /* we need to acquire the lock for testing the state
2181 * otherwise we can have an IRQ changing the state to
2182 * TXIDLE after the test but before acquiring the lock.
2184 spin_lock_irqsave(&ch->collect_lock, saveflags);
/* Channel busy: queue the skb on the collect queue for a later multi-write. */
2185 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2186 int l = skb->len + LL_HEADER_LENGTH;
2188 if (ch->collect_len + l > ch->max_bufsize - 2) {
2189 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
/* Hold an extra reference while the skb sits on our queue. */
2192 atomic_inc(&skb->users);
2194 header.type = skb->protocol;
2196 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
2198 skb_queue_tail(&ch->collect_queue, skb);
2199 ch->collect_len += l;
2201 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2205 struct sk_buff *nskb;
2207 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2209 * Protect skb against beeing free'd by upper
2212 atomic_inc(&skb->users);
2213 ch->prof.txlen += skb->len;
2214 header.length = skb->len + LL_HEADER_LENGTH;
2215 header.type = skb->protocol;
2217 memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
/* Prepend the 2-byte block-length word required by the CTC protocol. */
2219 block_len = skb->len + 2;
2220 *((__u16 *) skb_push(skb, 2)) = block_len;
2223 * IDAL support in CTC is broken, so we have to
2224 * care about skb's above 2G ourselves.
/* hi != 0 means the payload lives above the 31-bit line and must be copied down. */
2226 hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
2228 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
2230 atomic_dec(&skb->users);
2231 skb_pull(skb, LL_HEADER_LENGTH + 2);
2232 ctc_clear_busy(ch->netdev);
2235 memcpy(skb_put(nskb, skb->len),
2236 skb->data, skb->len);
2237 atomic_inc(&nskb->users);
2238 atomic_dec(&skb->users);
2239 dev_kfree_skb_irq(skb);
2244 ch->ccw[4].count = block_len;
2245 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
2247 * idal allocation failed, try via copying to
2248 * trans_skb. trans_skb usually has a pre-allocated
2251 if (ctc_checkalloc_buffer(ch, 1)) {
2253 * Remove our header. It gets added
2254 * again on retransmit.
2256 atomic_dec(&skb->users);
2257 skb_pull(skb, LL_HEADER_LENGTH + 2);
2258 ctc_clear_busy(ch->netdev);
/* Fall back to the fixed trans_skb buffer (ccw[1] channel program). */
2262 ch->trans_skb->tail = ch->trans_skb->data;
2263 ch->trans_skb->len = 0;
2264 ch->ccw[1].count = skb->len;
2265 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
2267 atomic_dec(&skb->users);
2268 dev_kfree_skb_irq(skb);
2271 skb_queue_tail(&ch->io_queue, skb);
2275 fsm_newstate(ch->fsm, CH_STATE_TX);
2276 fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
2277 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
/* NOTE(review): xtime is a deprecated global; newer kernels use
 * current_kernel_time()/ktime for this timestamp. */
2278 ch->prof.send_stamp = xtime;
2279 rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
2280 (unsigned long) ch, 0xff, 0);
2281 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
2283 ch->prof.doios_single++;
/* ccw_device_start failed: undo queuing and strip our headers for retransmit. */
2285 fsm_deltimer(&ch->timer);
2286 ccw_check_return_code(ch, rc, "single skb TX");
2288 skb_dequeue_tail(&ch->io_queue);
2290 * Remove our header. It gets added
2291 * again on retransmit.
2293 skb_pull(skb, LL_HEADER_LENGTH + 2);
2296 struct net_device *dev = ch->netdev;
2297 struct ctc_priv *privptr = dev->priv;
2298 privptr->stats.tx_packets++;
2299 privptr->stats.tx_bytes +=
2300 skb->len - LL_HEADER_LENGTH;
2305 ctc_clear_busy(ch->netdev);
2310 * Interface API for upper network layers
2311 *****************************************************************************/
2314 * Open an interface.
2315 * Called from generic network layer when ifconfig up is run.
2317 * @param dev Pointer to interface struct.
2319 * @return 0 on success, -ERRNO on failure. (Never fails.)
/* net_device open callback: kick the device FSM with a START event. */
2322 ctc_open(struct net_device * dev)
2324 DBF_TEXT(trace, 5, __FUNCTION__);
2325 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
2330 * Close an interface.
2331 * Called from generic network layer when ifconfig down is run.
2333 * @param dev Pointer to interface struct.
2335 * @return 0 on success, -ERRNO on failure. (Never fails.)
/* net_device stop callback: kick the device FSM with a STOP event. */
2338 ctc_close(struct net_device * dev)
2340 DBF_TEXT(trace, 5, __FUNCTION__);
2341 fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
2346 * Start transmission of a packet.
2347 * Called from generic network device layer.
2349 * @param skb Pointer to buffer containing the packet.
2350 * @param dev Pointer to interface struct.
2352 * @return 0 if packet consumed, !0 if packet rejected.
2353 * Note: If we return !0, then the packet is free'd by
2354 * the generic network layer.
2357 ctc_tx(struct sk_buff *skb, struct net_device * dev)
2360 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2362 DBF_TEXT(trace, 5, __FUNCTION__);
2364 * Some sanity checks ...
2367 ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
2368 privptr->stats.tx_dropped++;
/* Need room in front of the packet for LL header + 2-byte block length. */
2371 if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
2372 ctc_pr_warn("%s: Got sk_buff with head room < %ld bytes\n",
2373 dev->name, LL_HEADER_LENGTH + 2);
2375 privptr->stats.tx_dropped++;
2380 * If channels are not running, try to restart them
2381 * and throw away packet.
2383 if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
2384 fsm_event(privptr->fsm, DEV_EVENT_START, dev);
2386 privptr->stats.tx_dropped++;
2387 privptr->stats.tx_errors++;
2388 privptr->stats.tx_carrier_errors++;
/* Serialize transmitters; busy flag is cleared again in transmit_skb paths. */
2392 if (ctc_test_and_set_busy(dev))
2395 dev->trans_start = jiffies;
2396 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2402 * Sets MTU of an interface.
2404 * @param dev Pointer to interface struct.
2405 * @param new_mtu The new MTU to use for this interface.
2407 * @return 0 on success, -EINVAL if MTU is out of valid range.
2408 * (valid range is 576 .. 65527). If VM is on the
2409 * remote side, maximum MTU is 32760, however this is
2410 * <em>not</em> checked here.
2413 ctc_change_mtu(struct net_device * dev, int new_mtu)
2415 struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
2417 DBF_TEXT(trace, 3, __FUNCTION__);
/* MTU must also fit in the RX channel buffer minus header/length overhead. */
2418 if ((new_mtu < 576) || (new_mtu > 65527) ||
2419 (new_mtu > (privptr->channel[READ]->max_bufsize -
2420 LL_HEADER_LENGTH - 2)))
2423 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2428 * Returns interface statistics of a device.
2430 * @param dev Pointer to interface struct.
2432 * @return Pointer to stats struct of this interface.
/* net_device get_stats callback: stats live inside ctc_priv. */
2434 static struct net_device_stats *
2435 ctc_stats(struct net_device * dev)
2437 return &((struct ctc_priv *) dev->priv)->stats;
/* sysfs 'buffer' show: print the configured buffer size in bytes. */
2445 buffer_show(struct device *dev, struct device_attribute *attr, char *buf)
2447 struct ctc_priv *priv;
2449 priv = dev->driver_data;
2452 return sprintf(buf, "%d\n",
/*
 * sysfs 'buffer' store: set a new buffer size. Validates range
 * (576 + header .. CTC_BUFSIZE_LIMIT) and, on a running interface,
 * that the current MTU still fits; then applies the size to both
 * channels and flags them so the buffers get reallocated.
 */
2457 buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2459 struct ctc_priv *priv;
2460 struct net_device *ndev;
2464 DBF_TEXT(trace, 3, __FUNCTION__);
2465 DBF_TEXT(trace, 3, buf);
2466 priv = dev->driver_data;
2468 DBF_TEXT(trace, 3, "bfnopriv");
2472 sscanf(buf, "%u", &bs1);
2473 if (bs1 > CTC_BUFSIZE_LIMIT)
2475 if (bs1 < (576 + LL_HEADER_LENGTH + 2))
2477 priv->buffer_size = bs1; // just to overwrite the default
2479 ndev = priv->channel[READ]->netdev;
2481 DBF_TEXT(trace, 3, "bfnondev");
2485 if ((ndev->flags & IFF_RUNNING) &&
2486 (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
2489 priv->channel[READ]->max_bufsize = bs1;
2490 priv->channel[WRITE]->max_bufsize = bs1;
/* On a stopped interface, derive the MTU from the new buffer size. */
2491 if (!(ndev->flags & IFF_RUNNING))
2492 ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
2493 priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2494 priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
2496 sprintf(buffer, "%d",priv->buffer_size);
2497 DBF_TEXT(trace, 3, buffer);
2501 DBF_TEXT(trace, 3, "buff_err");
/* sysfs 'loglevel' show: print the driver-global log level. */
2506 loglevel_show(struct device *dev, struct device_attribute *attr, char *buf)
2508 return sprintf(buf, "%d\n", loglevel);
/* sysfs 'loglevel' store: parse and range-check a new log level (0..CTC_LOGLEVEL_MAX). */
2512 loglevel_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2516 DBF_TEXT(trace, 5, __FUNCTION__);
2517 sscanf(buf, "%i", &ll1);
2519 if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
/*
 * Format the FSM states and TX profiling counters into a temporary
 * buffer and emit them via ctc_pr_debug. Buffer is heap-allocated
 * (2048 bytes) because the report is too large for the stack.
 */
2526 ctc_print_statistics(struct ctc_priv *priv)
2531 DBF_TEXT(trace, 4, __FUNCTION__);
2534 sbuf = kmalloc(2048, GFP_KERNEL);
2539 p += sprintf(p, " Device FSM state: %s\n",
2540 fsm_getstate_str(priv->fsm));
2541 p += sprintf(p, " RX channel FSM state: %s\n",
2542 fsm_getstate_str(priv->channel[READ]->fsm));
2543 p += sprintf(p, " TX channel FSM state: %s\n",
2544 fsm_getstate_str(priv->channel[WRITE]->fsm));
2545 p += sprintf(p, " Max. TX buffer used: %ld\n",
2546 priv->channel[WRITE]->prof.maxmulti);
2547 p += sprintf(p, " Max. chained SKBs: %ld\n",
2548 priv->channel[WRITE]->prof.maxcqueue);
2549 p += sprintf(p, " TX single write ops: %ld\n",
2550 priv->channel[WRITE]->prof.doios_single);
2551 p += sprintf(p, " TX multi write ops: %ld\n",
2552 priv->channel[WRITE]->prof.doios_multi);
2553 p += sprintf(p, " Netto bytes written: %ld\n",
2554 priv->channel[WRITE]->prof.txlen);
2555 p += sprintf(p, " Max. TX IO-time: %ld\n",
2556 priv->channel[WRITE]->prof.tx_time);
2558 ctc_pr_debug("Statistics for %s:\n%s",
2559 priv->channel[WRITE]->netdev->name, sbuf);
/*
 * sysfs "stats" attribute: reading it dumps the statistics to the debug
 * log (see ctc_print_statistics) and returns the literal "0".
 */
2565 stats_show(struct device *dev, struct device_attribute *attr, char *buf)
2567 struct ctc_priv *priv = dev->driver_data;
2570 ctc_print_statistics(priv);
2571 return sprintf(buf, "0\n");
/*
 * sysfs "stats" attribute: writing anything resets the WRITE channel's
 * transmit profiling counters to zero.
 */
2575 stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2577 struct ctc_priv *priv = dev->driver_data;
2580 /* Reset statistics */
2581 memset(&priv->channel[WRITE]->prof, 0,
2582 sizeof(priv->channel[WRITE]->prof));
/*
 * Unregister @dev from the network stack. privptr is fetched but its use
 * is on elided lines -- presumably cleanup of private state; confirm
 * against the full source.
 */
2587 ctc_netdev_unregister(struct net_device * dev)
2589 struct ctc_priv *privptr;
2593 privptr = (struct ctc_priv *) dev->priv;
2594 unregister_netdev(dev);
/* Thin wrapper around register_netdev(); returns its 0/-errno result. */
2598 ctc_netdev_register(struct net_device * dev)
2600 return register_netdev(dev);
/*
 * Tear down a net_device: free its device FSM; the handling of the
 * free_dev flag (presumably kfree of the device itself) is on elided
 * lines -- confirm against the full source.
 */
2604 ctc_free_netdevice(struct net_device * dev, int free_dev)
2606 struct ctc_priv *privptr;
2609 privptr = dev->priv;
2612 kfree_fsm(privptr->fsm);
/* sysfs "protocol" attribute: show the CTC protocol number in use. */
2622 ctc_proto_show(struct device *dev, struct device_attribute *attr, char *buf)
2624 struct ctc_priv *priv;
2626 priv = dev->driver_data;
2630 return sprintf(buf, "%d\n", priv->protocol);
/*
 * sysfs "protocol" attribute: store a new protocol number.
 * Only CTC_PROTO_S390, CTC_PROTO_LINUX and CTC_PROTO_OS390 are accepted;
 * the rejection return sits on an elided line.
 * NOTE(review): sscanf() result is unchecked (value may stay
 * uninitialized on bad input) -- not fixable from this elided view.
 */
2634 ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
2636 struct ctc_priv *priv;
2639 DBF_TEXT(trace, 3, __FUNCTION__);
2640 pr_debug("%s() called\n", __FUNCTION__);
2642 priv = dev->driver_data;
2645 sscanf(buf, "%u", &value);
2646 if (!((value == CTC_PROTO_S390) ||
2647 (value == CTC_PROTO_LINUX) ||
2648 (value == CTC_PROTO_OS390)))
2650 priv->protocol = value;
/*
 * sysfs "type" attribute (read-only): show the channel type name, looked
 * up in the cu3088 type table via the first ccw device's driver_info.
 */
2656 ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2658 struct ccwgroup_device *cgdev;
2660 cgdev = to_ccwgroupdev(dev);
2664 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
/*
 * sysfs plumbing: attribute declarations, the always-present attribute
 * group (buffer/protocol/type, created at probe time via ctc_add_files),
 * and the loglevel/stats attributes added separately once the device is
 * set online (ctc_add_attributes). Terminators/initializers of the
 * arrays are on elided lines.
 */
2667 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
2668 static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
2669 static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
2671 static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
2672 static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
2674 static struct attribute *ctc_attr[] = {
2675 &dev_attr_protocol.attr,
2676 &dev_attr_type.attr,
2677 &dev_attr_buffer.attr,
2681 static struct attribute_group ctc_attr_group = {
/*
 * Create the online-only sysfs files (loglevel, stats). On failure of the
 * second create, the first is rolled back. Returns 0 or the -errno from
 * device_create_file (return lines elided).
 */
2686 ctc_add_attributes(struct device *dev)
2690 rc = device_create_file(dev, &dev_attr_loglevel);
2693 rc = device_create_file(dev, &dev_attr_stats);
2696 device_remove_file(dev, &dev_attr_loglevel);
/* Remove the online-only sysfs files, inverse of ctc_add_attributes(). */
2702 ctc_remove_attributes(struct device *dev)
2704 device_remove_file(dev, &dev_attr_stats);
2705 device_remove_file(dev, &dev_attr_loglevel);
/* Create the base attribute group (buffer/protocol/type) under @dev. */
2709 ctc_add_files(struct device *dev)
2711 pr_debug("%s() called\n", __FUNCTION__);
2713 return sysfs_create_group(&dev->kobj, &ctc_attr_group);
/* Remove the base attribute group, inverse of ctc_add_files(). */
2717 ctc_remove_files(struct device *dev)
2719 pr_debug("%s() called\n", __FUNCTION__);
2721 sysfs_remove_group(&dev->kobj, &ctc_attr_group);
2725 * Add ctc specific attributes.
2726 * Add ctc private data.
2728 * @param cgdev pointer to ccwgroup_device just added
2730 * @returns 0 on success, !0 on failure.
/*
 * ccwgroup probe callback: takes a reference on the group device,
 * allocates zeroed private data, creates the base sysfs files, installs
 * the IRQ handler on both ccw subdevices and stashes priv in driver_data.
 * Error returns and the kfree(priv) on the ctc_add_files failure path sit
 * on elided lines -- verify priv is freed there in the full source.
 */
2733 ctc_probe_device(struct ccwgroup_device *cgdev)
2735 struct ctc_priv *priv;
2739 pr_debug("%s() called\n", __FUNCTION__);
2740 DBF_TEXT(setup, 3, __FUNCTION__);
2742 if (!get_device(&cgdev->dev))
2745 priv = kzalloc(sizeof(struct ctc_priv), GFP_KERNEL);
2747 ctc_pr_err("%s: Out of memory\n", __func__);
2748 put_device(&cgdev->dev);
2752 rc = ctc_add_files(&cgdev->dev);
2755 put_device(&cgdev->dev);
2758 priv->buffer_size = CTC_BUFSIZE_DEFAULT;
2759 cgdev->cdev[0]->handler = ctc_irq_handler;
2760 cgdev->cdev[1]->handler = ctc_irq_handler;
2761 cgdev->dev.driver_data = priv;
/* trace a few pointers/sizes into the data debug feed */
2763 sprintf(buffer, "%p", priv);
2764 DBF_TEXT(data, 3, buffer);
2766 sprintf(buffer, "%u", (unsigned int)sizeof(struct ctc_priv));
2767 DBF_TEXT(data, 3, buffer);
2769 sprintf(buffer, "%p", &channels);
2770 DBF_TEXT(data, 3, buffer);
2772 sprintf(buffer, "%u", (unsigned int)sizeof(struct channel));
2773 DBF_TEXT(data, 3, buffer);
2779 * Initialize everything of the net device except the name and the
/*
 * Optionally allocates (alloc_device != 0) and initializes a net_device:
 * attaches @privptr, builds the device FSM (freed again on init failure),
 * and fills in the netdev callbacks, MTU and link-level parameters.
 * Returns the device, or NULL on allocation/FSM failure (those return
 * lines are elided).
 *
 * NOTE(review): a bare kzalloc of struct net_device instead of
 * alloc_netdev() is pre-netdev-ops era kernel style; kept as-is.
 */
2782 static struct net_device *
2783 ctc_init_netdevice(struct net_device * dev, int alloc_device,
2784 struct ctc_priv *privptr)
2789 DBF_TEXT(setup, 3, __FUNCTION__);
2792 dev = kzalloc(sizeof(struct net_device), GFP_KERNEL);
2797 dev->priv = privptr;
2798 privptr->fsm = init_fsm("ctcdev", dev_state_names,
2799 dev_event_names, CTC_NR_DEV_STATES, CTC_NR_DEV_EVENTS,
2800 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2801 if (privptr->fsm == NULL) {
2806 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2807 fsm_settimer(privptr->fsm, &privptr->restart_timer);
/* MTU leaves room for the link-level header within the default buffer */
2809 dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
2810 dev->hard_start_xmit = ctc_tx;
2811 dev->open = ctc_open;
2812 dev->stop = ctc_close;
2813 dev->get_stats = ctc_stats;
2814 dev->change_mtu = ctc_change_mtu;
2815 dev->hard_header_len = LL_HEADER_LENGTH + 2;
2817 dev->type = ARPHRD_SLIP;
2818 dev->tx_queue_len = 100;
2819 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2820 SET_MODULE_OWNER(dev);
2827 * Setup an interface.
2829 * @param cgdev Device to be setup.
2831 * @returns 0 on success, !0 on failure.
/*
 * ccwgroup set_online callback: brings both ccw subdevices online,
 * allocates the net_device, binds the READ/WRITE channels to it,
 * registers the netdev and creates the online-only sysfs attributes.
 * Several error-path labels/returns are on elided lines; the visible
 * tail (set_offline calls) is the rollback path for online failures.
 */
2834 ctc_new_device(struct ccwgroup_device *cgdev)
2836 char read_id[CTC_ID_SIZE];
2837 char write_id[CTC_ID_SIZE];
2839 enum channel_types type;
2840 struct ctc_priv *privptr;
2841 struct net_device *dev;
2845 pr_debug("%s() called\n", __FUNCTION__);
2846 DBF_TEXT(setup, 3, __FUNCTION__);
2848 privptr = cgdev->dev.driver_data;
2852 sprintf(buffer, "%d", privptr->buffer_size);
2853 DBF_TEXT(setup, 3, buffer);
2855 type = get_channel_type(&cgdev->cdev[0]->id);
/* channel ids derive from the ccw devices' bus ids, "ch-<busid>" */
2857 snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id);
2858 snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id);
2860 if (add_channel(cgdev->cdev[0], type))
2862 if (add_channel(cgdev->cdev[1], type))
2865 ret = ccw_device_set_online(cgdev->cdev[0]);
2868 "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret);
2871 ret = ccw_device_set_online(cgdev->cdev[1]);
2874 "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret);
2877 dev = ctc_init_netdevice(NULL, 1, privptr);
2880 ctc_pr_warn("ctc_init_netdevice failed\n");
/* "%d" template: the kernel picks the first free ctcN name on register */
2884 strlcpy(dev->name, "ctc%d", IFNAMSIZ);
2886 for (direction = READ; direction <= WRITE; direction++) {
2887 privptr->channel[direction] =
2888 channel_get(type, direction == READ ? read_id : write_id,
2890 if (privptr->channel[direction] == NULL) {
/* WRITE failed after READ succeeded -> release the READ channel again */
2891 if (direction == WRITE)
2892 channel_free(privptr->channel[READ]);
2894 ctc_free_netdevice(dev, 1);
2897 privptr->channel[direction]->netdev = dev;
2898 privptr->channel[direction]->protocol = privptr->protocol;
2899 privptr->channel[direction]->max_bufsize = privptr->buffer_size;
2902 SET_NETDEV_DEV(dev, &cgdev->dev);
2904 if (ctc_netdev_register(dev) != 0) {
2905 ctc_free_netdevice(dev, 1);
2909 if (ctc_add_attributes(&cgdev->dev)) {
2910 ctc_netdev_unregister(dev);
2912 ctc_free_netdevice(dev, 1);
/* fsm name follows the final interface name assigned by the kernel */
2916 strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name));
2920 ctc_pr_info("%s: read: %s, write: %s, proto: %d\n",
2921 dev->name, privptr->channel[READ]->id,
2922 privptr->channel[WRITE]->id, privptr->protocol);
/* rollback: take both subdevices offline again (label line elided) */
2926 ccw_device_set_offline(cgdev->cdev[1]);
2927 ccw_device_set_offline(cgdev->cdev[0]);
2933 * Shutdown an interface.
2935 * @param cgdev Device to be shut down.
2937 * @returns 0 on success, !0 on failure.
/*
 * ccwgroup set_offline callback: closes the netdev, frees both channels,
 * unregisters and frees the net_device and its FSM, takes both ccw
 * subdevices offline and finally removes the channels from the global
 * list. The close call itself and several branch bodies are elided.
 */
2940 ctc_shutdown_device(struct ccwgroup_device *cgdev)
2942 struct ctc_priv *priv;
2943 struct net_device *ndev;
2945 DBF_TEXT(setup, 3, __FUNCTION__);
2946 pr_debug("%s() called\n", __FUNCTION__);
2949 priv = cgdev->dev.driver_data;
/* netdev is reachable only through the READ channel */
2954 if (priv->channel[READ]) {
2955 ndev = priv->channel[READ]->netdev;
2957 /* Close the device */
2959 ndev->flags &=~IFF_RUNNING;
2961 ctc_remove_attributes(&cgdev->dev);
2963 channel_free(priv->channel[READ]);
2965 if (priv->channel[WRITE])
2966 channel_free(priv->channel[WRITE]);
2969 ctc_netdev_unregister(ndev);
2971 ctc_free_netdevice(ndev, 1);
/* no netdev case: still free the FSM directly (branch structure elided) */
2975 kfree_fsm(priv->fsm);
2977 ccw_device_set_offline(cgdev->cdev[1]);
2978 ccw_device_set_offline(cgdev->cdev[0]);
2980 if (priv->channel[READ])
2981 channel_remove(priv->channel[READ]);
2982 if (priv->channel[WRITE])
2983 channel_remove(priv->channel[WRITE]);
2984 priv->channel[READ] = priv->channel[WRITE] = NULL;
/*
 * ccwgroup remove callback: shuts the device down if still online,
 * removes the base sysfs files, clears driver_data and drops the device
 * reference taken in ctc_probe_device(). The kfree(priv) presumably
 * happens on an elided line -- confirm against the full source.
 */
2991 ctc_remove_device(struct ccwgroup_device *cgdev)
2993 struct ctc_priv *priv;
2995 pr_debug("%s() called\n", __FUNCTION__);
2996 DBF_TEXT(setup, 3, __FUNCTION__);
2998 priv = cgdev->dev.driver_data;
3001 if (cgdev->state == CCWGROUP_ONLINE)
3002 ctc_shutdown_device(cgdev);
3003 ctc_remove_files(&cgdev->dev);
3004 cgdev->dev.driver_data = NULL;
3006 put_device(&cgdev->dev);
/*
 * ccwgroup driver glue: wires the probe/remove/online/offline callbacks
 * above into the cu3088 discipline (registered in ctc_init below).
 * driver_id 0xC3E3C3 is "CTC" in EBCDIC.
 */
3009 static struct ccwgroup_driver ctc_group_driver = {
3010 .owner = THIS_MODULE,
3013 .driver_id = 0xC3E3C3,
3014 .probe = ctc_probe_device,
3015 .remove = ctc_remove_device,
3016 .set_online = ctc_new_device,
3017 .set_offline = ctc_shutdown_device,
3021 * Module related routines
3022 *****************************************************************************/
3025 * Prepare to be unloaded. Free IRQ's and release all resources.
3026 * This is called just before this module is unloaded. It is
3027 * <em>not</em> called, if the usage count is !0, so we don't need to check
/*
 * Module exit body (the function signature line is elided): unregisters
 * the cu3088 discipline and the debug views, then logs the unload.
 */
3033 DBF_TEXT(setup, 3, __FUNCTION__);
3034 unregister_cu3088_discipline(&ctc_group_driver);
3035 ctc_unregister_dbf_views();
3036 ctc_pr_info("CTC driver unloaded\n");
3040 * Initialize module.
3041 * This is called just after the module is loaded.
3043 * @return 0 on success, !0 on error.
/*
 * Module init body (the function signature line is elided): sets the
 * default log level, registers the debug views and then the cu3088
 * group discipline, unwinding the views if that registration fails.
 */
3050 loglevel = CTC_LOGLEVEL_DEFAULT;
3052 DBF_TEXT(setup, 3, __FUNCTION__);
3056 ret = ctc_register_dbf_views();
3058 ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
3061 ret = register_cu3088_discipline(&ctc_group_driver);
/* rollback: discipline registration failed -> drop the debug views again */
3063 ctc_unregister_dbf_views();
/* Hook init/exit into the module loader. */
3068 module_init(ctc_init);
3069 module_exit(ctc_exit);
3071 /* --- This is the END my friend --- */