2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
12 #include <linux/config.h>
13 #include <linux/kmod.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/ctype.h>
17 #include <linux/major.h>
18 #include <linux/slab.h>
19 #include <linux/buffer_head.h>
20 #include <linux/hdreg.h>
22 #include <asm/ccwdev.h>
23 #include <asm/ebcdic.h>
24 #include <asm/idals.h>
25 #include <asm/todclk.h>
28 #define PRINTK_HEADER "dasd:"
32 * SECTION: Constant definitions to be used within this file
34 #define DASD_CHANQ_MAX_SIZE 4
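/*
 * DASD_CHANQ_MAX_SIZE bounds how many block layer requests are moved
 * onto a device's ccw queue in one pass, see the nr_queued check in
 * __dasd_process_blk_queue() below.
 */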
37 * SECTION: exported variables of dasd.c
39 debug_info_t *dasd_debug_area;
40 struct dasd_discipline *dasd_diag_discipline_pointer;
42 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
43 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
44 " Copyright 2000 IBM Corporation");
45 MODULE_SUPPORTED_DEVICE("dasd");
46 MODULE_LICENSE("GPL");
49 * SECTION: prototypes for static functions of dasd.c
51 static int dasd_alloc_queue(struct dasd_device * device);
52 static void dasd_setup_queue(struct dasd_device * device);
53 static void dasd_free_queue(struct dasd_device * device);
54 static void dasd_flush_request_queue(struct dasd_device *);
55 static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
56 static void dasd_flush_ccw_queue(struct dasd_device *, int);
57 static void dasd_tasklet(struct dasd_device *);
58 static void do_kick_device(void *data);
61 * SECTION: Operations on the device structure.
63 static wait_queue_head_t dasd_init_waitq;
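/*
 * dasd_init_waitq is woken in dasd_change_state() and
 * dasd_set_target_state() once a device has reached its target state;
 * dasd_enable_device() and dasd_generic_set_online() sleep on it via
 * _wait_for_device().
 */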
66 * Allocate memory for a new device structure.
69 dasd_alloc_device(void)
71 struct dasd_device *device;
73 device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC);
75 return ERR_PTR(-ENOMEM);
76 /* open_count = 0 means device online but not in use */
77 atomic_set(&device->open_count, -1);
79 /* Get two pages for normal block device operations. */
80 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
81 if (device->ccw_mem == NULL) {
83 return ERR_PTR(-ENOMEM);
85 /* Get one page for error recovery. */
86 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
87 if (device->erp_mem == NULL) {
88 free_pages((unsigned long) device->ccw_mem, 1);
90 return ERR_PTR(-ENOMEM);
93 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
94 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
95 spin_lock_init(&device->mem_lock);
96 spin_lock_init(&device->request_queue_lock);
97 atomic_set (&device->tasklet_scheduled, 0);
98 tasklet_init(&device->tasklet,
99 (void (*)(unsigned long)) dasd_tasklet,
100 (unsigned long) device);
101 INIT_LIST_HEAD(&device->ccw_queue);
102 init_timer(&device->timer);
103 INIT_WORK(&device->kick_work, do_kick_device, device);
104 device->state = DASD_STATE_NEW;
105 device->target = DASD_STATE_NEW;
111 * Free memory of a device structure.
114 dasd_free_device(struct dasd_device *device)
116 kfree(device->private);
117 free_page((unsigned long) device->erp_mem);
118 free_pages((unsigned long) device->ccw_mem, 1);
123 * Make a new device known to the system.
126 dasd_state_new_to_known(struct dasd_device *device)
131 * As long as the device is not in state DASD_STATE_NEW we want to
132 * keep the reference count > 0.
134 dasd_get_device(device);
136 rc = dasd_alloc_queue(device);
138 dasd_put_device(device);
142 device->state = DASD_STATE_KNOWN;
147 * Let the system forget about a device.
150 dasd_state_known_to_new(struct dasd_device * device)
152 /* Disable extended error reporting for this device. */
153 dasd_eer_disable(device);
154 /* Forget the discipline information. */
155 if (device->discipline)
156 module_put(device->discipline->owner);
157 device->discipline = NULL;
158 if (device->base_discipline)
159 module_put(device->base_discipline->owner);
160 device->base_discipline = NULL;
161 device->state = DASD_STATE_NEW;
163 dasd_free_queue(device);
165 /* Give up reference we took in dasd_state_new_to_known. */
166 dasd_put_device(device);
170 * Request the irq line for the device.
173 dasd_state_known_to_basic(struct dasd_device * device)
177 /* Allocate and register gendisk structure. */
178 rc = dasd_gendisk_alloc(device);
182 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
183 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
185 debug_register_view(device->debug_area, &debug_sprintf_view);
186 debug_set_level(device->debug_area, DBF_EMERG);
187 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
189 device->state = DASD_STATE_BASIC;
194 * Release the irq line for the device. Terminate any running i/o.
197 dasd_state_basic_to_known(struct dasd_device * device)
199 dasd_gendisk_free(device);
200 dasd_flush_ccw_queue(device, 1);
201 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
202 if (device->debug_area != NULL) {
203 debug_unregister(device->debug_area);
204 device->debug_area = NULL;
206 device->state = DASD_STATE_KNOWN;
210 * Do the initial analysis. The do_analysis function may return
211 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
212 * until the discipline decides to continue the startup sequence
213 * by calling the function dasd_change_state. The eckd discipline
214 * uses this to start a ccw that detects the format. The completion
215 * interrupt for this detection ccw uses the kernel event daemon to
216 * trigger the call to dasd_change_state. All this is done in the
217 * discipline code, see dasd_eckd.c.
218 * After the analysis ccw is done (do_analysis returned 0) the block device is set up.
220 * In case the analysis returns an error, the device setup is stopped
221 * (a fake disk was already added to allow formatting).
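/*
 * Rough sketch of the -EAGAIN path described above (the discipline side
 * is schematic, the dasd_* calls are the functions defined in this file):
 *
 *	rc = discipline->do_analysis(device);	 (returns -EAGAIN)
 *	... detection ccw completes, discipline then calls ...
 *	dasd_kick_device(device);
 *	   -> kernel event daemon -> do_kick_device() -> dasd_change_state()
 */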
224 dasd_state_basic_to_ready(struct dasd_device * device)
229 if (device->discipline->do_analysis != NULL)
230 rc = device->discipline->do_analysis(device);
233 device->state = DASD_STATE_UNFMT;
236 /* make disk known with correct capacity */
237 dasd_setup_queue(device);
238 set_capacity(device->gdp, device->blocks << device->s2b_shift);
239 device->state = DASD_STATE_READY;
240 rc = dasd_scan_partitions(device);
242 device->state = DASD_STATE_BASIC;
247 * Remove device from block device layer. Destroy dirty buffers.
248 * Forget format information. Check if the target level is basic
249 * and, if it is, create a fake disk for formatting.
252 dasd_state_ready_to_basic(struct dasd_device * device)
254 dasd_flush_ccw_queue(device, 0);
255 dasd_destroy_partitions(device);
256 dasd_flush_request_queue(device);
258 device->bp_block = 0;
259 device->s2b_shift = 0;
260 device->state = DASD_STATE_BASIC;
267 dasd_state_unfmt_to_basic(struct dasd_device * device)
269 device->state = DASD_STATE_BASIC;
273 * Make the device online and schedule the bottom half to start
274 * the requeueing of requests from the linux request queue to the ccw queue.
278 dasd_state_ready_to_online(struct dasd_device * device)
280 device->state = DASD_STATE_ONLINE;
281 dasd_schedule_bh(device);
286 * Stop the requeueing of requests again.
289 dasd_state_online_to_ready(struct dasd_device * device)
291 device->state = DASD_STATE_READY;
295 * Device startup state changes.
298 dasd_increase_state(struct dasd_device *device)
303 if (device->state == DASD_STATE_NEW &&
304 device->target >= DASD_STATE_KNOWN)
305 rc = dasd_state_new_to_known(device);
308 device->state == DASD_STATE_KNOWN &&
309 device->target >= DASD_STATE_BASIC)
310 rc = dasd_state_known_to_basic(device);
313 device->state == DASD_STATE_BASIC &&
314 device->target >= DASD_STATE_READY)
315 rc = dasd_state_basic_to_ready(device);
318 device->state == DASD_STATE_READY &&
319 device->target >= DASD_STATE_ONLINE)
320 rc = dasd_state_ready_to_online(device);
326 * Device shutdown state changes.
329 dasd_decrease_state(struct dasd_device *device)
331 if (device->state == DASD_STATE_ONLINE &&
332 device->target <= DASD_STATE_READY)
333 dasd_state_online_to_ready(device);
335 if (device->state == DASD_STATE_READY &&
336 device->target <= DASD_STATE_BASIC)
337 dasd_state_ready_to_basic(device);
339 if (device->state == DASD_STATE_UNFMT &&
340 device->target <= DASD_STATE_BASIC)
341 dasd_state_unfmt_to_basic(device);
343 if (device->state == DASD_STATE_BASIC &&
344 device->target <= DASD_STATE_KNOWN)
345 dasd_state_basic_to_known(device);
347 if (device->state == DASD_STATE_KNOWN &&
348 device->target <= DASD_STATE_NEW)
349 dasd_state_known_to_new(device);
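/*
 * Summary of the state ladder implemented by the transition functions
 * above:
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *	                        \-> UNFMT (analysis found no usable
 *	                             format; shutdown returns to BASIC)
 *
 * dasd_increase_state() walks towards device->target on startup,
 * dasd_decrease_state() walks back down on shutdown.
 */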
355 * This is the main startup/shutdown routine.
358 dasd_change_state(struct dasd_device *device)
362 if (device->state == device->target)
363 /* Already where we want to go today... */
365 if (device->state < device->target)
366 rc = dasd_increase_state(device);
368 rc = dasd_decrease_state(device);
369 if (rc && rc != -EAGAIN)
370 device->target = device->state;
372 if (device->state == device->target)
373 wake_up(&dasd_init_waitq);
377 * Kick starter for devices that did not complete the startup/shutdown
378 * procedure or were sleeping because of a pending state.
379 * dasd_kick_device will schedule a call to do_kick_device via the kernel event daemon.
383 do_kick_device(void *data)
385 struct dasd_device *device;
387 device = (struct dasd_device *) data;
388 dasd_change_state(device);
389 dasd_schedule_bh(device);
390 dasd_put_device(device);
394 dasd_kick_device(struct dasd_device *device)
396 dasd_get_device(device);
397 /* queue call to dasd_kick_device to the kernel event daemon. */
398 schedule_work(&device->kick_work);
402 * Set the target state for a device and start the state change.
405 dasd_set_target_state(struct dasd_device *device, int target)
407 /* If we are in probeonly mode stop at DASD_STATE_READY. */
408 if (dasd_probeonly && target > DASD_STATE_READY)
409 target = DASD_STATE_READY;
410 if (device->target != target) {
411 if (device->state == target)
412 wake_up(&dasd_init_waitq);
413 device->target = target;
415 if (device->state != device->target)
416 dasd_change_state(device);
420 * Enable a device and wait until it has reached its target state.
423 _wait_for_device(struct dasd_device *device)
425 return (device->state == device->target);
429 dasd_enable_device(struct dasd_device *device)
431 dasd_set_target_state(device, DASD_STATE_ONLINE);
432 if (device->state <= DASD_STATE_KNOWN)
433 /* No discipline for device found. */
434 dasd_set_target_state(device, DASD_STATE_NEW);
435 /* Now wait for the devices to come up. */
436 wait_event(dasd_init_waitq, _wait_for_device(device));
440 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
442 #ifdef CONFIG_DASD_PROFILE
444 struct dasd_profile_info_t dasd_global_profile;
445 unsigned int dasd_profile_level = DASD_PROFILE_OFF;
448 * Increments counter in global and local profiling structures.
450 #define dasd_profile_counter(value, counter, device) \
453 for (index = 0; index < 31 && value >> (2+index); index++); \
454 dasd_global_profile.counter[index]++; \
455 device->profile.counter[index]++; \
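/*
 * The loop in dasd_profile_counter implements a power-of-two histogram:
 * it stops at the first index for which value < 2^(index+2), so e.g.
 * value == 100 increments bucket 5 (100 >> 6 != 0 but 100 >> 7 == 0).
 */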
459 * Add profiling information for cqr before execution.
462 dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
466 unsigned int counter;
468 if (dasd_profile_level != DASD_PROFILE_ON)
471 /* count the length of the chanq for statistics */
473 list_for_each(l, &device->ccw_queue)
476 dasd_global_profile.dasd_io_nr_req[counter]++;
477 device->profile.dasd_io_nr_req[counter]++;
481 * Add profiling information for cqr after execution.
484 dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
487 long strtime, irqtime, endtime, tottime; /* in microseconds */
488 long tottimeps, sectors;
490 if (dasd_profile_level != DASD_PROFILE_ON)
493 sectors = req->nr_sectors;
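/*
 * The cqr clock stamps are S/390 TOD clock values; bit 51 of the TOD
 * clock ticks once per microsecond, so shifting the differences right
 * by 12 bits below yields microseconds.
 */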
494 if (!cqr->buildclk || !cqr->startclk ||
495 !cqr->stopclk || !cqr->endclk ||
499 strtime = ((cqr->startclk - cqr->buildclk) >> 12);
500 irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
501 endtime = ((cqr->endclk - cqr->stopclk) >> 12);
502 tottime = ((cqr->endclk - cqr->buildclk) >> 12);
503 tottimeps = tottime / sectors;
505 if (!dasd_global_profile.dasd_io_reqs)
506 memset(&dasd_global_profile, 0,
507 sizeof (struct dasd_profile_info_t));
508 dasd_global_profile.dasd_io_reqs++;
509 dasd_global_profile.dasd_io_sects += sectors;
511 if (!device->profile.dasd_io_reqs)
512 memset(&device->profile, 0,
513 sizeof (struct dasd_profile_info_t));
514 device->profile.dasd_io_reqs++;
515 device->profile.dasd_io_sects += sectors;
517 dasd_profile_counter(sectors, dasd_io_secs, device);
518 dasd_profile_counter(tottime, dasd_io_times, device);
519 dasd_profile_counter(tottimeps, dasd_io_timps, device);
520 dasd_profile_counter(strtime, dasd_io_time1, device);
521 dasd_profile_counter(irqtime, dasd_io_time2, device);
522 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
523 dasd_profile_counter(endtime, dasd_io_time3, device);
526 #define dasd_profile_start(device, cqr, req) do {} while (0)
527 #define dasd_profile_end(device, cqr, req) do {} while (0)
528 #endif /* CONFIG_DASD_PROFILE */
531 * Allocate memory for a channel program with 'cplength' channel
532 * command words and 'datasize' additional space. There are two
533 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
534 * memory and 2) dasd_smalloc_request uses the static ccw memory
535 * that gets allocated for each device. A usage sketch follows dasd_smalloc_request below.
537 struct dasd_ccw_req *
538 dasd_kmalloc_request(char *magic, int cplength, int datasize,
539 struct dasd_device * device)
541 struct dasd_ccw_req *cqr;
544 if ( magic == NULL || datasize > PAGE_SIZE ||
545 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
548 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
550 return ERR_PTR(-ENOMEM);
553 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
554 GFP_ATOMIC | GFP_DMA);
555 if (cqr->cpaddr == NULL) {
557 return ERR_PTR(-ENOMEM);
562 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
563 if (cqr->data == NULL) {
566 return ERR_PTR(-ENOMEM);
569 strncpy((char *) &cqr->magic, magic, 4);
570 ASCEBC((char *) &cqr->magic, 4);
571 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
572 dasd_get_device(device);
576 struct dasd_ccw_req *
577 dasd_smalloc_request(char *magic, int cplength, int datasize,
578 struct dasd_device * device)
581 struct dasd_ccw_req *cqr;
586 if ( magic == NULL || datasize > PAGE_SIZE ||
587 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
590 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
592 size += cplength * sizeof(struct ccw1);
595 spin_lock_irqsave(&device->mem_lock, flags);
596 cqr = (struct dasd_ccw_req *)
597 dasd_alloc_chunk(&device->ccw_chunks, size);
598 spin_unlock_irqrestore(&device->mem_lock, flags);
600 return ERR_PTR(-ENOMEM);
601 memset(cqr, 0, sizeof(struct dasd_ccw_req));
602 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
605 cqr->cpaddr = (struct ccw1 *) data;
606 data += cplength*sizeof(struct ccw1);
607 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
612 memset(cqr->data, 0, datasize);
614 strncpy((char *) &cqr->magic, magic, 4);
615 ASCEBC((char *) &cqr->magic, 4);
616 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
617 dasd_get_device(device);
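/*
 * Minimal usage sketch for the two allocators above (the caller shown
 * here is hypothetical, only the dasd_* functions are real):
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... fill cqr->cpaddr[] and cqr->data, start or sleep on the cqr ...
 *	dasd_sfree_request(cqr, device);
 *
 * Requests obtained from dasd_kmalloc_request are released with
 * dasd_kfree_request instead.
 */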
622 * Free memory of a channel program. This function needs to free all the
623 * idal lists that might have been created by dasd_set_cda and the
624 * struct dasd_ccw_req itself.
627 dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
632 /* Clear any idals used for the request. */
635 clear_normalized_cda(ccw);
636 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
641 dasd_put_device(device);
645 dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
649 spin_lock_irqsave(&device->mem_lock, flags);
650 dasd_free_chunk(&device->ccw_chunks, cqr);
651 spin_unlock_irqrestore(&device->mem_lock, flags);
652 dasd_put_device(device);
656 * Check discipline magic in cqr.
659 dasd_check_cqr(struct dasd_ccw_req *cqr)
661 struct dasd_device *device;
665 device = cqr->device;
666 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
667 DEV_MESSAGE(KERN_WARNING, device,
668 " dasd_ccw_req 0x%08x magic doesn't match"
669 " discipline 0x%08x",
671 *(unsigned int *) device->discipline->name);
678 * Terminate the current i/o and set the request to clear_pending.
679 * Timer keeps device running.
680 * ccw_device_clear can fail if the i/o subsystem is busy; the loop below retries up to five times.
684 dasd_term_IO(struct dasd_ccw_req * cqr)
686 struct dasd_device *device;
690 rc = dasd_check_cqr(cqr);
694 device = (struct dasd_device *) cqr->device;
695 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
696 rc = ccw_device_clear(device->cdev, (long) cqr);
698 case 0: /* termination successful */
700 cqr->status = DASD_CQR_CLEAR;
701 cqr->stopclk = get_clock();
702 DBF_DEV_EVENT(DBF_DEBUG, device,
703 "terminate cqr %p successful",
707 DBF_DEV_EVENT(DBF_ERR, device, "%s",
708 "device gone, retry");
711 DBF_DEV_EVENT(DBF_ERR, device, "%s",
716 DBF_DEV_EVENT(DBF_ERR, device, "%s",
717 "device busy, retry later");
720 DEV_MESSAGE(KERN_ERR, device,
721 "line %d unknown RC=%d, please "
722 "report to linux390@de.ibm.com",
729 dasd_schedule_bh(device);
734 * Start the i/o. This start_IO can fail if the channel is really busy.
735 * In that case set up a timer to start the request later.
738 dasd_start_IO(struct dasd_ccw_req * cqr)
740 struct dasd_device *device;
744 rc = dasd_check_cqr(cqr);
747 device = (struct dasd_device *) cqr->device;
748 if (cqr->retries < 0) {
749 DEV_MESSAGE(KERN_DEBUG, device,
750 "start_IO: request %p (%02x/%i) - no retry left.",
751 cqr, cqr->status, cqr->retries);
752 cqr->status = DASD_CQR_FAILED;
755 cqr->startclk = get_clock();
756 cqr->starttime = jiffies;
758 rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
762 cqr->status = DASD_CQR_IN_IO;
763 DBF_DEV_EVENT(DBF_DEBUG, device,
764 "start_IO: request %p started successful",
768 DBF_DEV_EVENT(DBF_ERR, device, "%s",
769 "start_IO: device busy, retry later");
772 DBF_DEV_EVENT(DBF_ERR, device, "%s",
773 "start_IO: request timeout, retry later");
776 /* -EACCES indicates that the request used only a
777 * subset of the available paths and all of these are gone.
779 * Do a retry with all available paths.
781 cqr->lpm = LPM_ANYPATH;
782 DBF_DEV_EVENT(DBF_ERR, device, "%s",
783 "start_IO: selected pathes gone,"
784 " retry on all pathes");
788 DBF_DEV_EVENT(DBF_ERR, device, "%s",
789 "start_IO: device gone, retry");
792 DEV_MESSAGE(KERN_ERR, device,
793 "line %d unknown RC=%d, please report"
794 " to linux390@de.ibm.com", __LINE__, rc);
802 * Timeout function for dasd devices. This is used for different purposes
803 * 1) missing interrupt handler for normal operation
804 * 2) delayed start of request where start_IO failed with -EBUSY
805 * 3) timeout for missing state change interrupts
806 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
807 * DASD_CQR_QUEUED for 2) and 3).
810 dasd_timeout_device(unsigned long ptr)
813 struct dasd_device *device;
815 device = (struct dasd_device *) ptr;
816 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
817 /* re-activate request queue */
818 device->stopped &= ~DASD_STOPPED_PENDING;
819 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
820 dasd_schedule_bh(device);
824 * Setup timeout for a device in jiffies.
827 dasd_set_timer(struct dasd_device *device, int expires)
830 if (timer_pending(&device->timer))
831 del_timer(&device->timer);
834 if (timer_pending(&device->timer)) {
835 if (mod_timer(&device->timer, jiffies + expires))
838 device->timer.function = dasd_timeout_device;
839 device->timer.data = (unsigned long) device;
840 device->timer.expires = jiffies + expires;
841 add_timer(&device->timer);
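/*
 * Note that 'expires' is a relative value in jiffies; the callers in
 * this file pass small values such as 10 and 50, which the surrounding
 * comments describe as roughly 1/10 and 1/2 second (i.e. they assume
 * HZ == 100).
 */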
845 * Clear timeout for a device.
848 dasd_clear_timer(struct dasd_device *device)
850 if (timer_pending(&device->timer))
851 del_timer(&device->timer);
855 dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
857 struct dasd_ccw_req *cqr;
858 struct dasd_device *device;
860 cqr = (struct dasd_ccw_req *) intparm;
861 if (cqr->status != DASD_CQR_IN_IO) {
863 "invalid status in handle_killed_request: "
864 "bus_id %s, status %02x",
865 cdev->dev.bus_id, cqr->status);
869 device = (struct dasd_device *) cqr->device;
870 if (device == NULL ||
871 device != dasd_device_from_cdev(cdev) ||
872 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
873 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
878 /* Schedule request to be retried. */
879 cqr->status = DASD_CQR_QUEUED;
881 dasd_clear_timer(device);
882 dasd_schedule_bh(device);
883 dasd_put_device(device);
887 dasd_handle_state_change_pending(struct dasd_device *device)
889 struct dasd_ccw_req *cqr;
890 struct list_head *l, *n;
892 /* First of all start sense subsystem status request. */
893 dasd_eer_snss(device);
895 device->stopped &= ~DASD_STOPPED_PENDING;
897 /* restart all 'running' IO on queue */
898 list_for_each_safe(l, n, &device->ccw_queue) {
899 cqr = list_entry(l, struct dasd_ccw_req, list);
900 if (cqr->status == DASD_CQR_IN_IO) {
901 cqr->status = DASD_CQR_QUEUED;
904 dasd_clear_timer(device);
905 dasd_schedule_bh(device);
909 * Interrupt handler for "normal" ssch-io based dasd devices.
912 dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
915 struct dasd_ccw_req *cqr, *next;
916 struct dasd_device *device;
917 unsigned long long now;
923 switch (PTR_ERR(irb)) {
925 dasd_handle_killed_request(cdev, intparm);
928 printk(KERN_WARNING"%s(%s): request timed out\n",
929 __FUNCTION__, cdev->dev.bus_id);
930 //FIXME - dasd uses own timeout interface...
933 printk(KERN_WARNING"%s(%s): unknown error %ld\n",
934 __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
941 DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
942 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
943 (unsigned int) intparm);
945 /* first of all check for state change pending interrupt */
946 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
947 if ((irb->scsw.dstat & mask) == mask) {
948 device = dasd_device_from_cdev(cdev);
949 if (!IS_ERR(device)) {
950 dasd_handle_state_change_pending(device);
951 dasd_put_device(device);
956 cqr = (struct dasd_ccw_req *) intparm;
958 /* check for unsolicited interrupts */
961 "unsolicited interrupt received: bus_id %s",
966 device = (struct dasd_device *) cqr->device;
967 if (device == NULL ||
968 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
969 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
974 /* Check for clear pending */
975 if (cqr->status == DASD_CQR_CLEAR &&
976 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
977 cqr->status = DASD_CQR_QUEUED;
978 dasd_clear_timer(device);
979 dasd_schedule_bh(device);
983 /* check status - the request might have been killed by dyn detach */
984 if (cqr->status != DASD_CQR_IN_IO) {
986 "invalid status: bus_id %s, status %02x",
987 cdev->dev.bus_id, cqr->status);
990 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
991 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
993 /* Find out the appropriate era_action. */
994 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
995 era = dasd_era_fatal;
996 else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
997 irb->scsw.cstat == 0 &&
998 !irb->esw.esw0.erw.cons)
1000 else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
1001 era = dasd_era_fatal; /* don't recover this request */
1002 else if (irb->esw.esw0.erw.cons)
1003 era = device->discipline->examine_error(cqr, irb);
1005 era = dasd_era_recover;
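/*
 * The era value decides what happens next: dasd_era_none marks the cqr
 * DASD_CQR_DONE, dasd_era_fatal marks it DASD_CQR_FAILED, and
 * dasd_era_recover marks it DASD_CQR_ERROR so that error recovery (ERP)
 * is started later from __dasd_process_ccw_queue().
 */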
1007 DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
1009 if (era == dasd_era_none) {
1010 cqr->status = DASD_CQR_DONE;
1012 /* Start first request on queue if possible -> fast_io. */
1013 if (cqr->list.next != &device->ccw_queue) {
1014 next = list_entry(cqr->list.next,
1015 struct dasd_ccw_req, list);
1016 if ((next->status == DASD_CQR_QUEUED) &&
1017 (!device->stopped)) {
1018 if (device->discipline->start_IO(next) == 0)
1019 expires = next->expires;
1021 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1022 "Interrupt fastpath "
1026 } else { /* error */
1027 memcpy(&cqr->irb, irb, sizeof (struct irb));
1029 /* dump sense data */
1030 dasd_log_sense(cqr, irb);
1033 case dasd_era_fatal:
1034 cqr->status = DASD_CQR_FAILED;
1037 case dasd_era_recover:
1038 cqr->status = DASD_CQR_ERROR;
1045 dasd_set_timer(device, expires);
1047 dasd_clear_timer(device);
1048 dasd_schedule_bh(device);
1052 * Notify the block layer that a request has completed.
1055 dasd_end_request(struct request *req, int uptodate)
1057 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1059 add_disk_randomness(req->rq_disk);
1060 end_that_request_last(req, uptodate);
1064 * Process finished error recovery ccw.
1067 __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1069 dasd_erp_fn_t erp_fn;
1071 if (cqr->status == DASD_CQR_DONE)
1072 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1074 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1075 erp_fn = device->discipline->erp_postaction(cqr);
1080 * Process ccw request queue.
1083 __dasd_process_ccw_queue(struct dasd_device * device,
1084 struct list_head *final_queue)
1086 struct list_head *l, *n;
1087 struct dasd_ccw_req *cqr;
1088 dasd_erp_fn_t erp_fn;
1091 /* Process request with final status. */
1092 list_for_each_safe(l, n, &device->ccw_queue) {
1093 cqr = list_entry(l, struct dasd_ccw_req, list);
1094 /* Stop list processing at the first non-final request. */
1095 if (cqr->status != DASD_CQR_DONE &&
1096 cqr->status != DASD_CQR_FAILED &&
1097 cqr->status != DASD_CQR_ERROR)
1099 /* Process requests with DASD_CQR_ERROR */
1100 if (cqr->status == DASD_CQR_ERROR) {
1101 if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
1102 cqr->status = DASD_CQR_FAILED;
1103 cqr->stopclk = get_clock();
1105 if (cqr->irb.esw.esw0.erw.cons) {
1106 erp_fn = device->discipline->
1110 dasd_default_erp_action(cqr);
1115 /* First of all call extended error reporting. */
1116 if (dasd_eer_enabled(device) &&
1117 cqr->status == DASD_CQR_FAILED) {
1118 dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
1120 /* restart request */
1121 cqr->status = DASD_CQR_QUEUED;
1123 device->stopped |= DASD_STOPPED_QUIESCE;
1127 /* Process finished ERP request. */
1129 __dasd_process_erp(device, cqr);
1133 /* Rechain finished requests to final queue */
1134 cqr->endclk = get_clock();
1135 list_move_tail(&cqr->list, final_queue);
1140 dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1142 struct request *req;
1143 struct dasd_device *device;
1146 req = (struct request *) data;
1147 device = cqr->device;
1148 dasd_profile_end(device, cqr, req);
1149 status = cqr->device->discipline->free_cp(cqr,req);
1150 spin_lock_irq(&device->request_queue_lock);
1151 dasd_end_request(req, status);
1152 spin_unlock_irq(&device->request_queue_lock);
1157 * Fetch requests from the block device queue.
1160 __dasd_process_blk_queue(struct dasd_device * device)
1162 request_queue_t *queue;
1163 struct request *req;
1164 struct dasd_ccw_req *cqr;
1167 queue = device->request_queue;
1168 /* No queue ? Then there is nothing to do. */
1173 * We requeue request from the block device queue to the ccw
1174 * queue only in two states. In state DASD_STATE_READY the
1175 * partition detection is done and we need to requeue requests
1176 * for that. State DASD_STATE_ONLINE is normal block device
1179 if (device->state != DASD_STATE_READY &&
1180 device->state != DASD_STATE_ONLINE)
1183 /* Now we try to fetch requests from the request queue */
1184 list_for_each_entry(cqr, &device->ccw_queue, list)
1185 if (cqr->status == DASD_CQR_QUEUED)
1187 while (!blk_queue_plugged(queue) &&
1188 elv_next_request(queue) &&
1189 nr_queued < DASD_CHANQ_MAX_SIZE) {
1190 req = elv_next_request(queue);
1192 if (device->features & DASD_FEATURE_READONLY &&
1193 rq_data_dir(req) == WRITE) {
1194 DBF_DEV_EVENT(DBF_ERR, device,
1195 "Rejecting write request %p",
1197 blkdev_dequeue_request(req);
1198 dasd_end_request(req, 0);
1201 if (device->stopped & DASD_STOPPED_DC_EIO) {
1202 blkdev_dequeue_request(req);
1203 dasd_end_request(req, 0);
1206 cqr = device->discipline->build_cp(device, req);
1208 if (PTR_ERR(cqr) == -ENOMEM)
1209 break; /* terminate request queue loop */
1210 DBF_DEV_EVENT(DBF_ERR, device,
1211 "CCW creation failed (rc=%ld) "
1214 blkdev_dequeue_request(req);
1215 dasd_end_request(req, 0);
1218 cqr->callback = dasd_end_request_cb;
1219 cqr->callback_data = (void *) req;
1220 cqr->status = DASD_CQR_QUEUED;
1221 blkdev_dequeue_request(req);
1222 list_add_tail(&cqr->list, &device->ccw_queue);
1223 dasd_profile_start(device, cqr, req);
1229 * Take a look at the first request on the ccw queue and check
1230 * if it reached its expire time. If so, terminate the IO.
1233 __dasd_check_expire(struct dasd_device * device)
1235 struct dasd_ccw_req *cqr;
1237 if (list_empty(&device->ccw_queue))
1239 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1240 if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
1241 if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
1242 if (device->discipline->term_IO(cqr) != 0)
1243 /* Hmpf, try again in 1/10 sec */
1244 dasd_set_timer(device, 10);
1250 * Take a look at the first request on the ccw queue and check
1251 * if it needs to be started.
1254 __dasd_start_head(struct dasd_device * device)
1256 struct dasd_ccw_req *cqr;
1259 if (list_empty(&device->ccw_queue))
1261 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1262 /* check FAILFAST */
1263 if (device->stopped & ~DASD_STOPPED_PENDING &&
1264 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1265 (!dasd_eer_enabled(device))) {
1266 cqr->status = DASD_CQR_FAILED;
1267 dasd_schedule_bh(device);
1269 if ((cqr->status == DASD_CQR_QUEUED) &&
1270 (!device->stopped)) {
1271 /* try to start the first I/O that can be started */
1272 rc = device->discipline->start_IO(cqr);
1274 dasd_set_timer(device, cqr->expires);
1275 else if (rc == -EACCES) {
1276 dasd_schedule_bh(device);
1278 /* Hmpf, try again in 1/2 sec */
1279 dasd_set_timer(device, 50);
1284 * Remove requests from the ccw queue.
1287 dasd_flush_ccw_queue(struct dasd_device * device, int all)
1289 struct list_head flush_queue;
1290 struct list_head *l, *n;
1291 struct dasd_ccw_req *cqr;
1293 INIT_LIST_HEAD(&flush_queue);
1294 spin_lock_irq(get_ccwdev_lock(device->cdev));
1295 list_for_each_safe(l, n, &device->ccw_queue) {
1296 cqr = list_entry(l, struct dasd_ccw_req, list);
1297 /* Flush all requests or only block device requests? */
1298 if (all == 0 && cqr->callback == dasd_end_request_cb)
1300 if (cqr->status == DASD_CQR_IN_IO)
1301 device->discipline->term_IO(cqr);
1302 if (cqr->status != DASD_CQR_DONE &&
1303 cqr->status != DASD_CQR_FAILED) {
1304 cqr->status = DASD_CQR_FAILED;
1305 cqr->stopclk = get_clock();
1307 /* Process finished ERP request. */
1309 __dasd_process_erp(device, cqr);
1312 /* Rechain request on device request queue */
1313 cqr->endclk = get_clock();
1314 list_move_tail(&cqr->list, &flush_queue);
1316 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1317 /* Now call the callback function of flushed requests */
1318 list_for_each_safe(l, n, &flush_queue) {
1319 cqr = list_entry(l, struct dasd_ccw_req, list);
1320 if (cqr->callback != NULL)
1321 (cqr->callback)(cqr, cqr->callback_data);
1326 * Acquire the device lock and process queues for the device.
1329 dasd_tasklet(struct dasd_device * device)
1331 struct list_head final_queue;
1332 struct list_head *l, *n;
1333 struct dasd_ccw_req *cqr;
1335 atomic_set (&device->tasklet_scheduled, 0);
1336 INIT_LIST_HEAD(&final_queue);
1337 spin_lock_irq(get_ccwdev_lock(device->cdev));
1338 /* Check expire time of first request on the ccw queue. */
1339 __dasd_check_expire(device);
1340 /* Finish off requests on ccw queue */
1341 __dasd_process_ccw_queue(device, &final_queue);
1342 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1343 /* Now call the callback function of requests with final status */
1344 list_for_each_safe(l, n, &final_queue) {
1345 cqr = list_entry(l, struct dasd_ccw_req, list);
1346 list_del_init(&cqr->list);
1347 if (cqr->callback != NULL)
1348 (cqr->callback)(cqr, cqr->callback_data);
1350 spin_lock_irq(&device->request_queue_lock);
1351 spin_lock(get_ccwdev_lock(device->cdev));
1352 /* Get new request from the block device request queue */
1353 __dasd_process_blk_queue(device);
1354 /* Now check if the head of the ccw queue needs to be started. */
1355 __dasd_start_head(device);
1356 spin_unlock(get_ccwdev_lock(device->cdev));
1357 spin_unlock_irq(&device->request_queue_lock);
1358 dasd_put_device(device);
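/*
 * Order of work in dasd_tasklet above: check the head request for
 * expiry, move requests with final status from the ccw queue to a local
 * final_queue, drop the ccwdev lock, invoke their callbacks, then
 * refill the ccw queue from the block device queue and restart the head
 * of the ccw queue.
 */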
1362 * Schedules a call to dasd_tasklet over the device tasklet.
1365 dasd_schedule_bh(struct dasd_device * device)
1367 /* Protect against rescheduling. */
1368 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1370 dasd_get_device(device);
1371 tasklet_hi_schedule(&device->tasklet);
1375 * Queue a request to the head of the ccw_queue. Start the I/O if
1379 dasd_add_request_head(struct dasd_ccw_req *req)
1381 struct dasd_device *device;
1382 unsigned long flags;
1384 device = req->device;
1385 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1386 req->status = DASD_CQR_QUEUED;
1387 req->device = device;
1388 list_add(&req->list, &device->ccw_queue);
1389 /* let the bh start the request to keep them in order */
1390 dasd_schedule_bh(device);
1391 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1395 * Queue a request to the tail of the ccw_queue. Start the I/O if
1399 dasd_add_request_tail(struct dasd_ccw_req *req)
1401 struct dasd_device *device;
1402 unsigned long flags;
1404 device = req->device;
1405 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1406 req->status = DASD_CQR_QUEUED;
1407 req->device = device;
1408 list_add_tail(&req->list, &device->ccw_queue);
1409 /* let the bh start the request to keep them in order */
1410 dasd_schedule_bh(device);
1411 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1418 dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1420 wake_up((wait_queue_head_t *) data);
1424 _wait_for_wakeup(struct dasd_ccw_req *cqr)
1426 struct dasd_device *device;
1429 device = cqr->device;
1430 spin_lock_irq(get_ccwdev_lock(device->cdev));
1431 rc = ((cqr->status == DASD_CQR_DONE ||
1432 cqr->status == DASD_CQR_FAILED) &&
1433 list_empty(&cqr->list));
1434 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1439 * Attempts to start a special ccw queue and waits for its completion.
1442 dasd_sleep_on(struct dasd_ccw_req * cqr)
1444 wait_queue_head_t wait_q;
1445 struct dasd_device *device;
1448 device = cqr->device;
1449 spin_lock_irq(get_ccwdev_lock(device->cdev));
1451 init_waitqueue_head (&wait_q);
1452 cqr->callback = dasd_wakeup_cb;
1453 cqr->callback_data = (void *) &wait_q;
1454 cqr->status = DASD_CQR_QUEUED;
1455 list_add_tail(&cqr->list, &device->ccw_queue);
1457 /* let the bh start the request to keep them in order */
1458 dasd_schedule_bh(device);
1460 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1462 wait_event(wait_q, _wait_for_wakeup(cqr));
1464 /* Request status is either done or failed. */
1465 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
1470 * Attempts to start a special ccw queue and waits interruptibly
1471 * for its completion.
1474 dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1476 wait_queue_head_t wait_q;
1477 struct dasd_device *device;
1480 device = cqr->device;
1481 spin_lock_irq(get_ccwdev_lock(device->cdev));
1483 init_waitqueue_head (&wait_q);
1484 cqr->callback = dasd_wakeup_cb;
1485 cqr->callback_data = (void *) &wait_q;
1486 cqr->status = DASD_CQR_QUEUED;
1487 list_add_tail(&cqr->list, &device->ccw_queue);
1489 /* let the bh start the request to keep them in order */
1490 dasd_schedule_bh(device);
1491 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1495 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1496 if (rc != -ERESTARTSYS) {
1497 /* Request is final (done or failed) */
1498 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1501 spin_lock_irq(get_ccwdev_lock(device->cdev));
1502 switch (cqr->status) {
1503 case DASD_CQR_IN_IO:
1504 /* terminate running cqr */
1505 if (device->discipline->term_IO) {
1507 device->discipline->term_IO(cqr);
1509 * wait (non-interruptible) for final status
1510 * because a signal is still pending
1512 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1513 wait_event(wait_q, _wait_for_wakeup(cqr));
1514 spin_lock_irq(get_ccwdev_lock(device->cdev));
1515 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1519 case DASD_CQR_QUEUED:
1521 list_del_init(&cqr->list);
1526 /* cqr with 'non-interruptible' status - just wait */
1529 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1535 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1536 * for eckd devices) the currently running request has to be terminated
1537 * and be put back to status queued, before the special request is added
1538 * to the head of the queue. Then the special request is waited on normally.
1541 _dasd_term_running_cqr(struct dasd_device *device)
1543 struct dasd_ccw_req *cqr;
1546 if (list_empty(&device->ccw_queue))
1548 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1549 rc = device->discipline->term_IO(cqr);
1551 /* termination successful */
1552 cqr->status = DASD_CQR_QUEUED;
1553 cqr->startclk = cqr->stopclk = 0;
1560 dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1562 wait_queue_head_t wait_q;
1563 struct dasd_device *device;
1566 device = cqr->device;
1567 spin_lock_irq(get_ccwdev_lock(device->cdev));
1568 rc = _dasd_term_running_cqr(device);
1570 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1574 init_waitqueue_head (&wait_q);
1575 cqr->callback = dasd_wakeup_cb;
1576 cqr->callback_data = (void *) &wait_q;
1577 cqr->status = DASD_CQR_QUEUED;
1578 list_add(&cqr->list, &device->ccw_queue);
1580 /* let the bh start the request to keep them in order */
1581 dasd_schedule_bh(device);
1583 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1585 wait_event(wait_q, _wait_for_wakeup(cqr));
1587 /* Request status is either done or failed. */
1588 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
1593 * Cancels a request that was started with dasd_sleep_on.
1594 * This is useful to time out requests. The request will be
1595 * terminated if it is currently in i/o.
1596 * Returns 1 if the request has been terminated.
1599 dasd_cancel_req(struct dasd_ccw_req *cqr)
1601 struct dasd_device *device = cqr->device;
1602 unsigned long flags;
1606 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1607 switch (cqr->status) {
1608 case DASD_CQR_QUEUED:
1609 /* request was not started - just set to failed */
1610 cqr->status = DASD_CQR_FAILED;
1612 case DASD_CQR_IN_IO:
1613 /* request in IO - terminate IO and release again */
1614 if (device->discipline->term_IO(cqr) != 0)
1615 /* what to do if unable to terminate ?????? */
1617 cqr->status = DASD_CQR_FAILED;
1618 cqr->stopclk = get_clock();
1622 case DASD_CQR_FAILED:
1623 /* already finished - do nothing */
1626 DEV_MESSAGE(KERN_ALERT, device,
1627 "invalid status %02x in request",
1632 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1633 dasd_schedule_bh(device);
1638 * SECTION: Block device operations (request queue, partitions, open, release).
1642 * Dasd request queue function. Called from ll_rw_blk.c
1645 do_dasd_request(request_queue_t * queue)
1647 struct dasd_device *device;
1649 device = (struct dasd_device *) queue->queuedata;
1650 spin_lock(get_ccwdev_lock(device->cdev));
1651 /* Get new request from the block device request queue */
1652 __dasd_process_blk_queue(device);
1653 /* Now check if the head of the ccw queue needs to be started. */
1654 __dasd_start_head(device);
1655 spin_unlock(get_ccwdev_lock(device->cdev));
1659 * Allocate and initialize request queue and default I/O scheduler.
1662 dasd_alloc_queue(struct dasd_device * device)
1666 device->request_queue = blk_init_queue(do_dasd_request,
1667 &device->request_queue_lock);
1668 if (device->request_queue == NULL)
1671 device->request_queue->queuedata = device;
1673 elevator_exit(device->request_queue->elevator);
1674 rc = elevator_init(device->request_queue, "deadline");
1676 blk_cleanup_queue(device->request_queue);
1683 * Set up the request queue parameters (block size and transfer limits).
1686 dasd_setup_queue(struct dasd_device * device)
1690 blk_queue_hardsect_size(device->request_queue, device->bp_block);
1691 max = device->discipline->max_blocks << device->s2b_shift;
1692 blk_queue_max_sectors(device->request_queue, max);
1693 blk_queue_max_phys_segments(device->request_queue, -1L);
1694 blk_queue_max_hw_segments(device->request_queue, -1L);
1695 blk_queue_max_segment_size(device->request_queue, -1L);
1696 blk_queue_segment_boundary(device->request_queue, -1L);
1697 blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
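/*
 * Passing -1L to the blk_queue_max_* and blk_queue_segment_boundary
 * calls above sets those limits to the largest representable values,
 * i.e. the driver imposes no segment count, segment size or boundary
 * restrictions of its own.
 */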
1701 * Deactivate and free request queue.
1704 dasd_free_queue(struct dasd_device * device)
1706 if (device->request_queue) {
1707 blk_cleanup_queue(device->request_queue);
1708 device->request_queue = NULL;
1713 * Flush requests on the request queue.
1716 dasd_flush_request_queue(struct dasd_device * device)
1718 struct request *req;
1720 if (!device->request_queue)
1723 spin_lock_irq(&device->request_queue_lock);
1724 while (!list_empty(&device->request_queue->queue_head)) {
1725 req = elv_next_request(device->request_queue);
1728 dasd_end_request(req, 0);
1729 blkdev_dequeue_request(req);
1731 spin_unlock_irq(&device->request_queue_lock);
1735 dasd_open(struct inode *inp, struct file *filp)
1737 struct gendisk *disk = inp->i_bdev->bd_disk;
1738 struct dasd_device *device = disk->private_data;
1741 atomic_inc(&device->open_count);
1742 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1747 if (!try_module_get(device->discipline->owner)) {
1752 if (dasd_probeonly) {
1753 DEV_MESSAGE(KERN_INFO, device, "%s",
1754 "No access to device due to probeonly mode");
1759 if (device->state <= DASD_STATE_BASIC) {
1760 DBF_DEV_EVENT(DBF_ERR, device, " %s",
1761 " Cannot open unrecognized device");
1769 module_put(device->discipline->owner);
1771 atomic_dec(&device->open_count);
1776 dasd_release(struct inode *inp, struct file *filp)
1778 struct gendisk *disk = inp->i_bdev->bd_disk;
1779 struct dasd_device *device = disk->private_data;
1781 atomic_dec(&device->open_count);
1782 module_put(device->discipline->owner);
1787 * Return disk geometry.
1790 dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1792 struct dasd_device *device;
1794 device = bdev->bd_disk->private_data;
1798 if (!device->discipline ||
1799 !device->discipline->fill_geometry)
1802 device->discipline->fill_geometry(device, geo);
1803 geo->start = get_start_sect(bdev) >> device->s2b_shift;
1807 struct block_device_operations
1808 dasd_device_operations = {
1809 .owner = THIS_MODULE,
1811 .release = dasd_release,
1812 .ioctl = dasd_ioctl,
1813 .compat_ioctl = dasd_compat_ioctl,
1814 .getgeo = dasd_getgeo,
1821 #ifdef CONFIG_PROC_FS
1825 if (dasd_page_cache != NULL) {
1826 kmem_cache_destroy(dasd_page_cache);
1827 dasd_page_cache = NULL;
1829 dasd_gendisk_exit();
1831 devfs_remove("dasd");
1832 if (dasd_debug_area != NULL) {
1833 debug_unregister(dasd_debug_area);
1834 dasd_debug_area = NULL;
1839 * SECTION: common functions for ccw_driver use
1843 * Initial attempt at a probe function. This can be simplified once
1844 * the other detection code is gone.
1847 dasd_generic_probe (struct ccw_device *cdev,
1848 struct dasd_discipline *discipline)
1852 ret = dasd_add_sysfs_files(cdev);
1855 "dasd_generic_probe: could not add sysfs entries "
1856 "for %s\n", cdev->dev.bus_id);
1858 cdev->handler = &dasd_int_handler;
1865 * This will one day be called from a global not_oper handler.
1866 * It is also used by driver_unregister during module unload.
1869 dasd_generic_remove (struct ccw_device *cdev)
1871 struct dasd_device *device;
1873 cdev->handler = NULL;
1875 dasd_remove_sysfs_files(cdev);
1876 device = dasd_device_from_cdev(cdev);
1879 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1880 /* Already doing offline processing */
1881 dasd_put_device(device);
1885 * This device is removed unconditionally. Set offline
1886 * flag to prevent dasd_open from opening it while it is
1887 * not quite down yet.
1889 dasd_set_target_state(device, DASD_STATE_NEW);
1890 /* dasd_delete_device destroys the device reference. */
1891 dasd_delete_device(device);
1895 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
1896 * the device is detected for the first time and is supposed to be used
1897 * or the user has started activation through sysfs.
1900 dasd_generic_set_online (struct ccw_device *cdev,
1901 struct dasd_discipline *base_discipline)
1904 struct dasd_discipline *discipline;
1905 struct dasd_device *device;
1908 device = dasd_create_device(cdev);
1910 return PTR_ERR(device);
1912 discipline = base_discipline;
1913 if (device->features & DASD_FEATURE_USEDIAG) {
1914 if (!dasd_diag_discipline_pointer) {
1915 printk (KERN_WARNING
1916 "dasd_generic couldn't online device %s "
1917 "- discipline DIAG not available\n",
1919 dasd_delete_device(device);
1922 discipline = dasd_diag_discipline_pointer;
1924 if (!try_module_get(base_discipline->owner)) {
1925 dasd_delete_device(device);
1928 if (!try_module_get(discipline->owner)) {
1929 module_put(base_discipline->owner);
1930 dasd_delete_device(device);
1933 device->base_discipline = base_discipline;
1934 device->discipline = discipline;
1936 rc = discipline->check_device(device);
1938 printk (KERN_WARNING
1939 "dasd_generic couldn't online device %s "
1940 "with discipline %s rc=%i\n",
1941 cdev->dev.bus_id, discipline->name, rc);
1942 module_put(discipline->owner);
1943 module_put(base_discipline->owner);
1944 dasd_delete_device(device);
1948 dasd_set_target_state(device, DASD_STATE_ONLINE);
1949 if (device->state <= DASD_STATE_KNOWN) {
1950 printk (KERN_WARNING
1951 "dasd_generic discipline not found for %s\n",
1954 dasd_set_target_state(device, DASD_STATE_NEW);
1955 dasd_delete_device(device);
1957 pr_debug("dasd_generic device %s found\n",
1960 /* FIXME: we have to wait for the root device, but we don't want
1961 * to wait for each device individually; we want to wait for all of them at once. */
1962 wait_event(dasd_init_waitq, _wait_for_device(device));
1964 dasd_put_device(device);
1970 dasd_generic_set_offline (struct ccw_device *cdev)
1972 struct dasd_device *device;
1975 device = dasd_device_from_cdev(cdev);
1977 return PTR_ERR(device);
1978 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1979 /* Already doing offline processing */
1980 dasd_put_device(device);
1984 * We must make sure that this device is currently not in use.
1985 * The open_count is increased for every opener, that includes
1986 * the blkdev_get in dasd_scan_partitions. We are only interested
1987 * in the other openers.
1989 max_count = device->bdev ? 0 : -1;
1990 if (atomic_read(&device->open_count) > max_count) {
1991 printk (KERN_WARNING "Can't offline dasd device with open"
1993 atomic_read(&device->open_count));
1994 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
1995 dasd_put_device(device);
1998 dasd_set_target_state(device, DASD_STATE_NEW);
1999 /* dasd_delete_device destroys the device reference. */
2000 dasd_delete_device(device);
2006 dasd_generic_notify(struct ccw_device *cdev, int event)
2008 struct dasd_device *device;
2009 struct dasd_ccw_req *cqr;
2010 unsigned long flags;
2013 device = dasd_device_from_cdev(cdev);
2016 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
2021 /* First of all call extended error reporting. */
2022 dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2024 if (device->state < DASD_STATE_BASIC)
2026 /* Device is active. We want to keep it. */
2027 if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
2028 list_for_each_entry(cqr, &device->ccw_queue, list)
2029 if (cqr->status == DASD_CQR_IN_IO)
2030 cqr->status = DASD_CQR_FAILED;
2031 device->stopped |= DASD_STOPPED_DC_EIO;
2033 list_for_each_entry(cqr, &device->ccw_queue, list)
2034 if (cqr->status == DASD_CQR_IN_IO) {
2035 cqr->status = DASD_CQR_QUEUED;
2038 device->stopped |= DASD_STOPPED_DC_WAIT;
2039 dasd_set_timer(device, 0);
2041 dasd_schedule_bh(device);
2045 /* FIXME: add a sanity check. */
2046 device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
2047 dasd_schedule_bh(device);
2051 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2052 dasd_put_device(device);
2057 * Automatically online either all dasd devices (dasd_autodetect) or
2058 * all devices specified with dasd= parameters.
2061 __dasd_auto_online(struct device *dev, void *data)
2063 struct ccw_device *cdev;
2065 cdev = to_ccwdev(dev);
2066 if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
2067 ccw_device_set_online(cdev);
2072 dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
2074 struct device_driver *drv;
2076 drv = get_driver(&dasd_discipline_driver->driver);
2077 driver_for_each_device(drv, NULL, NULL, __dasd_auto_online);
2087 init_waitqueue_head(&dasd_init_waitq);
2089 /* register 'common' DASD debug area, used for all DBF_XXX calls */
2090 dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
2091 if (dasd_debug_area == NULL) {
2095 debug_register_view(dasd_debug_area, &debug_sprintf_view);
2096 debug_set_level(dasd_debug_area, DBF_EMERG);
2098 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2100 dasd_diag_discipline_pointer = NULL;
2102 rc = devfs_mk_dir("dasd");
2105 rc = dasd_devmap_init();
2108 rc = dasd_gendisk_init();
2114 rc = dasd_eer_init();
2117 #ifdef CONFIG_PROC_FS
2118 rc = dasd_proc_init();
2125 MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
2130 module_init(dasd_init);
2131 module_exit(dasd_exit);
2133 EXPORT_SYMBOL(dasd_debug_area);
2134 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2136 EXPORT_SYMBOL(dasd_add_request_head);
2137 EXPORT_SYMBOL(dasd_add_request_tail);
2138 EXPORT_SYMBOL(dasd_cancel_req);
2139 EXPORT_SYMBOL(dasd_clear_timer);
2140 EXPORT_SYMBOL(dasd_enable_device);
2141 EXPORT_SYMBOL(dasd_int_handler);
2142 EXPORT_SYMBOL(dasd_kfree_request);
2143 EXPORT_SYMBOL(dasd_kick_device);
2144 EXPORT_SYMBOL(dasd_kmalloc_request);
2145 EXPORT_SYMBOL(dasd_schedule_bh);
2146 EXPORT_SYMBOL(dasd_set_target_state);
2147 EXPORT_SYMBOL(dasd_set_timer);
2148 EXPORT_SYMBOL(dasd_sfree_request);
2149 EXPORT_SYMBOL(dasd_sleep_on);
2150 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2151 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2152 EXPORT_SYMBOL(dasd_smalloc_request);
2153 EXPORT_SYMBOL(dasd_start_IO);
2154 EXPORT_SYMBOL(dasd_term_IO);
2156 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2157 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2158 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2159 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2160 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2161 EXPORT_SYMBOL_GPL(dasd_generic_auto_online);