 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 * to allow user process control of SCSI devices.
 * Development Sponsored by Killy Corp. NY NY
 * Original driver (sg.c):
 * Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 * Copyright (C) 1998 - 2005 Douglas Gilbert
 * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
static int sg_version_num = 30534;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.34"

 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
 *  - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *    the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *    (otherwise the macros compile to empty statements).

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include "scsi_logging.h"

#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20061027";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);

#define SG_ALLOW_DIO_DEF 0
#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */

#define SG_MAX_DEVS 32768
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
 * Then when using 32 bit integers x * m may overflow during the calculation.
 * Replacing muldiv(x,m,d) by ((x % d) * m) / d + int(x / d) * m
 * calculates the same, but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
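
/*
 * Worked example (illustration only): with 32 bit int, HZ=1000 and
 * USER_HZ=100, a value such as x=50000000 overflows in the naive form
 * (50000000 * 1000 > INT_MAX), while
 *   MULDIV(x, HZ, USER_HZ) = ((x % 100) * 1000) / 100 + (x / 100) * 1000
 *                          = 0 + 500000000
 * computes the same quantity and still fits in 32 bits.
 */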
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
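
/*
 * Hedged user-space sketch (illustration only; not part of the driver):
 * besides the /proc file above, the reserve buffer of an individual file
 * descriptor can be queried and resized with the SG_GET_RESERVED_SIZE and
 * SG_SET_RESERVED_SIZE ioctls handled later in this file. "/dev/sg0" is
 * just an example node:
 *
 *	int fd = open("/dev/sg0", O_RDWR);
 *	int sz;
 *
 *	ioctl(fd, SG_GET_RESERVED_SIZE, &sz);	// current reserve size
 *	sz = 128 * 1024;
 *	ioctl(fd, SG_SET_RESERVED_SIZE, &sz);	// request a larger reserve
 */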
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;

#define SG_SECTOR_SZ 512
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)

static int sg_add(struct device *, struct class_interface *);
static void sg_remove(struct device *, struct class_interface *);

static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock
					   file descriptor list for device */

static struct class_interface sg_interface = {
	.remove_dev = sg_remove,

typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
	unsigned short k_use_sg;	/* Count of kernel scatter-gather pieces */
	unsigned sglist_len;	/* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
	struct scatterlist *buffer;	/* scatter list */
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode;	/* first byte of command */

struct sg_device;		/* forward declarations */

typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	volatile char done;	/* 0->before bh, 1->before read, 2->read */

typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct sg_fd *nextfp;	/* NULL when last opened fd on this device */
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat. element */
	Sg_request *headrp;	/* head of request slist, NULL->empty */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
	char low_dma;		/* as in parent but possibly overridden to 1 */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */

typedef struct sg_device {	/* holds the state of each scsi generic device */
	struct scsi_device *device;
	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	u32 index;		/* device index number */
	Sg_fd *headfp;		/* first open fd belonging to this device */
	volatile char detached;	/* 0->attached, 1->detached pending removal */
	volatile char exclude;	/* opened for exclusive access */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;
	struct cdev *cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */

static int sg_fasync(int fd, struct file *filp, int mode);
/* tasklet or soft irq callback */
static void sg_cmd_done(void *data, char *sense, int result, int resid);
static int sg_start_req(Sg_request * srp);
static void sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
			const char __user *buf, size_t count, int blocking,
			int read_only, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
			   unsigned char *cmnd, int timeout, int blocking);
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
		      int wr_xf, int *countp, unsigned char __user **up);
static int sg_write_xfer(Sg_request * srp);
static int sg_read_xfer(Sg_request * srp);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
static void sg_page_free(struct page *page, int size);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
static Sg_device *sg_get_dev(int dev);
#ifdef CONFIG_SCSI_PROC_FS
static int sg_last_dev(void);

#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
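
/*
 * Hedged user-space sketch (illustration only; not part of the driver):
 * minimal use of the v3 sg_io_hdr interface whose size is defined above,
 * issuing a 6 byte INQUIRY via the SG_IO ioctl. Error handling is omitted
 * and an already open()'ed sg fd is assumed:
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	// INQUIRY
 *	unsigned char resp[96], sense[32];
 *	sg_io_hdr_t io;
 *
 *	memset(&io, 0, sizeof(io));
 *	io.interface_id = 'S';			// checked in sg_new_write()
 *	io.dxfer_direction = SG_DXFER_FROM_DEV;
 *	io.cmd_len = sizeof(cdb);
 *	io.cmdp = cdb;
 *	io.dxfer_len = sizeof(resp);
 *	io.dxferp = resp;
 *	io.mx_sb_len = sizeof(sense);
 *	io.sbp = sense;
 *	io.timeout = 5000;			// milliseconds
 *	ioctl(fd, SG_IO, &io);
 */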
sg_open(struct inode *inode, struct file *filp)
	int dev = iminor(inode);
	int flags = filp->f_flags;
	struct request_queue *q;

	nonseekable_open(inode, filp);
	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
	sdp = sg_get_dev(dev);
	if ((!sdp) || (!sdp->device)) {
	/* This driver's module count bumped by fops_get in <linux/fs.h> */
	/* Prevent the device driver from vanishing while we sleep */
	retval = scsi_device_get(sdp->device);
	if (!((flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device))) {
		/* we are in error recovery for this device */
	if (flags & O_EXCL) {
		if (O_RDONLY == (flags & O_ACCMODE)) {
			retval = -EPERM;	/* Can't lock it with read only access */
		if (sdp->headfp && (flags & O_NONBLOCK)) {
			__wait_event_interruptible(sdp->o_excl_wait,
				((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
			retval = res;	/* -ERESTARTSYS because signal hit process */
	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
		if (flags & O_NONBLOCK) {
			__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
			retval = res;	/* -ERESTARTSYS because signal hit process */
	if (!sdp->headfp) {	/* no existing opens on this device */
		q = sdp->device->request_queue;
		sdp->sg_tablesize = min(q->max_hw_segments,
					q->max_phys_segments);
	if ((sfp = sg_add_sfp(sdp, dev)))
		filp->private_data = sfp;
		sdp->exclude = 0;	/* undo if error */
	scsi_device_put(sdp->device);

/* Following function was formerly called 'sg_close' */
sg_release(struct inode *inode, struct file *filp)
	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
	sg_fasync(-1, filp, 0);	/* remove filp from async notification list */
	if (0 == sg_remove_sfp(sdp, sfp)) {	/* Returns 1 when sdp gone */
		if (!sdp->detached) {
			scsi_device_put(sdp->device);
	wake_up_interruptible(&sdp->o_excl_wait);

sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
	int req_pack_id = -1;
	struct sg_header *old_hdr = NULL;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));
	if (!access_ok(VERIFY_WRITE, buf, count))
	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
		if (old_hdr->reply_len < 0) {
			if (count >= SZ_SG_IO_HDR) {
				sg_io_hdr_t *new_hdr;
				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
				retval = __copy_from_user
				    (new_hdr, buf, SZ_SG_IO_HDR);
				req_pack_id = new_hdr->pack_id;
			req_pack_id = old_hdr->pack_id;
	srp = sg_get_rq_mark(sfp, req_pack_id);
	if (!srp) {		/* now wait on packet to arrive */
		if (filp->f_flags & O_NONBLOCK) {
		retval = 0;	/* following macro beats race condition */
		__wait_event_interruptible(sfp->read_wait,
			(srp = sg_get_rq_mark(sfp, req_pack_id))),
			/* -ERESTARTSYS as signal hit process */
	if (srp->header.interface_id != '\0') {
		retval = sg_new_read(sfp, buf, count, srp);
	if (old_hdr == NULL) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
	memset(old_hdr, 0, SZ_SG_HEADER);
	old_hdr->reply_len = (int) hp->timeout;
	old_hdr->pack_len = old_hdr->reply_len;	/* old, strange behaviour */
	old_hdr->pack_id = hp->pack_id;
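	/* opcodes >= 0xc0 are vendor specific and do not encode their length
	 * in the command group bits, so remember whether a 12 byte cdb was
	 * used; sg_write() consults old_hdr.twelve_byte when sizing the
	 * next command */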
	old_hdr->twelve_byte =
	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
	old_hdr->target_status = hp->masked_status;
	old_hdr->host_status = hp->host_status;
	old_hdr->driver_status = hp->driver_status;
	if ((CHECK_CONDITION & hp->masked_status) ||
	    (DRIVER_SENSE & hp->driver_status))
		memcpy(old_hdr->sense_buffer, srp->sense_b,
		       sizeof (old_hdr->sense_buffer));
	switch (hp->host_status) {
	/* This setup of 'result' is for backward compatibility and is best
	   ignored by the user who should use target, host + driver status */
	case DID_PASSTHROUGH:
		old_hdr->result = EBUSY;
		old_hdr->result = EIO;
		old_hdr->result = (srp->sense_b[0] == 0 &&
				   hp->masked_status == GOOD) ? 0 : EIO;
		old_hdr->result = EIO;

	/* Now copy the result back to the user buffer. */
	if (count >= SZ_SG_HEADER) {
		if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
		if (count > old_hdr->reply_len)
			count = old_hdr->reply_len;
		if (count > SZ_SG_HEADER) {
			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
		count = (old_hdr->result == 0) ? 0 : -EIO;
	sg_finish_rem_req(srp);

sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
	sg_io_hdr_t *hp = &srp->header;

	if (count < SZ_SG_IO_HDR) {
	if ((hp->mx_sb_len > 0) && hp->sbp) {
		if ((CHECK_CONDITION & hp->masked_status) ||
		    (DRIVER_SENSE & hp->driver_status)) {
			int sb_len = SCSI_SENSE_BUFFERSIZE;
			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
			len = (len > sb_len) ? sb_len : len;
			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
	if (hp->masked_status || hp->host_status || hp->driver_status)
		hp->info |= SG_INFO_CHECK;
	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
	err = sg_read_xfer(srp);
	sg_finish_rem_req(srp);
	return (0 == err) ? count : err;
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
	int mxsize, cmd_size, k;
	int input_size, blocking;
	unsigned char opcode;
	struct sg_header old_hdr;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));
	if (!((filp->f_flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device)))
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	if (count < SZ_SG_HEADER)
	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
	blocking = !(filp->f_flags & O_NONBLOCK);
	if (old_hdr.reply_len < 0)
		return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
	if (count < (SZ_SG_HEADER + 6))
		return -EIO;	/* The minimum scsi command length is 6 bytes. */

	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
	__get_user(opcode, buf);
	if (sfp->next_cmd_len > 0) {
		if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
			SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
			sfp->next_cmd_len = 0;
			sg_remove_request(sfp, srp);
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
	SCSI_LOG_TIMEOUT(4, printk(
		"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
	/* Determine buffer size. */
	input_size = count - cmd_size;
	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
	mxsize -= SZ_SG_HEADER;
	input_size -= SZ_SG_HEADER;
	if (input_size < 0) {
		sg_remove_request(sfp, srp);
		return -EIO;	/* User did not pass enough bytes for this command. */
	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
	hp->cmd_len = (unsigned char) cmd_size;
		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
	hp->dxfer_len = mxsize;
	hp->dxferp = (char __user *)buf + cmd_size;
	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
	hp->flags = input_size;	/* structure abuse ... */
	hp->pack_id = old_hdr.pack_id;
	if (__copy_from_user(cmnd, buf, cmd_size))
 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
 * is a non-zero input_size, so emit a warning.
	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
		static char cmd[TASK_COMM_LEN];
		if (strcmp(current->comm, cmd) && printk_ratelimit()) {
				"sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
				"guessing data in;\n" KERN_WARNING "   "
				"program %s not setting count and/or reply_len properly\n",
				old_hdr.reply_len - (int)SZ_SG_HEADER,
				input_size, (unsigned int) cmnd[0],
			strcpy(cmd, current->comm);
	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
	return (k < 0) ? k : count;
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
	     size_t count, int blocking, int read_only,
	unsigned char cmnd[MAX_COMMAND_SIZE];
	unsigned long ul_timeout;

	if (count < SZ_SG_IO_HDR)
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */

	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
		sg_remove_request(sfp, srp);
	if (hp->interface_id != 'S') {
		sg_remove_request(sfp, srp);
	if (hp->flags & SG_FLAG_MMAP_IO) {
		if (hp->dxfer_len > sfp->reserve.bufflen) {
			sg_remove_request(sfp, srp);
			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
		if (hp->flags & SG_FLAG_DIRECT_IO) {
			sg_remove_request(sfp, srp);
			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
		if (sg_res_in_use(sfp)) {
			sg_remove_request(sfp, srp);
			return -EBUSY;	/* reserve buffer already being used */
	ul_timeout = msecs_to_jiffies(srp->header.timeout);
	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
		sg_remove_request(sfp, srp);
	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
	if (read_only && !blk_verify_command(file, cmnd)) {
		sg_remove_request(sfp, srp);
	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
sg_common_write(Sg_fd * sfp, Sg_request * srp,
		unsigned char *cmnd, int timeout, int blocking)
	Sg_device *sdp = sfp->parentdp;
	sg_io_hdr_t *hp = &srp->header;

	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
	hp->masked_status = 0;
	hp->driver_status = 0;
	SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
			  (int) cmnd[0], (int) hp->cmd_len));
	if ((k = sg_start_req(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
		sg_finish_rem_req(srp);
		return k;	/* probably out of space --> ENOMEM */
	if ((k = sg_write_xfer(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: write_xfer, bad address\n"));
		sg_finish_rem_req(srp);
		sg_finish_rem_req(srp);
	switch (hp->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		data_dir = DMA_FROM_DEVICE;
	case SG_DXFER_TO_DEV:
		data_dir = DMA_TO_DEVICE;
	case SG_DXFER_UNKNOWN:
		data_dir = DMA_BIDIRECTIONAL;
	hp->duration = jiffies_to_msecs(jiffies);
	/* Now send everything off to the mid-level. The next time we hear
	   about this packet is when sg_cmd_done() is called (i.e. a callback). */
	if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
				hp->dxfer_len, srp->data.k_use_sg, timeout,
				SG_DEFAULT_RETRIES, srp, sg_cmd_done,
		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: scsi_execute_async failed\n"));
		 * most likely out of mem, but could also be a bad map
		sg_finish_rem_req(srp);
sg_srp_done(Sg_request *srp, Sg_fd *sfp)
	unsigned long iflags;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

sg_ioctl(struct inode *inode, struct file *filp,
	 unsigned int cmd_in, unsigned long arg)
	void __user *p = (void __user *)arg;
	int result, val, read_only;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
				   sdp->disk->disk_name, (int) cmd_in));
	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

			int blocking = 1;	/* ignore O_NONBLOCK flag */

			if (!scsi_block_when_processing_errors(sdp->device))
			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
			    sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
					 blocking, read_only, &srp);
			srp->sg_io_owned = 1;
			result = 0;	/* following macro to beat race condition */
			__wait_event_interruptible(sfp->read_wait,
				(sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
				return 0;	/* request packet dropped already */
				return result;	/* -ERESTARTSYS because signal hit process */
			write_lock_irqsave(&sfp->rq_list_lock, iflags);
			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
			return (result < 0) ? result : 0;
		result = get_user(val, ip);
		if (val >= MULDIV(INT_MAX, USER_HZ, HZ))
			val = MULDIV(INT_MAX, USER_HZ, HZ);
		sfp->timeout_user = val;
		sfp->timeout = MULDIV(val, HZ, USER_HZ);
	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
				/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
		result = get_user(val, ip);
			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
				val = (int) sfp->reserve.bufflen;
				sg_remove_scat(&sfp->reserve);
				sg_build_reserve(sfp, val);
			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
		return put_user((int) sfp->low_dma, ip);
		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
			sg_scsi_id_t __user *sg_idp = p;

			__put_user((int) sdp->device->host->host_no,
			__put_user((int) sdp->device->channel,
			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
			__put_user((int) sdp->device->lun, &sg_idp->lun);
			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
			__put_user((short) sdp->device->host->cmd_per_lun,
				   &sg_idp->h_cmd_per_lun);
			__put_user((short) sdp->device->queue_depth,
				   &sg_idp->d_queue_depth);
			__put_user(0, &sg_idp->unused[0]);
			__put_user(0, &sg_idp->unused[1]);
	case SG_SET_FORCE_PACK_ID:
		result = get_user(val, ip);
		sfp->force_packid = val ? 1 : 0;
		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned)) {
				read_unlock_irqrestore(&sfp->rq_list_lock,
				__put_user(srp->header.pack_id, ip);
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	case SG_GET_NUM_WAITING:
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned))
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		return put_user(val, ip);
	case SG_GET_SG_TABLESIZE:
		return put_user(sdp->sg_tablesize, ip);
	case SG_SET_RESERVED_SIZE:
		result = get_user(val, ip);
		val = min_t(int, val,
			    sdp->device->request_queue->max_sectors * 512);
		if (val != sfp->reserve.bufflen) {
			if (sg_res_in_use(sfp) || sfp->mmap_called)
			sg_remove_scat(&sfp->reserve);
			sg_build_reserve(sfp, val);
	case SG_GET_RESERVED_SIZE:
		val = min_t(int, sfp->reserve.bufflen,
			    sdp->device->request_queue->max_sectors * 512);
		return put_user(val, ip);
	case SG_SET_COMMAND_Q:
		result = get_user(val, ip);
		sfp->cmd_q = val ? 1 : 0;
	case SG_GET_COMMAND_Q:
		return put_user((int) sfp->cmd_q, ip);
	case SG_SET_KEEP_ORPHAN:
		result = get_user(val, ip);
		sfp->keep_orphan = val;
	case SG_GET_KEEP_ORPHAN:
		return put_user((int) sfp->keep_orphan, ip);
	case SG_NEXT_CMD_LEN:
		result = get_user(val, ip);
		sfp->next_cmd_len = (val > 0) ? val : 0;
	case SG_GET_VERSION_NUM:
		return put_user(sg_version_num, ip);
	case SG_GET_ACCESS_COUNT:
		/* faked - we don't have a real access count anymore */
		val = (sdp->device ? 1 : 0);
		return put_user(val, ip);
	case SG_GET_REQUEST_TABLE:
		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
			sg_req_info_t *rinfo;

			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
			read_lock_irqsave(&sfp->rq_list_lock, iflags);
			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
			     ++val, srp = srp ? srp->nextrp : srp) {
				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
					rinfo[val].req_state = srp->done + 1;
					    srp->header.masked_status &
					    srp->header.host_status &
					    srp->header.driver_status;
						rinfo[val].duration =
							srp->header.duration;
						ms = jiffies_to_msecs(jiffies);
						rinfo[val].duration =
						    (ms > srp->header.duration) ?
						    (ms - srp->header.duration) : 0;
					rinfo[val].orphan = srp->orphan;
					rinfo[val].sg_io_owned =
					rinfo[val].pack_id =
						srp->header.pack_id;
					rinfo[val].usr_ptr =
						srp->header.usr_ptr;
			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = __copy_to_user(p, rinfo,
						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
			result = result ? -EFAULT : 0;
	case SG_EMULATED_HOST:
		return put_user(sdp->device->host->hostt->emulated, ip);
		if (filp->f_flags & O_NONBLOCK) {
			if (scsi_host_in_recovery(sdp->device->host))
		} else if (!scsi_block_when_processing_errors(sdp->device))
		result = get_user(val, ip);
		if (SG_SCSI_RESET_NOTHING == val)
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
		case SG_SCSI_RESET_TARGET:
			val = SCSI_TRY_RESET_TARGET;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return (scsi_reset_provider(sdp->device, val) ==
			SUCCESS) ? 0 : -EIO;
	case SCSI_IOCTL_SEND_COMMAND:
			unsigned char opcode = WRITE_6;
			Scsi_Ioctl_Command __user *siocp = p;

			if (copy_from_user(&opcode, siocp->data, 1))
			if (!blk_verify_command(filp, &opcode))
		return sg_scsi_ioctl(filp, sdp->device->request_queue, NULL, p);
		result = get_user(val, ip);
		sdp->sgdebug = (char) val;
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_TRANSFORM:
		return scsi_ioctl(sdp->device, cmd_in, p);
		return put_user(sdp->device->request_queue->max_sectors * 512,
		return blk_trace_setup(sdp->device->request_queue,
				       sdp->disk->disk_name,
				       sdp->device->sdev_gendev.devt,
		return blk_trace_startstop(sdp->device->request_queue, 1);
		return blk_trace_startstop(sdp->device->request_queue, 0);
	case BLKTRACETEARDOWN:
		return blk_trace_remove(sdp->device->request_queue);
			return -EPERM;	/* don't know so take safe approach */
		return scsi_ioctl(sdp->device, cmd_in, p);
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
	struct scsi_device *sdev;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	if (sdev->host->hostt->compat_ioctl) {
		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
	return -ENOIOCTLCMD;

sg_poll(struct file *filp, poll_table * wait)
	unsigned int res = 0;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
	poll_wait(filp, &sfp->read_wait, wait);
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	else if (!sfp->cmd_q) {
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));

sg_fasync(int fd, struct file *filp, int mode)
	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
				   sdp->disk->disk_name, mode));
	retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
	return (retval < 0) ? retval : 0;

sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	unsigned long offset, len, sa;
	Sg_scatter_hold *rsv_schp;
	struct scatterlist *sg;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return VM_FAULT_SIGBUS;
	rsv_schp = &sfp->reserve;
	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rsv_schp->bufflen)
		return VM_FAULT_SIGBUS;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	sg = rsv_schp->buffer;
	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
	     ++k, sg = sg_next(sg)) {
		len = vma->vm_end - sa;
		len = (len < sg->length) ? len : sg->length;
			page = virt_to_page(page_address(sg_page(sg)) + offset);
			get_page(page);	/* increment page count */
			return 0;	/* success */
	return VM_FAULT_SIGBUS;

static struct vm_operations_struct sg_mmap_vm_ops = {
	.fault = sg_vma_fault,

sg_mmap(struct file *filp, struct vm_area_struct *vma)
	unsigned long req_sz, len, sa;
	Sg_scatter_hold *rsv_schp;
	struct scatterlist *sg;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
	req_sz = vma->vm_end - vma->vm_start;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */
	sg = rsv_schp->buffer;
	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
	     ++k, sg = sg_next(sg)) {
		len = vma->vm_end - sa;
		len = (len < sg->length) ? len : sg->length;
	sfp->mmap_called = 1;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;

/* This function is a "bottom half" handler that is called by the
 * mid level when a command is completed (or has failed). */
sg_cmd_done(void *data, char *sense, int result, int resid)
	Sg_request *srp = data;
	Sg_device *sdp = NULL;
	unsigned long iflags;

		printk(KERN_ERR "sg_cmd_done: NULL request\n");
	sfp = srp->parentfp;
		sdp = sfp->parentdp;
	if ((NULL == sdp) || sdp->detached) {
		printk(KERN_INFO "sg_cmd_done: device detached\n");
	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
		sdp->disk->disk_name, srp->header.pack_id, result));
	srp->header.resid = resid;
	ms = jiffies_to_msecs(jiffies);
	srp->header.duration = (ms > srp->header.duration) ?
				(ms - srp->header.duration) : 0;
		struct scsi_sense_hdr sshdr;

		memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
		srp->header.status = 0xff & result;
		srp->header.masked_status = status_byte(result);
		srp->header.msg_status = msg_byte(result);
		srp->header.host_status = host_byte(result);
		srp->header.driver_status = driver_byte(result);
		if ((sdp->sgdebug > 0) &&
		    ((CHECK_CONDITION == srp->header.masked_status) ||
		     (COMMAND_TERMINATED == srp->header.masked_status)))
			__scsi_print_sense("sg_cmd_done", sense,
					   SCSI_SENSE_BUFFERSIZE);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(result) != 0
		    && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
		    && !scsi_sense_is_deferred(&sshdr)
		    && sshdr.sense_key == UNIT_ATTENTION
		    && sdp->device->removable) {
			/* Detected possible disc change. Set the bit - this */
			/* may be used if there are filesystems using this device */
			sdp->device->changed = 1;
	/* Rely on write phase to clean out srp status values, so no "else" */

	if (sfp->closed) {	/* whoops this fd already released, cleanup */
		SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
		sg_finish_rem_req(srp);
		if (NULL == sfp->headrp) {
			SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n"));
			if (0 == sg_remove_sfp(sdp, sfp)) {	/* device still present */
				scsi_device_put(sdp->device);
	} else if (srp && srp->orphan) {
		if (sfp->keep_orphan)
			srp->sg_io_owned = 0;
			sg_finish_rem_req(srp);
	/* Now wake up any sg_read() that is waiting for this packet. */
	kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	wake_up_interruptible(&sfp->read_wait);
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);

static struct file_operations sg_fops = {
	.owner = THIS_MODULE,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sg_compat_ioctl,
	.release = sg_release,
	.fasync = sg_fasync,

static struct class *sg_sysfs_class;

static int sg_sysfs_valid = 0;

static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
	struct request_queue *q = scsidp->request_queue;
	unsigned long iflags;

	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
		printk(KERN_WARNING "kmalloc Sg_device failure\n");
		return ERR_PTR(-ENOMEM);
	if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
		printk(KERN_WARNING "idr expansion Sg_device failure\n");
	write_lock_irqsave(&sg_index_lock, iflags);
	error = idr_get_new(&sg_index_idr, sdp, &k);
	write_unlock_irqrestore(&sg_index_lock, iflags);
		printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
	if (unlikely(k >= SG_MAX_DEVS))
	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d\n", k));
	sprintf(disk->disk_name, "sg%d", k);
	disk->first_minor = k;
	sdp->device = scsidp;
	init_waitqueue_head(&sdp->o_excl_wait);
	sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
	return ERR_PTR(error);

	sdev_printk(KERN_WARNING, scsidp,
		    "Unable to attach sg device type=%d, minor "
		    "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);

sg_add(struct device *cl_dev, struct class_interface *cl_intf)
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	struct gendisk *disk;
	Sg_device *sdp = NULL;
	struct cdev *cdev = NULL;
	unsigned long iflags;

	disk = alloc_disk(1);
		printk(KERN_WARNING "alloc_disk failed\n");
	disk->major = SCSI_GENERIC_MAJOR;
	cdev = cdev_alloc();
		printk(KERN_WARNING "cdev_alloc failed\n");
	cdev->owner = THIS_MODULE;
	cdev->ops = &sg_fops;
	sdp = sg_alloc(disk, scsidp);
		printk(KERN_WARNING "sg_alloc failed\n");
		error = PTR_ERR(sdp);
	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
	if (sg_sysfs_valid) {
		struct device *sg_class_member;

		sg_class_member = device_create_drvdata(sg_sysfs_class,
							MKDEV(SCSI_GENERIC_MAJOR,
							"%s", disk->disk_name);
		if (IS_ERR(sg_class_member)) {
			printk(KERN_ERR "sg_add: "
			       "device_create failed\n");
			error = PTR_ERR(sg_class_member);
		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					  &sg_class_member->kobj, "generic");
			printk(KERN_ERR "sg_add: unable to make symlink "
			       "'generic' back to sg%d\n", sdp->index);
		printk(KERN_WARNING "sg_add: sg_sys Invalid\n");
	sdev_printk(KERN_NOTICE, scsidp,
		    "Attached scsi generic sg%d type %d\n", sdp->index,
	dev_set_drvdata(cl_dev, sdp);

	write_lock_irqsave(&sg_index_lock, iflags);
	idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, iflags);

sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	Sg_device *sdp = dev_get_drvdata(cl_dev);
	unsigned long iflags;

	write_lock_irqsave(&sg_index_lock, iflags);
	for (sfp = sdp->headfp; sfp; sfp = tsfp) {
		for (srp = sfp->headrp; srp; srp = tsrp) {
			if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
				sg_finish_rem_req(srp);
			scsi_device_put(sdp->device);
			__sg_remove_sfp(sdp, sfp);
			wake_up_interruptible(&sfp->read_wait);
			kill_fasync(&sfp->async_qp, SIGPOLL,
	SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", sdp->index));
	if (NULL == sdp->headfp) {
		idr_remove(&sg_index_idr, sdp->index);
	} else {	/* nothing active, simple case */
		SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", sdp->index));
		idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, iflags);

	sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
	device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
	cdev_del(sdp->cdev);
	put_disk(sdp->disk);
	if (NULL == sdp->headfp)
	msleep(10);	/* dirty detach so delay device destruction */

module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);

MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
		 "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
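
/*
 * Example usage (hedged; the values below are illustrative only):
 *	modprobe sg def_reserved_size=131072 allow_dio=1
 * or, with sg built into the kernel, on the boot command line:
 *	sg.def_reserved_size=131072 sg.allow_dio=1
 */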
	if (scatter_elem_sz < PAGE_SIZE) {
		scatter_elem_sz = PAGE_SIZE;
		scatter_elem_sz_prev = scatter_elem_sz;
	if (def_reserved_size >= 0)
		sg_big_buff = def_reserved_size;
		def_reserved_size = sg_big_buff;

	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
	sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
	if (IS_ERR(sg_sysfs_class)) {
		rc = PTR_ERR(sg_sysfs_class);
	rc = scsi_register_interface(&sg_interface);
#ifdef CONFIG_SCSI_PROC_FS
#endif				/* CONFIG_SCSI_PROC_FS */
	class_destroy(sg_sysfs_class);
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);

#ifdef CONFIG_SCSI_PROC_FS
#endif				/* CONFIG_SCSI_PROC_FS */
	scsi_unregister_interface(&sg_interface);
	class_destroy(sg_sysfs_class);
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
	idr_destroy(&sg_index_idr);

sg_start_req(Sg_request * srp)
	Sg_fd *sfp = srp->parentfp;
	sg_io_hdr_t *hp = &srp->header;
	int dxfer_len = (int) hp->dxfer_len;
	int dxfer_dir = hp->dxfer_direction;
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
	    (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
	    (!sfp->parentdp->device->host->unchecked_isa_dma)) {
		res = sg_build_direct(srp, sfp, dxfer_len);
		if (res <= 0)	/* -ve -> error, 0 -> done, 1 -> try indirect */
	if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
		sg_link_reserve(sfp, srp, dxfer_len);
		res = sg_build_indirect(req_schp, sfp, dxfer_len);
			sg_remove_scat(req_schp);

sg_finish_rem_req(Sg_request * srp)
	Sg_fd *sfp = srp->parentfp;
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
		sg_unlink_reserve(sfp, srp);
		sg_remove_scat(req_schp);
	sg_remove_request(sfp, srp);

sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
	int sg_bufflen = tablesize * sizeof(struct scatterlist);
	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;

	 * TODO: test without low_dma, we should not need it since
	 * the block layer will bounce the buffer for us
	 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
		gfp_flags |= GFP_DMA;
	schp->buffer = kzalloc(sg_bufflen, gfp_flags);
	sg_init_table(schp->buffer, tablesize);
	schp->sglist_len = sg_bufflen;
	return tablesize;	/* number of scat_gath elements allocated */
#ifdef SG_ALLOW_DIO_CODE
/* vvvvvvvv  following code borrowed from st driver's direct IO vvvvvvvvv */
/* TODO: hopefully we can use the generic block layer code */

/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
   - mapping of all pages not successful
   (i.e., either completely successful or fails)
st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
		  unsigned long uaddr, size_t count, int rw)
	unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	struct page **pages;

	/* User attempted overflow! */
	if ((uaddr + count) < uaddr)
	if (nr_pages > max_pages)
	if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)

	/* Try to fault in all of the necessary pages */
	down_read(&current->mm->mmap_sem);
	/* rw==READ means read from drive, write into memory area */
	res = get_user_pages(
		0, /* don't force */
	up_read(&current->mm->mmap_sem);

	/* Errors and no page mapped should return here */
	for (i = 0; i < nr_pages; i++) {
		/* FIXME: flush superfluous for rw==READ,
		 * probably wrong function for rw==WRITE
		flush_dcache_page(pages[i]);
		/* ?? Is locking needed? I don't think so */
		/* if (!trylock_page(pages[i]))
	sg_set_page(sgl, pages[0], 0, uaddr & ~PAGE_MASK);
		sgl[0].length = PAGE_SIZE - sgl[0].offset;
		count -= sgl[0].length;
		for (i = 1; i < nr_pages; i++)
			sg_set_page(&sgl[i], pages[i], count < PAGE_SIZE ? count : PAGE_SIZE, 0);
		sgl[0].length = count;
	for (j = 0; j < res; j++)
		page_cache_release(pages[j]);

/* And unmap them... */
st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
	for (i = 0; i < nr_pages; i++) {
		struct page *page = sg_page(&sgl[i]);
		/* unlock_page(page); */
		/* FIXME: cache flush missing for rw==READ
		 * FIXME: call the correct reference counting function
		page_cache_release(page);

/* ^^^^^^^^  above code borrowed from st driver's direct IO ^^^^^^^^^ */
/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
#ifdef SG_ALLOW_DIO_CODE
	sg_io_hdr_t *hp = &srp->header;
	Sg_scatter_hold *schp = &srp->data;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int mx_sc_elems, res;
	struct scsi_device *sdev = sfp->parentdp->device;

	if (((unsigned long)hp->dxferp &
	     queue_dma_alignment(sdev->request_queue)) != 0)
	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems <= 0) {
	res = st_map_user_pages(schp->buffer, mx_sc_elems,
				(unsigned long)hp->dxferp, dxfer_len,
				(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
		sg_remove_scat(schp);
	schp->k_use_sg = res;
	schp->dio_in_use = 1;
	hp->info |= SG_INFO_DIRECT_IO;

sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
	struct scatterlist *sg;
	int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int blk_size = buff_size;
	struct page *p = NULL;

		++blk_size;	/* don't know why */
	/* round request up to next highest SG_SECTOR_SZ byte boundary */
	blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
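	/* e.g. blk_size = 600 -> (600 + 511) & ~511 = 1024 */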
1855 SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1856 buff_size, blk_size));
1858 /* N.B. ret_sz carried into this block ... */
1859 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1860 if (mx_sc_elems < 0)
1861 return mx_sc_elems; /* most likely -ENOMEM */
1863 num = scatter_elem_sz;
1864 if (unlikely(num != scatter_elem_sz_prev)) {
1865 if (num < PAGE_SIZE) {
1866 scatter_elem_sz = PAGE_SIZE;
1867 scatter_elem_sz_prev = PAGE_SIZE;
1869 scatter_elem_sz_prev = num;
1871 for (k = 0, sg = schp->buffer, rem_sz = blk_size;
1872 (rem_sz > 0) && (k < mx_sc_elems);
1873 ++k, rem_sz -= ret_sz, sg = sg_next(sg)) {
1875 num = (rem_sz > scatter_elem_sz_prev) ?
1876 scatter_elem_sz_prev : rem_sz;
1877 p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
1881 if (num == scatter_elem_sz_prev) {
1882 if (unlikely(ret_sz > scatter_elem_sz_prev)) {
1883 scatter_elem_sz = ret_sz;
1884 scatter_elem_sz_prev = ret_sz;
1887 sg_set_page(sg, p, (ret_sz > num) ? num : ret_sz, 0);
1889 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1890 "ret_sz=%d\n", k, num, ret_sz));
1891 } /* end of for loop */
1894 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1895 "rem_sz=%d\n", k, rem_sz));
1897 schp->bufflen = blk_size;
1898 if (rem_sz > 0) /* must have failed */
1905 sg_write_xfer(Sg_request * srp)
1907 sg_io_hdr_t *hp = &srp->header;
1908 Sg_scatter_hold *schp = &srp->data;
1909 struct scatterlist *sg = schp->buffer;
1911 int j, k, onum, usglen, ksglen, res;
1912 int iovec_count = (int) hp->iovec_count;
1913 int dxfer_dir = hp->dxfer_direction;
1915 unsigned char __user *up;
1916 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
1918 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
1919 (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
1920 num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
1921 if (schp->bufflen < num_xfer)
1922 num_xfer = schp->bufflen;
1924 if ((num_xfer <= 0) || (schp->dio_in_use) ||
1926 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
1929 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1930 num_xfer, iovec_count, schp->k_use_sg));
1933 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
1938 ksglen = sg->length;
1939 p = page_address(sg_page(sg));
1940 for (j = 0, k = 0; j < onum; ++j) {
1941 res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
1945 for (; p; sg = sg_next(sg), ksglen = sg->length,
1946 p = page_address(sg_page(sg))) {
1949 if (ksglen > usglen) {
1950 if (usglen >= num_xfer) {
1951 if (__copy_from_user(p, up, num_xfer))
1955 if (__copy_from_user(p, up, usglen))
1961 if (ksglen >= num_xfer) {
1962 if (__copy_from_user(p, up, num_xfer))
1966 if (__copy_from_user(p, up, ksglen))
1972 if (k >= schp->k_use_sg)
1981 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
1982 int wr_xf, int *countp, unsigned char __user **up)
1984 int num_xfer = (int) hp->dxfer_len;
1985 unsigned char __user *p = hp->dxferp;
1989 if (wr_xf && ('\0' == hp->interface_id))
1990 count = (int) hp->flags; /* holds "old" input_size */
1995 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
1998 count = (int) iovec.iov_len;
2000 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
2010 sg_remove_scat(Sg_scatter_hold * schp)
2012 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2013 if (schp->buffer && (schp->sglist_len > 0)) {
2014 struct scatterlist *sg = schp->buffer;
2016 if (schp->dio_in_use) {
2017 #ifdef SG_ALLOW_DIO_CODE
2018 st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
2023 for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
2024 ++k, sg = sg_next(sg)) {
2025 SCSI_LOG_TIMEOUT(5, printk(
2026 "sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
2027 k, sg_page(sg), sg->length));
2028 sg_page_free(sg_page(sg), sg->length);
2031 kfree(schp->buffer);
2033 memset(schp, 0, sizeof (*schp));
2037 sg_read_xfer(Sg_request * srp)
2039 sg_io_hdr_t *hp = &srp->header;
2040 Sg_scatter_hold *schp = &srp->data;
2041 struct scatterlist *sg = schp->buffer;
2043 int j, k, onum, usglen, ksglen, res;
2044 int iovec_count = (int) hp->iovec_count;
2045 int dxfer_dir = hp->dxfer_direction;
2047 unsigned char __user *up;
2048 int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
2050 if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
2051 || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
2052 num_xfer = hp->dxfer_len;
2053 if (schp->bufflen < num_xfer)
2054 num_xfer = schp->bufflen;
2056 if ((num_xfer <= 0) || (schp->dio_in_use) ||
2058 && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
2061 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2062 num_xfer, iovec_count, schp->k_use_sg));
2065 if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
2070 p = page_address(sg_page(sg));
2071 ksglen = sg->length;
2072 for (j = 0, k = 0; j < onum; ++j) {
2073 res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
2077 for (; p; sg = sg_next(sg), ksglen = sg->length,
2078 p = page_address(sg_page(sg))) {
2081 if (ksglen > usglen) {
2082 if (usglen >= num_xfer) {
2083 if (__copy_to_user(up, p, num_xfer))
2087 if (__copy_to_user(up, p, usglen))
2093 if (ksglen >= num_xfer) {
2094 if (__copy_to_user(up, p, num_xfer))
2098 if (__copy_to_user(up, p, ksglen))
2104 if (k >= schp->k_use_sg)
2113 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2115 Sg_scatter_hold *schp = &srp->data;
2116 struct scatterlist *sg = schp->buffer;
2119 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2121 if ((!outp) || (num_read_xfer <= 0))
2124 for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
2126 if (num > num_read_xfer) {
2127 if (__copy_to_user(outp, page_address(sg_page(sg)),
2132 if (__copy_to_user(outp, page_address(sg_page(sg)),
2135 num_read_xfer -= num;
2136 if (num_read_xfer <= 0)
2146 sg_build_reserve(Sg_fd * sfp, int req_size)
2148 Sg_scatter_hold *schp = &sfp->reserve;
2150 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
2152 if (req_size < PAGE_SIZE)
2153 req_size = PAGE_SIZE;
2154 if (0 == sg_build_indirect(schp, sfp, req_size))
2157 sg_remove_scat(schp);
2158 req_size >>= 1; /* divide by 2 */
2159 } while (req_size > (PAGE_SIZE / 2));
2163 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
2165 Sg_scatter_hold *req_schp = &srp->data;
2166 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2167 struct scatterlist *sg = rsv_schp->buffer;
2171 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
2174 for (k = 0; k < rsv_schp->k_use_sg; ++k, sg = sg_next(sg)) {
2177 sfp->save_scat_len = num;
2179 req_schp->k_use_sg = k + 1;
2180 req_schp->sglist_len = rsv_schp->sglist_len;
2181 req_schp->buffer = rsv_schp->buffer;
2183 req_schp->bufflen = size;
2184 req_schp->b_malloc_len = rsv_schp->b_malloc_len;
2190 if (k >= rsv_schp->k_use_sg)
2191 SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
2195 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2197 Sg_scatter_hold *req_schp = &srp->data;
2198 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2200 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2201 (int) req_schp->k_use_sg));
2202 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2203 struct scatterlist *sg = rsv_schp->buffer;
2205 if (sfp->save_scat_len > 0)
2206 (sg + (req_schp->k_use_sg - 1))->length =
2207 (unsigned) sfp->save_scat_len;
2209 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2211 req_schp->k_use_sg = 0;
2212 req_schp->bufflen = 0;
2213 req_schp->buffer = NULL;
2214 req_schp->sglist_len = 0;
2215 sfp->save_scat_len = 0;
2220 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2223 unsigned long iflags;
2225 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2226 for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2227 /* look for requests that are ready + not SG_IO owned */
2228 if ((1 == resp->done) && (!resp->sg_io_owned) &&
2229 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2230 resp->done = 2; /* guard against other readers */
2234 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2238 #ifdef CONFIG_SCSI_PROC_FS
2240 sg_get_nth_request(Sg_fd * sfp, int nth)
2243 unsigned long iflags;
2246 read_lock_irqsave(&sfp->rq_list_lock, iflags);
2247 for (k = 0, resp = sfp->headrp; resp && (k < nth);
2248 ++k, resp = resp->nextrp) ;
2249 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2254 /* always adds to end of list */
2256 sg_add_request(Sg_fd * sfp)
2259 unsigned long iflags;
2261 Sg_request *rp = sfp->req_arr;
2263 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2266 memset(rp, 0, sizeof (Sg_request));
2271 if (0 == sfp->cmd_q)
2272 resp = NULL; /* command queuing disallowed */
2274 for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2278 if (k < SG_MAX_QUEUE) {
2279 memset(rp, 0, sizeof (Sg_request));
2281 while (resp->nextrp)
2282 resp = resp->nextrp;
2290 resp->nextrp = NULL;
2291 resp->header.duration = jiffies_to_msecs(jiffies);
2293 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2297 /* Return of 1 for found; 0 for not found */
2299 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2301 Sg_request *prev_rp;
2303 unsigned long iflags;
2306 if ((!sfp) || (!srp) || (!sfp->headrp))
2308 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2309 prev_rp = sfp->headrp;
2310 if (srp == prev_rp) {
2311 sfp->headrp = prev_rp->nextrp;
2312 prev_rp->parentfp = NULL;
2315 while ((rp = prev_rp->nextrp)) {
2317 prev_rp->nextrp = rp->nextrp;
2318 rp->parentfp = NULL;
2325 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2329 #ifdef CONFIG_SCSI_PROC_FS
2331 sg_get_nth_sfp(Sg_device * sdp, int nth)
2334 unsigned long iflags;
2337 read_lock_irqsave(&sg_index_lock, iflags);
2338 for (k = 0, resp = sdp->headfp; resp && (k < nth);
2339 ++k, resp = resp->nextfp) ;
2340 read_unlock_irqrestore(&sg_index_lock, iflags);
2346 sg_add_sfp(Sg_device * sdp, int dev)
2349 unsigned long iflags;
2352 sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2356 init_waitqueue_head(&sfp->read_wait);
2357 rwlock_init(&sfp->rq_list_lock);
2359 sfp->timeout = SG_DEFAULT_TIMEOUT;
2360 sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2361 sfp->force_packid = SG_DEF_FORCE_PACK_ID;
2362 sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2363 sdp->device->host->unchecked_isa_dma : 1;
2364 sfp->cmd_q = SG_DEF_COMMAND_Q;
2365 sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
2366 sfp->parentdp = sdp;
2367 write_lock_irqsave(&sg_index_lock, iflags);
2370 else { /* add to tail of existing list */
2371 Sg_fd *pfp = sdp->headfp;
2376 write_unlock_irqrestore(&sg_index_lock, iflags);
2377 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2378 if (unlikely(sg_big_buff != def_reserved_size))
2379 sg_big_buff = def_reserved_size;
2381 bufflen = min_t(int, sg_big_buff,
2382 sdp->device->request_queue->max_sectors * 512);
2383 sg_build_reserve(sfp, bufflen);
2384 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2385 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
static void
__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
	Sg_fd *fp;
	Sg_fd *prev_fp;

	prev_fp = sdp->headfp;
	if (sfp == prev_fp)
		sdp->headfp = prev_fp->nextfp;
	else {
		while ((fp = prev_fp->nextfp)) {
			if (sfp == fp) {
				prev_fp->nextfp = fp->nextfp;
				break;
			}
			prev_fp = fp;
		}
	}
	if (sfp->reserve.bufflen > 0) {
		SCSI_LOG_TIMEOUT(6,
			printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
			       (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
		sg_remove_scat(&sfp->reserve);
	}
	sfp->parentdp = NULL;
	SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
	kfree(sfp);
}

/* Returns 0 in normal case, 1 when detached and sdp object removed */
static int
sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
	Sg_request *srp;
	Sg_request *tsrp;
	int dirty = 0;
	int res = 0;

	for (srp = sfp->headrp; srp; srp = tsrp) {
		tsrp = srp->nextrp;
		if (sg_srp_done(srp, sfp))
			sg_finish_rem_req(srp);
		else
			++dirty;
	}
	if (0 == dirty) {
		unsigned long iflags;

		write_lock_irqsave(&sg_index_lock, iflags);
		__sg_remove_sfp(sdp, sfp);
		if (sdp->detached && (NULL == sdp->headfp)) {
			idr_remove(&sg_index_idr, sdp->index);
			kfree(sdp);
			res = 1;
		}
		write_unlock_irqrestore(&sg_index_lock, iflags);
	} else {
		/* MOD_INC's to inhibit unloading sg and associated adapter driver */
		/* only bump the access_count if we actually succeeded in
		 * throwing another counter on the host module */
		scsi_device_get(sdp->device);	/* XXX: retval ignored? */
		sfp->closed = 1;	/* flag dirty state on this fd */
		SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
					   dirty));
	}
	return res;
}
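
/*
 * Returns 1 while any request on this fd has res_used set, i.e. the
 * reserve buffer is still claimed by an outstanding command.
 */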
static int
sg_res_in_use(Sg_fd * sfp)
{
	const Sg_request *srp;
	unsigned long iflags;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp)
		if (srp->res_used)
			break;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return srp ? 1 : 0;
}
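
/*
 * sg_page_malloc() below rounds the request up to a power-of-two
 * number of pages: with 4 KiB pages a 70 KiB request becomes order 5
 * (32 pages, 128 KiB). If allocation fails it retries at successively
 * smaller orders, down to a single page.
 */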
/* The size fetched (value output via retSzp) set when non-NULL return */
static struct page *
sg_page_malloc(int rqSz, int lowDma, int *retSzp)
{
	struct page *resp = NULL;
	gfp_t page_mask;
	int order, a_size;
	int resSz;

	if ((rqSz <= 0) || (NULL == retSzp))
		return resp;

	if (lowDma)
		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
	else
		page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;

	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
	     order++, a_size <<= 1) ;
	resSz = a_size;		/* rounded up if necessary */
	resp = alloc_pages(page_mask, order);
	while ((!resp) && order) {
		--order;
		a_size >>= 1;	/* divide by 2, until PAGE_SIZE */
		resp = alloc_pages(page_mask, order);	/* try half */
		resSz = a_size;
	}
	if (resp) {
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			memset(page_address(resp), 0, resSz);
		*retSzp = resSz;
	}
	return resp;
}

static void
sg_page_free(struct page *page, int size)
{
	int order, a_size;

	if (!page)
		return;
	for (order = 0, a_size = PAGE_SIZE; a_size < size;
	     order++, a_size <<= 1) ;
	__free_pages(page, order);
}
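
/*
 * idr_for_each() callback: remember the highest device index seen so
 * that sg_last_dev() can report one past the largest allocated index
 * for the /proc iterators below.
 */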
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
{
	int *k = data;

	if (*k < id)
		*k = id;
	return 0;
}

static int
sg_last_dev(void)
{
	int k = -1;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return k + 1;		/* origin 1 */
}
#endif

static Sg_device *
sg_get_dev(int dev)
{
	Sg_device *sdp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = idr_find(&sg_index_idr, dev);
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return sdp;
}

#ifdef CONFIG_SCSI_PROC_FS

static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
				  size_t count, loff_t *off);
static struct file_operations adio_fops = {
	/* .owner, .read and .llseek added in sg_proc_init() */
	.open = sg_proc_single_open_adio,
	.write = sg_proc_write_adio,
	.release = single_release,
};

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off);
static struct file_operations dressz_fops = {
	.open = sg_proc_single_open_dressz,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static struct file_operations version_fops = {
	.open = sg_proc_single_open_version,
	.release = single_release,
};

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static struct file_operations devhdr_fops = {
	.open = sg_proc_single_open_devhdr,
	.release = single_release,
};

static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static struct file_operations dev_fops = {
	.open = sg_proc_open_dev,
	.release = seq_release,
};
static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_dev,
};

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static struct file_operations devstrs_fops = {
	.open = sg_proc_open_devstrs,
	.release = seq_release,
};
static struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_devstrs,
};

static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static struct file_operations debug_fops = {
	.open = sg_proc_open_debug,
	.release = seq_release,
};
static struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_debug,
};

struct sg_proc_leaf {
	const char * name;
	struct file_operations * fops;
};

static struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};

static int
sg_proc_init(void)
{
	int k, mask;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
	struct sg_proc_leaf * leaf;

	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
	if (!sg_proc_sgp)
		return 1;
	for (k = 0; k < num_leaves; ++k) {
		leaf = &sg_proc_leaf_arr[k];
		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
		leaf->fops->owner = THIS_MODULE;
		leaf->fops->read = seq_read;
		leaf->fops->llseek = seq_lseek;
		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
	}
	return 0;
}

static void
sg_proc_cleanup(void)
{
	int k;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);

	if (!sg_proc_sgp)
		return;
	for (k = 0; k < num_leaves; ++k)
		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
	remove_proc_entry(sg_proc_sg_dirname, NULL);
}
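
/*
 * Illustrative user-space sketch (not part of the driver): the two
 * writable leaves registered above can be exercised like this,
 * assuming the conventional /proc/scsi/sg location and a process
 * holding both CAP_SYS_ADMIN and CAP_SYS_RAWIO:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/scsi/sg/def_reserved_size", "w");
 *
 *		if (!f)
 *			return 1;
 *		fprintf(f, "131072");
 *		return fclose(f) ? 1 : 0;
 *	}
 */
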
static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *((int *)s->private));
	return 0;
}

static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}

static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
		   size_t count, loff_t *off)
{
	int num;
	char buff[11];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
		return -EFAULT;
	buff[num] = '\0';
	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
	return count;
}

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}

static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	int num;
	unsigned long k = ULONG_MAX;
	char buff[11];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
		return -EFAULT;
	buff[num] = '\0';
	k = simple_strtoul(buff, NULL, 10);
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
		sg_big_buff = k;
		return count;
	}
	return -ERANGE;
}
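
/*
 * Note: sg_add_sfp() first re-syncs sg_big_buff from the
 * def_reserved_size module parameter and then sizes the reserve
 * buffer from sg_big_buff, so a value written here influences file
 * descriptors opened afterwards, not existing ones.
 */
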
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
	return 0;
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
		   "online\n");
	return 0;
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}

struct sg_proc_deviter {
	loff_t index;
	size_t max;
};

static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;
	if (!it)
		return NULL;

	it->index = *pos;
	it->max = sg_last_dev();
	if (it->index >= it->max)
		return NULL;
	return it;
}

static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter * it = s->private;

	*pos = ++it->index;
	return (it->index < it->max) ? it : NULL;
}

static void dev_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);
}
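
/*
 * The three table-style files (devices, device_strs, debug) share this
 * iterator: dev_seq_start() allocates the cursor and bounds it with
 * sg_last_dev(), dev_seq_next() advances it, dev_seq_stop() frees it.
 */
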
static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;

	sdp = it ? sg_get_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			   scsidp->host->host_no, scsidp->channel,
			   scsidp->id, scsidp->lun, (int) scsidp->type,
			   1,	/* "opens" column: always reported as 1 */
			   (int) scsidp->queue_depth,
			   (int) scsidp->device_busy,
			   (int) scsi_device_online(scsidp));
	else
		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	return 0;
}

static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;

	sdp = it ? sg_get_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
			   scsidp->vendor, scsidp->model, scsidp->rev);
	else
		seq_printf(s, "<no active device>\n");
	return 0;
}
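
/*
 * Emit per-fd and per-request state for one device; this is the body
 * of the /proc/scsi/sg/debug output produced via
 * sg_proc_seq_show_debug().
 */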
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
	int k, m, new_interface, blen, usg;
	Sg_request *srp;
	Sg_fd *fp;
	const sg_io_hdr_t *hp;
	const char * cp;
	unsigned int ms;

	for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
			   "(res)sgat=%d low_dma=%d\n", k + 1,
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
			   (int) fp->low_dma);
		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan, (int) fp->closed);
		for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
			hp = &srp->header;
			new_interface = (hp->interface_id == '\0') ? 0 : 1;
			if (srp->res_used) {
				if (new_interface &&
				    (SG_FLAG_MMAP_IO & hp->flags))
					cp = "     mmap>> ";
				else
					cp = "     rb>> ";
			} else {
				if (SG_INFO_DIRECT_IO_MASK & hp->info)
					cp = "     dio>> ";
				else
					cp = "     ";
			}
			seq_printf(s, cp);
			blen = srp->data.bufflen;
			usg = srp->data.k_use_sg;
			seq_printf(s, srp->done ?
				   ((1 == srp->done) ? "rcv:" : "fin:")
				   : "act:");
			seq_printf(s, " id=%d blen=%d",
				   srp->header.pack_id, blen);
			if (srp->done)
				seq_printf(s, " dur=%d", hp->duration);
			else {
				ms = jiffies_to_msecs(jiffies);
				seq_printf(s, " t_o/elap=%d/%d",
					   (new_interface ? hp->timeout :
					    jiffies_to_msecs(fp->timeout)),
					   (ms > hp->duration ? ms - hp->duration : 0));
			}
			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
				   (int) srp->data.cmd_opcode);
		}
		if (0 == m)
			seq_printf(s, "     No requests active\n");
	}
}

static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}

static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;

	if (it && (0 == it->index)) {
		seq_printf(s, "max_active_device=%d(origin 1)\n",
			   (int)it->max);
		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
	}
	sdp = it ? sg_get_dev(it->index) : NULL;
	if (sdp) {
		struct scsi_device *scsidp = sdp->device;

		if (NULL == scsidp) {
			seq_printf(s, "device %d detached ??\n",
				   (int)it->index);
			return 0;
		}

		if (sg_get_nth_sfp(sdp, 0)) {
			seq_printf(s, " >>> device=%s ",
				   sdp->disk->disk_name);
			if (sdp->detached)
				seq_printf(s, "detached pending close ");
			else
				seq_printf
				    (s, "scsi%d chan=%d id=%d lun=%d em=%d",
				     scsidp->host->host_no,
				     scsidp->channel, scsidp->id,
				     scsidp->lun,
				     scsidp->host->hostt->emulated);
			seq_printf(s, " sg_tablesize=%d excl=%d\n",
				   sdp->sg_tablesize, sdp->exclude);
		}
		sg_proc_debug_helper(s, sdp);
	}
	return 0;
}

#endif				/* CONFIG_SCSI_PROC_FS */

module_init(init_sg);
module_exit(exit_sg);