2 * Disk Array driver for HP Smart Array controllers.
3 * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/init.h>
38 #include <linux/hdreg.h>
39 #include <linux/spinlock.h>
40 #include <linux/compat.h>
41 #include <linux/blktrace_api.h>
42 #include <asm/uaccess.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/blkdev.h>
47 #include <linux/genhd.h>
48 #include <linux/completion.h>
49 #include <scsi/scsi.h>
51 #include <scsi/scsi_ioctl.h>
52 #include <linux/cdrom.h>
53 #include <linux/scatterlist.h>
55 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
56 #define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
57 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
59 /* Embedded module documentation macros - see modules.h */
60 MODULE_AUTHOR("Hewlett-Packard Company");
61 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
62 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
63 " SA6i P600 P800 P400 P400i E200 E200i E500");
64 MODULE_VERSION("3.6.14");
65 MODULE_LICENSE("GPL");
67 #include "cciss_cmd.h"
69 #include <linux/cciss_ioctl.h>
71 /* define the PCI info for the cards we can control */
72 static const struct pci_device_id cciss_pci_device_id[] = {
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
75 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
76 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
77 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
78 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
79 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
80 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
81 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
85 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
86 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
93 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
94 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
98 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
100 /* board_id = Subsystem Device ID & Vendor ID
101 * product = Marketing Name for the board
102 * access = Address of the struct of function pointers
103 * nr_cmds = Number of commands supported by controller
105 static struct board_type products[] = {
106 {0x40700E11, "Smart Array 5300", &SA5_access, 512},
107 {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
108 {0x40820E11, "Smart Array 532", &SA5B_access, 512},
109 {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
110 {0x409A0E11, "Smart Array 641", &SA5_access, 512},
111 {0x409B0E11, "Smart Array 642", &SA5_access, 512},
112 {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
113 {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
114 {0x40910E11, "Smart Array 6i", &SA5_access, 512},
115 {0x3225103C, "Smart Array P600", &SA5_access, 512},
116 {0x3223103C, "Smart Array P800", &SA5_access, 512},
117 {0x3234103C, "Smart Array P400", &SA5_access, 512},
118 {0x3235103C, "Smart Array P400i", &SA5_access, 512},
119 {0x3211103C, "Smart Array E200i", &SA5_access, 120},
120 {0x3212103C, "Smart Array E200", &SA5_access, 120},
121 {0x3213103C, "Smart Array E200i", &SA5_access, 120},
122 {0x3214103C, "Smart Array E200i", &SA5_access, 120},
123 {0x3215103C, "Smart Array E200i", &SA5_access, 120},
124 {0x3237103C, "Smart Array E500", &SA5_access, 512},
125 {0x323D103C, "Smart Array P700m", &SA5_access, 512},
126 {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
129 /* How long to wait (in milliseconds) for board to go into simple mode */
130 #define MAX_CONFIG_WAIT 30000
131 #define MAX_IOCTL_CONFIG_WAIT 1000
133 /*define how many times we will try a command because of bus resets */
134 #define MAX_CMD_RETRIES 3
138 /* Originally cciss driver only supports 8 major numbers */
139 #define MAX_CTLR_ORIG 8
141 static ctlr_info_t *hba[MAX_CTLR];
143 static void do_cciss_request(struct request_queue *q);
144 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
145 static int cciss_open(struct inode *inode, struct file *filep);
146 static int cciss_release(struct inode *inode, struct file *filep);
147 static int cciss_ioctl(struct inode *inode, struct file *filep,
148 unsigned int cmd, unsigned long arg);
149 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
151 static int cciss_revalidate(struct gendisk *disk);
152 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
153 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
156 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
157 sector_t *total_size, unsigned int *block_size);
158 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
159 sector_t *total_size, unsigned int *block_size);
160 static void cciss_geometry_inquiry(int ctlr, int logvol,
161 int withirq, sector_t total_size,
162 unsigned int block_size, InquiryData_struct *inq_buff,
163 drive_info_struct *drv);
164 static void cciss_getgeometry(int cntl_num);
165 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
167 static void start_io(ctlr_info_t *h);
168 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
169 unsigned int use_unit_num, unsigned int log_unit,
170 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
171 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
172 unsigned int use_unit_num, unsigned int log_unit,
173 __u8 page_code, int cmd_type);
175 static void fail_all_cmds(unsigned long ctlr);
177 #ifdef CONFIG_PROC_FS
178 static void cciss_procinit(int i);
180 static void cciss_procinit(int i)
183 #endif /* CONFIG_PROC_FS */
186 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
189 static struct block_device_operations cciss_fops = {
190 .owner = THIS_MODULE,
192 .release = cciss_release,
193 .ioctl = cciss_ioctl,
194 .getgeo = cciss_getgeo,
196 .compat_ioctl = cciss_compat_ioctl,
198 .revalidate_disk = cciss_revalidate,
202 * Enqueuing and dequeuing functions for cmdlists.
204 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
208 c->next = c->prev = c;
210 c->prev = (*Qptr)->prev;
212 (*Qptr)->prev->next = c;
217 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
218 CommandList_struct *c)
220 if (c && c->next != c) {
223 c->prev->next = c->next;
224 c->next->prev = c->prev;
231 #include "cciss_scsi.c" /* For SCSI tape support */
233 #define RAID_UNKNOWN 6
235 #ifdef CONFIG_PROC_FS
238 * Report information about this controller.
240 #define ENG_GIG 1000000000
241 #define ENG_GIG_FACTOR (ENG_GIG/512)
242 #define ENGAGE_SCSI "engage scsi"
243 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
247 static struct proc_dir_entry *proc_cciss;
249 static void cciss_seq_show_header(struct seq_file *seq)
251 ctlr_info_t *h = seq->private;
253 seq_printf(seq, "%s: HP %s Controller\n"
254 "Board ID: 0x%08lx\n"
255 "Firmware Version: %c%c%c%c\n"
257 "Logical drives: %d\n"
258 "Current Q depth: %d\n"
259 "Current # commands on controller: %d\n"
260 "Max Q depth since init: %d\n"
261 "Max # commands on controller since init: %d\n"
262 "Max SG entries since init: %d\n",
265 (unsigned long)h->board_id,
266 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
267 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
269 h->Qdepth, h->commands_outstanding,
270 h->maxQsinceinit, h->max_outstanding, h->maxSG);
272 #ifdef CONFIG_CISS_SCSI_TAPE
273 cciss_seq_tape_report(seq, h->ctlr);
274 #endif /* CONFIG_CISS_SCSI_TAPE */
277 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
279 ctlr_info_t *h = seq->private;
280 unsigned ctlr = h->ctlr;
283 /* prevent displaying bogus info during configuration
284 * or deconfiguration of a logical volume
286 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
287 if (h->busy_configuring) {
288 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
289 return ERR_PTR(-EBUSY);
291 h->busy_configuring = 1;
292 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
295 cciss_seq_show_header(seq);
300 static int cciss_seq_show(struct seq_file *seq, void *v)
302 sector_t vol_sz, vol_sz_frac;
303 ctlr_info_t *h = seq->private;
304 unsigned ctlr = h->ctlr;
306 drive_info_struct *drv = &h->drv[*pos];
308 if (*pos > h->highest_lun)
314 vol_sz = drv->nr_blocks;
315 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
317 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
319 if (drv->raid_level > 5)
320 drv->raid_level = RAID_UNKNOWN;
321 seq_printf(seq, "cciss/c%dd%d:"
322 "\t%4u.%02uGB\tRAID %s\n",
323 ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
324 raid_label[drv->raid_level]);
328 static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
330 ctlr_info_t *h = seq->private;
332 if (*pos > h->highest_lun)
339 static void cciss_seq_stop(struct seq_file *seq, void *v)
341 ctlr_info_t *h = seq->private;
343 /* Only reset h->busy_configuring if we succeeded in setting
344 * it during cciss_seq_start. */
345 if (v == ERR_PTR(-EBUSY))
348 h->busy_configuring = 0;
351 static struct seq_operations cciss_seq_ops = {
352 .start = cciss_seq_start,
353 .show = cciss_seq_show,
354 .next = cciss_seq_next,
355 .stop = cciss_seq_stop,
358 static int cciss_seq_open(struct inode *inode, struct file *file)
360 int ret = seq_open(file, &cciss_seq_ops);
361 struct seq_file *seq = file->private_data;
364 seq->private = PDE(inode)->data;
370 cciss_proc_write(struct file *file, const char __user *buf,
371 size_t length, loff_t *ppos)
376 #ifndef CONFIG_CISS_SCSI_TAPE
380 if (!buf || length > PAGE_SIZE - 1)
383 buffer = (char *)__get_free_page(GFP_KERNEL);
388 if (copy_from_user(buffer, buf, length))
390 buffer[length] = '\0';
392 #ifdef CONFIG_CISS_SCSI_TAPE
393 if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
394 struct seq_file *seq = file->private_data;
395 ctlr_info_t *h = seq->private;
398 rc = cciss_engage_scsi(h->ctlr);
404 #endif /* CONFIG_CISS_SCSI_TAPE */
406 /* might be nice to have "disengage" too, but it's not
407 safely possible. (only 1 module use count, lock issues.) */
410 free_page((unsigned long)buffer);
414 static struct file_operations cciss_proc_fops = {
415 .owner = THIS_MODULE,
416 .open = cciss_seq_open,
419 .release = seq_release,
420 .write = cciss_proc_write,
423 static void __devinit cciss_procinit(int i)
425 struct proc_dir_entry *pde;
427 if (proc_cciss == NULL)
428 proc_cciss = proc_mkdir("cciss", proc_root_driver);
431 pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
439 #endif /* CONFIG_PROC_FS */
442 * For operations that cannot sleep, a command block is allocated at init,
443 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
444 * which ones are free or in use. For operations that can wait for kmalloc
445 * to possible sleep, this routine can be called with get_from_pool set to 0.
446 * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
448 static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
450 CommandList_struct *c;
453 dma_addr_t cmd_dma_handle, err_dma_handle;
455 if (!get_from_pool) {
456 c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
457 sizeof(CommandList_struct), &cmd_dma_handle);
460 memset(c, 0, sizeof(CommandList_struct));
464 c->err_info = (ErrorInfo_struct *)
465 pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
468 if (c->err_info == NULL) {
469 pci_free_consistent(h->pdev,
470 sizeof(CommandList_struct), c, cmd_dma_handle);
473 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
474 } else { /* get it out of the controllers pool */
477 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
480 } while (test_and_set_bit
481 (i & (BITS_PER_LONG - 1),
482 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
484 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
487 memset(c, 0, sizeof(CommandList_struct));
488 cmd_dma_handle = h->cmd_pool_dhandle
489 + i * sizeof(CommandList_struct);
490 c->err_info = h->errinfo_pool + i;
491 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
492 err_dma_handle = h->errinfo_pool_dhandle
493 + i * sizeof(ErrorInfo_struct);
499 c->busaddr = (__u32) cmd_dma_handle;
500 temp64.val = (__u64) err_dma_handle;
501 c->ErrDesc.Addr.lower = temp64.val32.lower;
502 c->ErrDesc.Addr.upper = temp64.val32.upper;
503 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
510 * Frees a command block that was previously allocated with cmd_alloc().
512 static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
517 if (!got_from_pool) {
518 temp64.val32.lower = c->ErrDesc.Addr.lower;
519 temp64.val32.upper = c->ErrDesc.Addr.upper;
520 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
521 c->err_info, (dma_addr_t) temp64.val);
522 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
523 c, (dma_addr_t) c->busaddr);
526 clear_bit(i & (BITS_PER_LONG - 1),
527 h->cmd_pool_bits + (i / BITS_PER_LONG));
532 static inline ctlr_info_t *get_host(struct gendisk *disk)
534 return disk->queue->queuedata;
537 static inline drive_info_struct *get_drv(struct gendisk *disk)
539 return disk->private_data;
543 * Open. Make sure the device is really there.
545 static int cciss_open(struct inode *inode, struct file *filep)
547 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
548 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
551 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
552 #endif /* CCISS_DEBUG */
554 if (host->busy_initializing || drv->busy_configuring)
557 * Root is allowed to open raw volume zero even if it's not configured
558 * so array config can still work. Root is also allowed to open any
559 * volume that has a LUN ID, so it can issue IOCTL to reread the
560 * disk information. I don't think I really like this
561 * but I'm already using way to many device nodes to claim another one
562 * for "raw controller".
564 if (drv->heads == 0) {
565 if (iminor(inode) != 0) { /* not node 0? */
566 /* if not node 0 make sure it is a partition = 0 */
567 if (iminor(inode) & 0x0f) {
569 /* if it is, make sure we have a LUN ID */
570 } else if (drv->LunID == 0) {
574 if (!capable(CAP_SYS_ADMIN))
585 static int cciss_release(struct inode *inode, struct file *filep)
587 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
588 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
591 printk(KERN_DEBUG "cciss_release %s\n",
592 inode->i_bdev->bd_disk->disk_name);
593 #endif /* CCISS_DEBUG */
602 static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
606 ret = cciss_ioctl(f->f_path.dentry->d_inode, f, cmd, arg);
611 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
613 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
616 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
619 case CCISS_GETPCIINFO:
620 case CCISS_GETINTINFO:
621 case CCISS_SETINTINFO:
622 case CCISS_GETNODENAME:
623 case CCISS_SETNODENAME:
624 case CCISS_GETHEARTBEAT:
625 case CCISS_GETBUSTYPES:
626 case CCISS_GETFIRMVER:
627 case CCISS_GETDRIVVER:
628 case CCISS_REVALIDVOLS:
629 case CCISS_DEREGDISK:
630 case CCISS_REGNEWDISK:
632 case CCISS_RESCANDISK:
633 case CCISS_GETLUNINFO:
634 return do_ioctl(f, cmd, arg);
636 case CCISS_PASSTHRU32:
637 return cciss_ioctl32_passthru(f, cmd, arg);
638 case CCISS_BIG_PASSTHRU32:
639 return cciss_ioctl32_big_passthru(f, cmd, arg);
646 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
649 IOCTL32_Command_struct __user *arg32 =
650 (IOCTL32_Command_struct __user *) arg;
651 IOCTL_Command_struct arg64;
652 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
658 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
659 sizeof(arg64.LUN_info));
661 copy_from_user(&arg64.Request, &arg32->Request,
662 sizeof(arg64.Request));
664 copy_from_user(&arg64.error_info, &arg32->error_info,
665 sizeof(arg64.error_info));
666 err |= get_user(arg64.buf_size, &arg32->buf_size);
667 err |= get_user(cp, &arg32->buf);
668 arg64.buf = compat_ptr(cp);
669 err |= copy_to_user(p, &arg64, sizeof(arg64));
674 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
678 copy_in_user(&arg32->error_info, &p->error_info,
679 sizeof(arg32->error_info));
685 static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
688 BIG_IOCTL32_Command_struct __user *arg32 =
689 (BIG_IOCTL32_Command_struct __user *) arg;
690 BIG_IOCTL_Command_struct arg64;
691 BIG_IOCTL_Command_struct __user *p =
692 compat_alloc_user_space(sizeof(arg64));
698 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
699 sizeof(arg64.LUN_info));
701 copy_from_user(&arg64.Request, &arg32->Request,
702 sizeof(arg64.Request));
704 copy_from_user(&arg64.error_info, &arg32->error_info,
705 sizeof(arg64.error_info));
706 err |= get_user(arg64.buf_size, &arg32->buf_size);
707 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
708 err |= get_user(cp, &arg32->buf);
709 arg64.buf = compat_ptr(cp);
710 err |= copy_to_user(p, &arg64, sizeof(arg64));
715 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
719 copy_in_user(&arg32->error_info, &p->error_info,
720 sizeof(arg32->error_info));
727 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
729 drive_info_struct *drv = get_drv(bdev->bd_disk);
734 geo->heads = drv->heads;
735 geo->sectors = drv->sectors;
736 geo->cylinders = drv->cylinders;
743 static int cciss_ioctl(struct inode *inode, struct file *filep,
744 unsigned int cmd, unsigned long arg)
746 struct block_device *bdev = inode->i_bdev;
747 struct gendisk *disk = bdev->bd_disk;
748 ctlr_info_t *host = get_host(disk);
749 drive_info_struct *drv = get_drv(disk);
750 int ctlr = host->ctlr;
751 void __user *argp = (void __user *)arg;
754 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
755 #endif /* CCISS_DEBUG */
758 case CCISS_GETPCIINFO:
760 cciss_pci_info_struct pciinfo;
764 pciinfo.domain = pci_domain_nr(host->pdev->bus);
765 pciinfo.bus = host->pdev->bus->number;
766 pciinfo.dev_fn = host->pdev->devfn;
767 pciinfo.board_id = host->board_id;
769 (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
773 case CCISS_GETINTINFO:
775 cciss_coalint_struct intinfo;
779 readl(&host->cfgtable->HostWrite.CoalIntDelay);
781 readl(&host->cfgtable->HostWrite.CoalIntCount);
783 (argp, &intinfo, sizeof(cciss_coalint_struct)))
787 case CCISS_SETINTINFO:
789 cciss_coalint_struct intinfo;
795 if (!capable(CAP_SYS_ADMIN))
798 (&intinfo, argp, sizeof(cciss_coalint_struct)))
800 if ((intinfo.delay == 0) && (intinfo.count == 0))
802 // printk("cciss_ioctl: delay and count cannot be 0\n");
805 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
806 /* Update the field, and then ring the doorbell */
807 writel(intinfo.delay,
808 &(host->cfgtable->HostWrite.CoalIntDelay));
809 writel(intinfo.count,
810 &(host->cfgtable->HostWrite.CoalIntCount));
811 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
813 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
814 if (!(readl(host->vaddr + SA5_DOORBELL)
817 /* delay and try again */
820 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
821 if (i >= MAX_IOCTL_CONFIG_WAIT)
825 case CCISS_GETNODENAME:
827 NodeName_type NodeName;
832 for (i = 0; i < 16; i++)
834 readb(&host->cfgtable->ServerName[i]);
835 if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
839 case CCISS_SETNODENAME:
841 NodeName_type NodeName;
847 if (!capable(CAP_SYS_ADMIN))
851 (NodeName, argp, sizeof(NodeName_type)))
854 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
856 /* Update the field, and then ring the doorbell */
857 for (i = 0; i < 16; i++)
859 &host->cfgtable->ServerName[i]);
861 writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
863 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
864 if (!(readl(host->vaddr + SA5_DOORBELL)
867 /* delay and try again */
870 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
871 if (i >= MAX_IOCTL_CONFIG_WAIT)
876 case CCISS_GETHEARTBEAT:
878 Heartbeat_type heartbeat;
882 heartbeat = readl(&host->cfgtable->HeartBeat);
884 (argp, &heartbeat, sizeof(Heartbeat_type)))
888 case CCISS_GETBUSTYPES:
890 BusTypes_type BusTypes;
894 BusTypes = readl(&host->cfgtable->BusTypes);
896 (argp, &BusTypes, sizeof(BusTypes_type)))
900 case CCISS_GETFIRMVER:
902 FirmwareVer_type firmware;
906 memcpy(firmware, host->firm_ver, 4);
909 (argp, firmware, sizeof(FirmwareVer_type)))
913 case CCISS_GETDRIVVER:
915 DriverVer_type DriverVer = DRIVER_VERSION;
921 (argp, &DriverVer, sizeof(DriverVer_type)))
926 case CCISS_REVALIDVOLS:
927 return rebuild_lun_table(host, NULL);
929 case CCISS_GETLUNINFO:{
930 LogvolInfo_struct luninfo;
932 luninfo.LunID = drv->LunID;
933 luninfo.num_opens = drv->usage_count;
934 luninfo.num_parts = 0;
935 if (copy_to_user(argp, &luninfo,
936 sizeof(LogvolInfo_struct)))
940 case CCISS_DEREGDISK:
941 return rebuild_lun_table(host, disk);
944 return rebuild_lun_table(host, NULL);
948 IOCTL_Command_struct iocommand;
949 CommandList_struct *c;
953 DECLARE_COMPLETION_ONSTACK(wait);
958 if (!capable(CAP_SYS_RAWIO))
962 (&iocommand, argp, sizeof(IOCTL_Command_struct)))
964 if ((iocommand.buf_size < 1) &&
965 (iocommand.Request.Type.Direction != XFER_NONE)) {
968 #if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
969 /* Check kmalloc limits */
970 if (iocommand.buf_size > 128000)
973 if (iocommand.buf_size > 0) {
974 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
978 if (iocommand.Request.Type.Direction == XFER_WRITE) {
979 /* Copy the data into the buffer we created */
981 (buff, iocommand.buf, iocommand.buf_size)) {
986 memset(buff, 0, iocommand.buf_size);
988 if ((c = cmd_alloc(host, 0)) == NULL) {
992 // Fill in the command type
993 c->cmd_type = CMD_IOCTL_PEND;
994 // Fill in Command Header
995 c->Header.ReplyQueue = 0; // unused in simple mode
996 if (iocommand.buf_size > 0) // buffer to fill
998 c->Header.SGList = 1;
999 c->Header.SGTotal = 1;
1000 } else // no buffers to fill
1002 c->Header.SGList = 0;
1003 c->Header.SGTotal = 0;
1005 c->Header.LUN = iocommand.LUN_info;
1006 c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag
1008 // Fill in Request block
1009 c->Request = iocommand.Request;
1011 // Fill in the scatter gather information
1012 if (iocommand.buf_size > 0) {
1013 temp64.val = pci_map_single(host->pdev, buff,
1015 PCI_DMA_BIDIRECTIONAL);
1016 c->SG[0].Addr.lower = temp64.val32.lower;
1017 c->SG[0].Addr.upper = temp64.val32.upper;
1018 c->SG[0].Len = iocommand.buf_size;
1019 c->SG[0].Ext = 0; // we are not chaining
1023 /* Put the request on the tail of the request queue */
1024 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1025 addQ(&host->reqQ, c);
1028 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1030 wait_for_completion(&wait);
1032 /* unlock the buffers from DMA */
1033 temp64.val32.lower = c->SG[0].Addr.lower;
1034 temp64.val32.upper = c->SG[0].Addr.upper;
1035 pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
1037 PCI_DMA_BIDIRECTIONAL);
1039 /* Copy the error information out */
1040 iocommand.error_info = *(c->err_info);
1042 (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
1044 cmd_free(host, c, 0);
1048 if (iocommand.Request.Type.Direction == XFER_READ) {
1049 /* Copy the data out of the buffer we created */
1051 (iocommand.buf, buff, iocommand.buf_size)) {
1053 cmd_free(host, c, 0);
1058 cmd_free(host, c, 0);
1061 case CCISS_BIG_PASSTHRU:{
1062 BIG_IOCTL_Command_struct *ioc;
1063 CommandList_struct *c;
1064 unsigned char **buff = NULL;
1065 int *buff_size = NULL;
1067 unsigned long flags;
1071 DECLARE_COMPLETION_ONSTACK(wait);
1074 BYTE __user *data_ptr;
1078 if (!capable(CAP_SYS_RAWIO))
1080 ioc = (BIG_IOCTL_Command_struct *)
1081 kmalloc(sizeof(*ioc), GFP_KERNEL);
1086 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
1090 if ((ioc->buf_size < 1) &&
1091 (ioc->Request.Type.Direction != XFER_NONE)) {
1095 /* Check kmalloc limits using all SGs */
1096 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
1100 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
1105 kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
1110 buff_size = kmalloc(MAXSGENTRIES * sizeof(int),
1116 left = ioc->buf_size;
1117 data_ptr = ioc->buf;
1120 ioc->malloc_size) ? ioc->
1122 buff_size[sg_used] = sz;
1123 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
1124 if (buff[sg_used] == NULL) {
1128 if (ioc->Request.Type.Direction == XFER_WRITE) {
1130 (buff[sg_used], data_ptr, sz)) {
1135 memset(buff[sg_used], 0, sz);
1141 if ((c = cmd_alloc(host, 0)) == NULL) {
1145 c->cmd_type = CMD_IOCTL_PEND;
1146 c->Header.ReplyQueue = 0;
1148 if (ioc->buf_size > 0) {
1149 c->Header.SGList = sg_used;
1150 c->Header.SGTotal = sg_used;
1152 c->Header.SGList = 0;
1153 c->Header.SGTotal = 0;
1155 c->Header.LUN = ioc->LUN_info;
1156 c->Header.Tag.lower = c->busaddr;
1158 c->Request = ioc->Request;
1159 if (ioc->buf_size > 0) {
1161 for (i = 0; i < sg_used; i++) {
1163 pci_map_single(host->pdev, buff[i],
1165 PCI_DMA_BIDIRECTIONAL);
1166 c->SG[i].Addr.lower =
1168 c->SG[i].Addr.upper =
1170 c->SG[i].Len = buff_size[i];
1171 c->SG[i].Ext = 0; /* we are not chaining */
1175 /* Put the request on the tail of the request queue */
1176 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1177 addQ(&host->reqQ, c);
1180 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1181 wait_for_completion(&wait);
1182 /* unlock the buffers from DMA */
1183 for (i = 0; i < sg_used; i++) {
1184 temp64.val32.lower = c->SG[i].Addr.lower;
1185 temp64.val32.upper = c->SG[i].Addr.upper;
1186 pci_unmap_single(host->pdev,
1187 (dma_addr_t) temp64.val, buff_size[i],
1188 PCI_DMA_BIDIRECTIONAL);
1190 /* Copy the error information out */
1191 ioc->error_info = *(c->err_info);
1192 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1193 cmd_free(host, c, 0);
1197 if (ioc->Request.Type.Direction == XFER_READ) {
1198 /* Copy the data out of the buffer we created */
1199 BYTE __user *ptr = ioc->buf;
1200 for (i = 0; i < sg_used; i++) {
1202 (ptr, buff[i], buff_size[i])) {
1203 cmd_free(host, c, 0);
1207 ptr += buff_size[i];
1210 cmd_free(host, c, 0);
1214 for (i = 0; i < sg_used; i++)
1223 /* scsi_cmd_ioctl handles these, below, though some are not */
1224 /* very meaningful for cciss. SG_IO is the main one people want. */
1226 case SG_GET_VERSION_NUM:
1227 case SG_SET_TIMEOUT:
1228 case SG_GET_TIMEOUT:
1229 case SG_GET_RESERVED_SIZE:
1230 case SG_SET_RESERVED_SIZE:
1231 case SG_EMULATED_HOST:
1233 case SCSI_IOCTL_SEND_COMMAND:
1234 return scsi_cmd_ioctl(filep, disk->queue, disk, cmd, argp);
1236 /* scsi_cmd_ioctl would normally handle these, below, but */
1237 /* they aren't a good fit for cciss, as CD-ROMs are */
1238 /* not supported, and we don't have any bus/target/lun */
1239 /* which we present to the kernel. */
1241 case CDROM_SEND_PACKET:
1242 case CDROMCLOSETRAY:
1244 case SCSI_IOCTL_GET_IDLUN:
1245 case SCSI_IOCTL_GET_BUS_NUMBER:
1251 static void cciss_check_queues(ctlr_info_t *h)
1253 int start_queue = h->next_to_run;
1256 /* check to see if we have maxed out the number of commands that can
1257 * be placed on the queue. If so then exit. We do this check here
1258 * in case the interrupt we serviced was from an ioctl and did not
1259 * free any new commands.
1261 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
1264 /* We have room on the queue for more commands. Now we need to queue
1265 * them up. We will also keep track of the next queue to run so
1266 * that every queue gets a chance to be started first.
1268 for (i = 0; i < h->highest_lun + 1; i++) {
1269 int curr_queue = (start_queue + i) % (h->highest_lun + 1);
1270 /* make sure the disk has been added and the drive is real
1271 * because this can be called from the middle of init_one.
1273 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
1275 blk_start_queue(h->gendisk[curr_queue]->queue);
1277 /* check to see if we have maxed out the number of commands
1278 * that can be placed on the queue.
1280 if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
1281 if (curr_queue == start_queue) {
1283 (start_queue + 1) % (h->highest_lun + 1);
1286 h->next_to_run = curr_queue;
1290 curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
1295 static void cciss_softirq_done(struct request *rq)
1297 CommandList_struct *cmd = rq->completion_data;
1298 ctlr_info_t *h = hba[cmd->ctlr];
1299 unsigned long flags;
1303 if (cmd->Request.Type.Direction == XFER_READ)
1304 ddir = PCI_DMA_FROMDEVICE;
1306 ddir = PCI_DMA_TODEVICE;
1308 /* command did not need to be retried */
1309 /* unmap the DMA mapping for all the scatter gather elements */
1310 for (i = 0; i < cmd->Header.SGList; i++) {
1311 temp64.val32.lower = cmd->SG[i].Addr.lower;
1312 temp64.val32.upper = cmd->SG[i].Addr.upper;
1313 pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
1317 printk("Done with %p\n", rq);
1318 #endif /* CCISS_DEBUG */
1320 if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
1323 spin_lock_irqsave(&h->lock, flags);
1324 cmd_free(h, cmd, 1);
1325 cciss_check_queues(h);
1326 spin_unlock_irqrestore(&h->lock, flags);
1329 /* This function will check the usage_count of the drive to be updated/added.
1330 * If the usage_count is zero then the drive information will be updated and
1331 * the disk will be re-registered with the kernel. If not then it will be
1332 * left alone for the next reboot. The exception to this is disk 0 which
1333 * will always be left registered with the kernel since it is also the
1334 * controller node. Any changes to disk 0 will show up on the next
1337 static void cciss_update_drive_info(int ctlr, int drv_index)
1339 ctlr_info_t *h = hba[ctlr];
1340 struct gendisk *disk;
1341 InquiryData_struct *inq_buff = NULL;
1342 unsigned int block_size;
1343 sector_t total_size;
1344 unsigned long flags = 0;
1347 /* if the disk already exists then deregister it before proceeding */
1348 if (h->drv[drv_index].raid_level != -1) {
1349 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1350 h->drv[drv_index].busy_configuring = 1;
1351 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1352 ret = deregister_disk(h->gendisk[drv_index],
1353 &h->drv[drv_index], 0);
1354 h->drv[drv_index].busy_configuring = 0;
1357 /* If the disk is in use return */
1361 /* Get information about the disk and modify the driver structure */
1362 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1363 if (inq_buff == NULL)
1366 /* testing to see if 16-byte CDBs are already being used */
1367 if (h->cciss_read == CCISS_READ_16) {
1368 cciss_read_capacity_16(h->ctlr, drv_index, 1,
1369 &total_size, &block_size);
1373 cciss_read_capacity(ctlr, drv_index, 1,
1374 &total_size, &block_size);
1376 /* if read_capacity returns all F's this volume is >2TB in size */
1377 /* so we switch to 16-byte CDB's for all read/write ops */
1378 if (total_size == 0xFFFFFFFFULL) {
1379 cciss_read_capacity_16(ctlr, drv_index, 1,
1380 &total_size, &block_size);
1381 h->cciss_read = CCISS_READ_16;
1382 h->cciss_write = CCISS_WRITE_16;
1384 h->cciss_read = CCISS_READ_10;
1385 h->cciss_write = CCISS_WRITE_10;
1388 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1389 inq_buff, &h->drv[drv_index]);
1392 disk = h->gendisk[drv_index];
1393 set_capacity(disk, h->drv[drv_index].nr_blocks);
1395 /* if it's the controller it's already added */
1397 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1398 sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
1399 disk->major = h->major;
1400 disk->first_minor = drv_index << NWD_SHIFT;
1401 disk->fops = &cciss_fops;
1402 disk->private_data = &h->drv[drv_index];
1404 /* Set up queue information */
1405 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1407 /* This is a hardware imposed limit. */
1408 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1410 /* This is a limit in the driver and could be eliminated. */
1411 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1413 blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
1415 blk_queue_softirq_done(disk->queue, cciss_softirq_done);
1417 disk->queue->queuedata = hba[ctlr];
1419 blk_queue_hardsect_size(disk->queue,
1420 hba[ctlr]->drv[drv_index].block_size);
1422 h->drv[drv_index].queue = disk->queue;
1430 printk(KERN_ERR "cciss: out of memory\n");
1434 /* This function will find the first index of the controllers drive array
1435 * that has a -1 for the raid_level and will return that index. This is
1436 * where new drives will be added. If the index to be returned is greater
1437 * than the highest_lun index for the controller then highest_lun is set
1438 * to this new index. If there are no available indexes then -1 is returned.
1440 static int cciss_find_free_drive_index(int ctlr)
1444 for (i = 0; i < CISS_MAX_LUN; i++) {
1445 if (hba[ctlr]->drv[i].raid_level == -1) {
1446 if (i > hba[ctlr]->highest_lun)
1447 hba[ctlr]->highest_lun = i;
1454 /* This function will add and remove logical drives from the Logical
1455 * drive array of the controller and maintain persistency of ordering
1456 * so that mount points are preserved until the next reboot. This allows
1457 * for the removal of logical drives in the middle of the drive array
1458 * without a re-ordering of those drives.
1460 * h = The controller to perform the operations on
1461 * del_disk = The disk to remove if specified. If the value given
1462 * is NULL then no disk is removed.
1464 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1468 ReportLunData_struct *ld_buff = NULL;
1469 drive_info_struct *drv = NULL;
1476 unsigned long flags;
1478 /* Set busy_configuring flag for this operation */
1479 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1480 if (h->busy_configuring) {
1481 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1484 h->busy_configuring = 1;
1486 /* if del_disk is NULL then we are being called to add a new disk
1487 * and update the logical drive table. If it is not NULL then
1488 * we will check if the disk is in use or not.
1490 if (del_disk != NULL) {
1491 drv = get_drv(del_disk);
1492 drv->busy_configuring = 1;
1493 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1494 return_code = deregister_disk(del_disk, drv, 1);
1495 drv->busy_configuring = 0;
1496 h->busy_configuring = 0;
1499 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1500 if (!capable(CAP_SYS_RAWIO))
1503 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1504 if (ld_buff == NULL)
1507 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1508 sizeof(ReportLunData_struct), 0,
1511 if (return_code == IO_OK) {
1513 be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
1514 } else { /* reading number of logical volumes failed */
1515 printk(KERN_WARNING "cciss: report logical volume"
1516 " command failed\n");
1521 num_luns = listlength / 8; /* 8 bytes per entry */
1522 if (num_luns > CISS_MAX_LUN) {
1523 num_luns = CISS_MAX_LUN;
1524 printk(KERN_WARNING "cciss: more luns configured"
1525 " on controller than can be handled by"
1529 /* Compare controller drive array to drivers drive array.
1530 * Check for updates in the drive information and any new drives
1531 * on the controller.
1533 for (i = 0; i < num_luns; i++) {
1539 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1541 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1543 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1544 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1546 /* Find if the LUN is already in the drive array
1547 * of the controller. If so then update its info
1548 * if not is use. If it does not exist then find
1549 * the first free index and add it.
1551 for (j = 0; j <= h->highest_lun; j++) {
1552 if (h->drv[j].LunID == lunid) {
1558 /* check if the drive was found already in the array */
1560 drv_index = cciss_find_free_drive_index(ctlr);
1561 if (drv_index == -1)
1564 /*Check if the gendisk needs to be allocated */
1565 if (!h->gendisk[drv_index]){
1566 h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
1567 if (!h->gendisk[drv_index]){
1568 printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
1573 h->drv[drv_index].LunID = lunid;
1574 cciss_update_drive_info(ctlr, drv_index);
1580 h->busy_configuring = 0;
1581 /* We return -1 here to tell the ACU that we have registered/updated
1582 * all of the drives that we can and to keep it from calling us
1587 printk(KERN_ERR "cciss: out of memory\n");
1591 /* This function will deregister the disk and it's queue from the
1592 * kernel. It must be called with the controller lock held and the
1593 * drv structures busy_configuring flag set. It's parameters are:
1595 * disk = This is the disk to be deregistered
1596 * drv = This is the drive_info_struct associated with the disk to be
1597 * deregistered. It contains information about the disk used
1599 * clear_all = This flag determines whether or not the disk information
1600 * is going to be completely cleared out and the highest_lun
1601 * reset. Sometimes we want to clear out information about
1602 * the disk in preparation for re-adding it. In this case
1603 * the highest_lun should be left unchanged and the LunID
1604 * should not be cleared.
1606 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1610 ctlr_info_t *h = get_host(disk);
1612 if (!capable(CAP_SYS_RAWIO))
1615 /* make sure logical volume is NOT is use */
1616 if (clear_all || (h->gendisk[0] == disk)) {
1617 if (drv->usage_count > 1)
1619 } else if (drv->usage_count > 0)
1622 /* invalidate the devices and deregister the disk. If it is disk
1623 * zero do not deregister it but just zero out it's values. This
1624 * allows us to delete disk zero but keep the controller registered.
1626 if (h->gendisk[0] != disk) {
1627 struct request_queue *q = disk->queue;
1628 if (disk->flags & GENHD_FL_UP)
1631 blk_cleanup_queue(q);
1632 /* Set drv->queue to NULL so that we do not try
1633 * to call blk_start_queue on this queue in the
1638 /* If clear_all is set then we are deleting the logical
1639 * drive, not just refreshing its info. For drives
1640 * other than disk 0 we will call put_disk. We do not
1641 * do this for disk 0 as we need it to be able to
1642 * configure the controller.
1645 /* This isn't pretty, but we need to find the
1646 * disk in our array and NULL our the pointer.
1647 * This is so that we will call alloc_disk if
1648 * this index is used again later.
1650 for (i=0; i < CISS_MAX_LUN; i++){
1651 if(h->gendisk[i] == disk){
1652 h->gendisk[i] = NULL;
1659 set_capacity(disk, 0);
1663 /* zero out the disk size info */
1665 drv->block_size = 0;
1669 drv->raid_level = -1; /* This can be used as a flag variable to
1670 * indicate that this element of the drive
1675 /* check to see if it was the last disk */
1676 if (drv == h->drv + h->highest_lun) {
1677 /* if so, find the new hightest lun */
1678 int i, newhighest = -1;
1679 for (i = 0; i < h->highest_lun; i++) {
1680 /* if the disk has size > 0, it is available */
1681 if (h->drv[i].heads)
1684 h->highest_lun = newhighest;
1692 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1693 1: address logical volume log_unit,
1694 2: periph device address is scsi3addr */
1695 unsigned int log_unit, __u8 page_code,
1696 unsigned char *scsi3addr, int cmd_type)
1698 ctlr_info_t *h = hba[ctlr];
1699 u64bit buff_dma_handle;
1702 c->cmd_type = CMD_IOCTL_PEND;
1703 c->Header.ReplyQueue = 0;
1705 c->Header.SGList = 1;
1706 c->Header.SGTotal = 1;
1708 c->Header.SGList = 0;
1709 c->Header.SGTotal = 0;
1711 c->Header.Tag.lower = c->busaddr;
1713 c->Request.Type.Type = cmd_type;
1714 if (cmd_type == TYPE_CMD) {
1717 /* If the logical unit number is 0 then, this is going
1718 to controller so It's a physical command
1719 mode = 0 target = 0. So we have nothing to write.
1720 otherwise, if use_unit_num == 1,
1721 mode = 1(volume set addressing) target = LUNID
1722 otherwise, if use_unit_num == 2,
1723 mode = 0(periph dev addr) target = scsi3addr */
1724 if (use_unit_num == 1) {
1725 c->Header.LUN.LogDev.VolId =
1726 h->drv[log_unit].LunID;
1727 c->Header.LUN.LogDev.Mode = 1;
1728 } else if (use_unit_num == 2) {
1729 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1731 c->Header.LUN.LogDev.Mode = 0;
1733 /* are we trying to read a vital product page */
1734 if (page_code != 0) {
1735 c->Request.CDB[1] = 0x01;
1736 c->Request.CDB[2] = page_code;
1738 c->Request.CDBLen = 6;
1739 c->Request.Type.Attribute = ATTR_SIMPLE;
1740 c->Request.Type.Direction = XFER_READ;
1741 c->Request.Timeout = 0;
1742 c->Request.CDB[0] = CISS_INQUIRY;
1743 c->Request.CDB[4] = size & 0xFF;
1745 case CISS_REPORT_LOG:
1746 case CISS_REPORT_PHYS:
1747 /* Talking to controller so It's a physical command
1748 mode = 00 target = 0. Nothing to write.
1750 c->Request.CDBLen = 12;
1751 c->Request.Type.Attribute = ATTR_SIMPLE;
1752 c->Request.Type.Direction = XFER_READ;
1753 c->Request.Timeout = 0;
1754 c->Request.CDB[0] = cmd;
1755 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1756 c->Request.CDB[7] = (size >> 16) & 0xFF;
1757 c->Request.CDB[8] = (size >> 8) & 0xFF;
1758 c->Request.CDB[9] = size & 0xFF;
1761 case CCISS_READ_CAPACITY:
1762 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1763 c->Header.LUN.LogDev.Mode = 1;
1764 c->Request.CDBLen = 10;
1765 c->Request.Type.Attribute = ATTR_SIMPLE;
1766 c->Request.Type.Direction = XFER_READ;
1767 c->Request.Timeout = 0;
1768 c->Request.CDB[0] = cmd;
1770 case CCISS_READ_CAPACITY_16:
1771 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1772 c->Header.LUN.LogDev.Mode = 1;
1773 c->Request.CDBLen = 16;
1774 c->Request.Type.Attribute = ATTR_SIMPLE;
1775 c->Request.Type.Direction = XFER_READ;
1776 c->Request.Timeout = 0;
1777 c->Request.CDB[0] = cmd;
1778 c->Request.CDB[1] = 0x10;
1779 c->Request.CDB[10] = (size >> 24) & 0xFF;
1780 c->Request.CDB[11] = (size >> 16) & 0xFF;
1781 c->Request.CDB[12] = (size >> 8) & 0xFF;
1782 c->Request.CDB[13] = size & 0xFF;
1783 c->Request.Timeout = 0;
1784 c->Request.CDB[0] = cmd;
1786 case CCISS_CACHE_FLUSH:
1787 c->Request.CDBLen = 12;
1788 c->Request.Type.Attribute = ATTR_SIMPLE;
1789 c->Request.Type.Direction = XFER_WRITE;
1790 c->Request.Timeout = 0;
1791 c->Request.CDB[0] = BMIC_WRITE;
1792 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1796 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1799 } else if (cmd_type == TYPE_MSG) {
1801 case 0: /* ABORT message */
1802 c->Request.CDBLen = 12;
1803 c->Request.Type.Attribute = ATTR_SIMPLE;
1804 c->Request.Type.Direction = XFER_WRITE;
1805 c->Request.Timeout = 0;
1806 c->Request.CDB[0] = cmd; /* abort */
1807 c->Request.CDB[1] = 0; /* abort a command */
1808 /* buff contains the tag of the command to abort */
1809 memcpy(&c->Request.CDB[4], buff, 8);
1811 case 1: /* RESET message */
1812 c->Request.CDBLen = 12;
1813 c->Request.Type.Attribute = ATTR_SIMPLE;
1814 c->Request.Type.Direction = XFER_WRITE;
1815 c->Request.Timeout = 0;
1816 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1817 c->Request.CDB[0] = cmd; /* reset */
1818 c->Request.CDB[1] = 0x04; /* reset a LUN */
1820 case 3: /* No-Op message */
1821 c->Request.CDBLen = 1;
1822 c->Request.Type.Attribute = ATTR_SIMPLE;
1823 c->Request.Type.Direction = XFER_WRITE;
1824 c->Request.Timeout = 0;
1825 c->Request.CDB[0] = cmd;
1829 "cciss%d: unknown message type %d\n", ctlr, cmd);
1834 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1837 /* Fill in the scatter gather information */
1839 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1841 PCI_DMA_BIDIRECTIONAL);
1842 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1843 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1844 c->SG[0].Len = size;
1845 c->SG[0].Ext = 0; /* we are not chaining */
1850 static int sendcmd_withirq(__u8 cmd,
1854 unsigned int use_unit_num,
1855 unsigned int log_unit, __u8 page_code, int cmd_type)
1857 ctlr_info_t *h = hba[ctlr];
1858 CommandList_struct *c;
1859 u64bit buff_dma_handle;
1860 unsigned long flags;
1862 DECLARE_COMPLETION_ONSTACK(wait);
1864 if ((c = cmd_alloc(h, 0)) == NULL)
1866 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1867 log_unit, page_code, NULL, cmd_type);
1868 if (return_status != IO_OK) {
1870 return return_status;
1875 /* Put the request on the tail of the queue and send it */
1876 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1880 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1882 wait_for_completion(&wait);
1884 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1885 switch (c->err_info->CommandStatus) {
1886 case CMD_TARGET_STATUS:
1887 printk(KERN_WARNING "cciss: cmd %p has "
1888 " completed with errors\n", c);
1889 if (c->err_info->ScsiStatus) {
1890 printk(KERN_WARNING "cciss: cmd %p "
1891 "has SCSI Status = %x\n",
1892 c, c->err_info->ScsiStatus);
1896 case CMD_DATA_UNDERRUN:
1897 case CMD_DATA_OVERRUN:
1898 /* expected for inquire and report lun commands */
1901 printk(KERN_WARNING "cciss: Cmd %p is "
1902 "reported invalid\n", c);
1903 return_status = IO_ERROR;
1905 case CMD_PROTOCOL_ERR:
1906 printk(KERN_WARNING "cciss: cmd %p has "
1907 "protocol error \n", c);
1908 return_status = IO_ERROR;
1910 case CMD_HARDWARE_ERR:
1911 printk(KERN_WARNING "cciss: cmd %p had "
1912 " hardware error\n", c);
1913 return_status = IO_ERROR;
1915 case CMD_CONNECTION_LOST:
1916 printk(KERN_WARNING "cciss: cmd %p had "
1917 "connection lost\n", c);
1918 return_status = IO_ERROR;
1921 printk(KERN_WARNING "cciss: cmd %p was "
1923 return_status = IO_ERROR;
1925 case CMD_ABORT_FAILED:
1926 printk(KERN_WARNING "cciss: cmd %p reports "
1927 "abort failed\n", c);
1928 return_status = IO_ERROR;
1930 case CMD_UNSOLICITED_ABORT:
1932 "cciss%d: unsolicited abort %p\n", ctlr, c);
1933 if (c->retry_count < MAX_CMD_RETRIES) {
1935 "cciss%d: retrying %p\n", ctlr, c);
1937 /* erase the old error information */
1938 memset(c->err_info, 0,
1939 sizeof(ErrorInfo_struct));
1940 return_status = IO_OK;
1941 INIT_COMPLETION(wait);
1944 return_status = IO_ERROR;
1947 printk(KERN_WARNING "cciss: cmd %p returned "
1948 "unknown status %x\n", c,
1949 c->err_info->CommandStatus);
1950 return_status = IO_ERROR;
1953 /* unlock the buffers from DMA */
1954 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1955 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1956 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1957 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1959 return return_status;
1962 static void cciss_geometry_inquiry(int ctlr, int logvol,
1963 int withirq, sector_t total_size,
1964 unsigned int block_size,
1965 InquiryData_struct *inq_buff,
1966 drive_info_struct *drv)
1971 memset(inq_buff, 0, sizeof(InquiryData_struct));
1973 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1974 inq_buff, sizeof(*inq_buff), 1,
1975 logvol, 0xC1, TYPE_CMD);
1977 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1978 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1980 if (return_code == IO_OK) {
1981 if (inq_buff->data_byte[8] == 0xFF) {
1983 "cciss: reading geometry failed, volume "
1984 "does not support reading geometry\n");
1986 drv->sectors = 32; // Sectors per track
1987 drv->cylinders = total_size + 1;
1988 drv->raid_level = RAID_UNKNOWN;
1990 drv->heads = inq_buff->data_byte[6];
1991 drv->sectors = inq_buff->data_byte[7];
1992 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1993 drv->cylinders += inq_buff->data_byte[5];
1994 drv->raid_level = inq_buff->data_byte[8];
1996 drv->block_size = block_size;
1997 drv->nr_blocks = total_size + 1;
1998 t = drv->heads * drv->sectors;
2000 sector_t real_size = total_size + 1;
2001 unsigned long rem = sector_div(real_size, t);
2004 drv->cylinders = real_size;
2006 } else { /* Get geometry failed */
2007 printk(KERN_WARNING "cciss: reading geometry failed\n");
2009 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2010 drv->heads, drv->sectors, drv->cylinders);
2014 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2015 unsigned int *block_size)
2017 ReadCapdata_struct *buf;
2020 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
2022 printk(KERN_WARNING "cciss: out of memory\n");
2027 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
2028 ctlr, buf, sizeof(ReadCapdata_struct),
2029 1, logvol, 0, TYPE_CMD);
2031 return_code = sendcmd(CCISS_READ_CAPACITY,
2032 ctlr, buf, sizeof(ReadCapdata_struct),
2033 1, logvol, 0, NULL, TYPE_CMD);
2034 if (return_code == IO_OK) {
2035 *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
2036 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2037 } else { /* read capacity command failed */
2038 printk(KERN_WARNING "cciss: read capacity failed\n");
2040 *block_size = BLOCK_SIZE;
2042 if (*total_size != 0)
2043 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2044 (unsigned long long)*total_size+1, *block_size);
2049 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2051 ReadCapdata_struct_16 *buf;
2054 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2056 printk(KERN_WARNING "cciss: out of memory\n");
2061 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2062 ctlr, buf, sizeof(ReadCapdata_struct_16),
2063 1, logvol, 0, TYPE_CMD);
2066 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2067 ctlr, buf, sizeof(ReadCapdata_struct_16),
2068 1, logvol, 0, NULL, TYPE_CMD);
2070 if (return_code == IO_OK) {
2071 *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
2072 *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
2073 } else { /* read capacity command failed */
2074 printk(KERN_WARNING "cciss: read capacity failed\n");
2076 *block_size = BLOCK_SIZE;
2078 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2079 (unsigned long long)*total_size+1, *block_size);
2083 static int cciss_revalidate(struct gendisk *disk)
2085 ctlr_info_t *h = get_host(disk);
2086 drive_info_struct *drv = get_drv(disk);
2089 unsigned int block_size;
2090 sector_t total_size;
2091 InquiryData_struct *inq_buff = NULL;
2093 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2094 if (h->drv[logvol].LunID == drv->LunID) {
2103 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2104 if (inq_buff == NULL) {
2105 printk(KERN_WARNING "cciss: out of memory\n");
2108 if (h->cciss_read == CCISS_READ_10) {
2109 cciss_read_capacity(h->ctlr, logvol, 1,
2110 &total_size, &block_size);
2112 cciss_read_capacity_16(h->ctlr, logvol, 1,
2113 &total_size, &block_size);
2115 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2118 blk_queue_hardsect_size(drv->queue, drv->block_size);
2119 set_capacity(disk, drv->nr_blocks);
2126 * Wait polling for a command to complete.
2127 * The memory mapped FIFO is polled for the completion.
2128 * Used only at init time, interrupts from the HBA are disabled.
2130 static unsigned long pollcomplete(int ctlr)
2135 /* Wait (up to 20 seconds) for a command to complete */
2137 for (i = 20 * HZ; i > 0; i--) {
2138 done = hba[ctlr]->access.command_completed(hba[ctlr]);
2139 if (done == FIFO_EMPTY)
2140 schedule_timeout_uninterruptible(1);
2144 /* Invalid address to tell caller we ran out of time */
2148 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2150 /* We get in here if sendcmd() is polling for completions
2151 and gets some command back that it wasn't expecting --
2152 something other than that which it just sent down.
2153 Ordinarily, that shouldn't happen, but it can happen when
2154 the scsi tape stuff gets into error handling mode, and
2155 starts using sendcmd() to try to abort commands and
2156 reset tape drives. In that case, sendcmd may pick up
2157 completions of commands that were sent to logical drives
2158 through the block i/o system, or cciss ioctls completing, etc.
2159 In that case, we need to save those completions for later
2160 processing by the interrupt handler.
2163 #ifdef CONFIG_CISS_SCSI_TAPE
2164 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2166 /* If it's not the scsi tape stuff doing error handling, (abort */
2167 /* or reset) then we don't expect anything weird. */
2168 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2170 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2171 "Invalid command list address returned! (%lx)\n",
2173 /* not much we can do. */
2174 #ifdef CONFIG_CISS_SCSI_TAPE
2178 /* We've sent down an abort or reset, but something else
2180 if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
2181 /* Uh oh. No room to save it for later... */
2182 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2183 "reject list overflow, command lost!\n", ctlr);
2186 /* Save it for later */
2187 srl->complete[srl->ncompletions] = complete;
2188 srl->ncompletions++;
2194 * Send a command to the controller, and wait for it to complete.
2195 * Only used at init time.
2197 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2198 1: address logical volume log_unit,
2199 2: periph device address is scsi3addr */
2200 unsigned int log_unit,
2201 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2203 CommandList_struct *c;
2205 unsigned long complete;
2206 ctlr_info_t *info_p = hba[ctlr];
2207 u64bit buff_dma_handle;
2208 int status, done = 0;
2210 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2211 printk(KERN_WARNING "cciss: unable to get memory");
2214 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2215 log_unit, page_code, scsi3addr, cmd_type);
2216 if (status != IO_OK) {
2217 cmd_free(info_p, c, 1);
2225 printk(KERN_DEBUG "cciss: turning intr off\n");
2226 #endif /* CCISS_DEBUG */
2227 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2229 /* Make sure there is room in the command FIFO */
2230 /* Actually it should be completely empty at this time */
2231 /* unless we are in here doing error handling for the scsi */
2232 /* tape side of the driver. */
2233 for (i = 200000; i > 0; i--) {
2234 /* if fifo isn't full go */
2235 if (!(info_p->access.fifo_full(info_p))) {
2240 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2241 " waiting!\n", ctlr);
2246 info_p->access.submit_command(info_p, c);
2249 complete = pollcomplete(ctlr);
2252 printk(KERN_DEBUG "cciss: command completed\n");
2253 #endif /* CCISS_DEBUG */
2255 if (complete == 1) {
2257 "cciss cciss%d: SendCmd Timeout out, "
2258 "No command list address returned!\n", ctlr);
2264 /* This will need to change for direct lookup completions */
2265 if ((complete & CISS_ERROR_BIT)
2266 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2267 /* if data overrun or underun on Report command
2270 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2271 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2272 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2273 ((c->err_info->CommandStatus ==
2274 CMD_DATA_OVERRUN) ||
2275 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
2277 complete = c->busaddr;
2279 if (c->err_info->CommandStatus ==
2280 CMD_UNSOLICITED_ABORT) {
2281 printk(KERN_WARNING "cciss%d: "
2282 "unsolicited abort %p\n",
2284 if (c->retry_count < MAX_CMD_RETRIES) {
2286 "cciss%d: retrying %p\n",
2289 /* erase the old error */
2291 memset(c->err_info, 0,
2293 (ErrorInfo_struct));
2297 "cciss%d: retried %p too "
2298 "many times\n", ctlr, c);
2302 } else if (c->err_info->CommandStatus ==
2305 "cciss%d: command could not be aborted.\n",
2310 printk(KERN_WARNING "cciss%d: sendcmd"
2311 " Error %x\n", ctlr,
2312 c->err_info->CommandStatus);
2313 printk(KERN_WARNING "cciss%d: sendcmd invalid command:"
2315 " size %x num %x value %x\n", ctlr,
2317 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
2319 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
2321 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
2327 /* This will need changing for direct lookup completions */
2328 if (complete != c->busaddr) {
2329 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2330 BUG(); /* we are pretty much hosed if we get here. */
2338 /* unlock the data buffer from DMA */
2339 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2340 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2341 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2342 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2343 #ifdef CONFIG_CISS_SCSI_TAPE
2344 /* if we saved some commands for later, process them now. */
2345 if (info_p->scsi_rejects.ncompletions > 0)
2346 do_cciss_intr(0, info_p);
2348 cmd_free(info_p, c, 1);
2353 * Map (physical) PCI mem into (virtual) kernel space
2355 static void __iomem *remap_pci_mem(ulong base, ulong size)
2357 ulong page_base = ((ulong) base) & PAGE_MASK;
2358 ulong page_offs = ((ulong) base) - page_base;
2359 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2361 return page_remapped ? (page_remapped + page_offs) : NULL;
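/*
 * Worked example (illustrative): with 4 KB pages, mapping base 0xf7e01230 for
 * 0x250 bytes gives page_base = 0xf7e01000 and page_offs = 0x230, so we
 * ioremap() 0x230 + 0x250 bytes and return a pointer 0x230 bytes into the
 * mapping, i.e. the virtual address corresponding to the original base.
 */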
2365 * Takes jobs off the request queue and sends them to the hardware, then
2366 * puts them on the completion queue to wait for completion.
2368 static void start_io(ctlr_info_t *h)
2370 CommandList_struct *c;
2372 while ((c = h->reqQ) != NULL) {
2373 /* can't do anything if fifo is full */
2374 if ((h->access.fifo_full(h))) {
2375 printk(KERN_WARNING "cciss: fifo full\n");
2379 /* Get the first entry from the Request Q */
2380 removeQ(&(h->reqQ), c);
2383 /* Tell the controller execute command */
2384 h->access.submit_command(h, c);
2386 /* Put job onto the completed Q */
2387 addQ(&(h->cmpQ), c);
2391 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2392 /* Zeros out the error record and then resends the command back */
2393 /* to the controller */
2394 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2396 /* erase the old error information */
2397 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2399 /* add it to software queue and then send it to the controller */
2400 addQ(&(h->reqQ), c);
2402 if (h->Qdepth > h->maxQsinceinit)
2403 h->maxQsinceinit = h->Qdepth;
2408 static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
2409 unsigned int msg_byte, unsigned int host_byte,
2410 unsigned int driver_byte)
2412 /* inverse of macros in scsi.h */
2413 return (scsi_status_byte & 0xff) |
2414 ((msg_byte & 0xff) << 8) |
2415 ((host_byte & 0xff) << 16) |
2416 ((driver_byte & 0xff) << 24);
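/*
 * Worked example (illustrative): with the usual scsi.h values
 * (SAM_STAT_CHECK_CONDITION = 0x02, DID_ERROR = 0x07, DRIVER_OK = 0x00),
 * make_status_bytes(SAM_STAT_CHECK_CONDITION, 0, DID_ERROR, DRIVER_OK)
 * packs to 0x00070002 -- the inverse of the status_byte()/msg_byte()/
 * host_byte()/driver_byte() macros.
 */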
2419 static inline int evaluate_target_status(CommandList_struct *cmd)
2421 unsigned char sense_key;
2422 unsigned char status_byte, msg_byte, host_byte, driver_byte;
2425 /* If we get in here, it means we got "target status", that is, scsi status */
2426 status_byte = cmd->err_info->ScsiStatus;
2427 driver_byte = DRIVER_OK;
2428 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
2430 if (blk_pc_request(cmd->rq))
2431 host_byte = DID_PASSTHROUGH;
2435 error_value = make_status_bytes(status_byte, msg_byte,
2436 host_byte, driver_byte);
2438 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
2439 if (!blk_pc_request(cmd->rq))
2440 printk(KERN_WARNING "cciss: cmd %p "
2441 "has SCSI Status 0x%x\n",
2442 cmd, cmd->err_info->ScsiStatus);
2446 /* check the sense key */
2447 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2448 /* no status or recovered error */
2449 if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
2452 if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
2453 if (error_value != 0)
2454 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
2455 " sense key = 0x%x\n", cmd, sense_key);
2459 /* SG_IO or similar, copy sense data back */
2460 if (cmd->rq->sense) {
2461 if (cmd->rq->sense_len > cmd->err_info->SenseLen)
2462 cmd->rq->sense_len = cmd->err_info->SenseLen;
2463 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
2464 cmd->rq->sense_len);
2466 cmd->rq->sense_len = 0;
2471 /* Checks the status of the job and marks all of the completed job's
2472 * buffers as done. Note that this function does not need
2473 * to hold the hba/queue lock.
2475 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2479 struct request *rq = cmd->rq;
2484 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
2486 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
2487 goto after_error_processing;
2489 switch (cmd->err_info->CommandStatus) {
2490 case CMD_TARGET_STATUS:
2491 rq->errors = evaluate_target_status(cmd);
2493 case CMD_DATA_UNDERRUN:
2494 if (blk_fs_request(cmd->rq)) {
2495 printk(KERN_WARNING "cciss: cmd %p has"
2496 " completed with data underrun "
2498 cmd->rq->data_len = cmd->err_info->ResidualCnt;
2501 case CMD_DATA_OVERRUN:
2502 if (blk_fs_request(cmd->rq))
2503 printk(KERN_WARNING "cciss: cmd %p has"
2504 " completed with data overrun "
2508 printk(KERN_WARNING "cciss: cmd %p is "
2509 "reported invalid\n", cmd);
2510 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2511 cmd->err_info->CommandStatus, DRIVER_OK,
2512 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2514 case CMD_PROTOCOL_ERR:
2515 printk(KERN_WARNING "cciss: cmd %p has "
2516 "protocol error \n", cmd);
2517 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2518 cmd->err_info->CommandStatus, DRIVER_OK,
2519 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2521 case CMD_HARDWARE_ERR:
2522 printk(KERN_WARNING "cciss: cmd %p had "
2523 " hardware error\n", cmd);
2524 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2525 cmd->err_info->CommandStatus, DRIVER_OK,
2526 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2528 case CMD_CONNECTION_LOST:
2529 printk(KERN_WARNING "cciss: cmd %p had "
2530 "connection lost\n", cmd);
2531 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2532 cmd->err_info->CommandStatus, DRIVER_OK,
2533 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2536 printk(KERN_WARNING "cciss: cmd %p was "
2537 "aborted\n", cmd);
2538 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2539 cmd->err_info->CommandStatus, DRIVER_OK,
2540 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2542 case CMD_ABORT_FAILED:
2543 printk(KERN_WARNING "cciss: cmd %p reports "
2544 "abort failed\n", cmd);
2545 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2546 cmd->err_info->CommandStatus, DRIVER_OK,
2547 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2549 case CMD_UNSOLICITED_ABORT:
2550 printk(KERN_WARNING "cciss%d: unsolicited "
2551 "abort %p\n", h->ctlr, cmd);
2552 if (cmd->retry_count < MAX_CMD_RETRIES) {
2555 "cciss%d: retrying %p\n", h->ctlr, cmd);
2559 "cciss%d: %p retried too "
2560 "many times\n", h->ctlr, cmd);
2561 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2562 cmd->err_info->CommandStatus, DRIVER_OK,
2563 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
2566 printk(KERN_WARNING "cciss: cmd %p timed out\n", cmd);
2567 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2568 cmd->err_info->CommandStatus, DRIVER_OK,
2569 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2572 printk(KERN_WARNING "cciss: cmd %p returned "
2573 "unknown status %x\n", cmd,
2574 cmd->err_info->CommandStatus);
2575 rq->errors = make_status_bytes(SAM_STAT_GOOD,
2576 cmd->err_info->CommandStatus, DRIVER_OK,
2577 blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
2580 after_error_processing:
2582 /* We need to return this command */
2584 resend_cciss_cmd(h, cmd);
2587 cmd->rq->completion_data = cmd;
2588 blk_complete_request(cmd->rq);
2592 * Get a request and submit it to the controller.
2594 static void do_cciss_request(struct request_queue *q)
2596 ctlr_info_t *h = q->queuedata;
2597 CommandList_struct *c;
2600 struct request *creq;
2602 struct scatterlist tmp_sg[MAXSGENTRIES];
2603 drive_info_struct *drv;
2606 /* We call start_io here in case there is a command waiting on the
2607 * queue that has not been sent.
2609 if (blk_queue_plugged(q))
2613 creq = elv_next_request(q);
2617 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2619 if ((c = cmd_alloc(h, 1)) == NULL)
2622 blkdev_dequeue_request(creq);
2624 spin_unlock_irq(q->queue_lock);
2626 c->cmd_type = CMD_RWREQ;
2629 /* fill in the request */
2630 drv = creq->rq_disk->private_data;
2631 c->Header.ReplyQueue = 0; // unused in simple mode
2632 /* got command from pool, so use the command block index instead */
2633 /* for direct lookups. */
2634 /* The first 2 bits are reserved for controller error reporting. */
2635 c->Header.Tag.lower = (c->cmdindex << 3);
2636 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
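/*
 * Tag layout (illustrative): bits 1:0 are reserved for controller error
 * reporting, bit 2 marks a direct-lookup tag, and bits 31:3 carry the command
 * block index.  For example, cmdindex = 5 yields Tag.lower = (5 << 3) | 0x04
 * = 0x2c, and the index can be recovered from a completed tag as (tag >> 3).
 */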
2637 c->Header.LUN.LogDev.VolId = drv->LunID;
2638 c->Header.LUN.LogDev.Mode = 1;
2639 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2640 c->Request.Type.Type = TYPE_CMD; // It is a command.
2641 c->Request.Type.Attribute = ATTR_SIMPLE;
2642 c->Request.Type.Direction =
2643 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
2644 c->Request.Timeout = 0; // Don't time out
2645 c->Request.CDB[0] =
2646 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2647 start_blk = creq->sector;
2649 printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n", (int)creq->sector,
2650 (int)creq->nr_sectors);
2651 #endif /* CCISS_DEBUG */
2653 sg_init_table(tmp_sg, MAXSGENTRIES);
2654 seg = blk_rq_map_sg(q, creq, tmp_sg);
2656 /* get the DMA records for the setup */
2657 if (c->Request.Type.Direction == XFER_READ)
2658 dir = PCI_DMA_FROMDEVICE;
2660 dir = PCI_DMA_TODEVICE;
2662 for (i = 0; i < seg; i++) {
2663 c->SG[i].Len = tmp_sg[i].length;
2664 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
2666 tmp_sg[i].length, dir);
2667 c->SG[i].Addr.lower = temp64.val32.lower;
2668 c->SG[i].Addr.upper = temp64.val32.upper;
2669 c->SG[i].Ext = 0; // we are not chaining
2671 /* track how many SG entries we are using */
2676 printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
2677 creq->nr_sectors, seg);
2678 #endif /* CCISS_DEBUG */
2680 c->Header.SGList = c->Header.SGTotal = seg;
2681 if (likely(blk_fs_request(creq))) {
2682 if(h->cciss_read == CCISS_READ_10) {
2683 c->Request.CDB[1] = 0;
2684 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2685 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2686 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2687 c->Request.CDB[5] = start_blk & 0xff;
2688 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2689 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2690 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2691 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2693 u32 upper32 = upper_32_bits(start_blk);
2695 c->Request.CDBLen = 16;
2696 c->Request.CDB[1]= 0;
2697 c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
2698 c->Request.CDB[3]= (upper32 >> 16) & 0xff;
2699 c->Request.CDB[4]= (upper32 >> 8) & 0xff;
2700 c->Request.CDB[5]= upper32 & 0xff;
2701 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2702 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2703 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2704 c->Request.CDB[9]= start_blk & 0xff;
2705 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2706 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2707 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2708 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2709 c->Request.CDB[14] = c->Request.CDB[15] = 0;
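/*
 * Worked example (illustrative): for start_blk = 0x123456789a and
 * nr_sectors = 8, the 16-byte CDB built above carries the LBA big-endian in
 * bytes 2..9 (00 00 00 12 34 56 78 9a) and the transfer length in bytes
 * 10..13 (00 00 00 08), matching the SCSI READ(16)/WRITE(16) format.
 */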
2711 } else if (blk_pc_request(creq)) {
2712 c->Request.CDBLen = creq->cmd_len;
2713 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
2715 printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
2719 spin_lock_irq(q->queue_lock);
2721 addQ(&(h->reqQ), c);
2723 if (h->Qdepth > h->maxQsinceinit)
2724 h->maxQsinceinit = h->Qdepth;
2730 /* We will already have the driver lock here, so no need to lock it. */
2736 static inline unsigned long get_next_completion(ctlr_info_t *h)
2738 #ifdef CONFIG_CISS_SCSI_TAPE
2739 /* Any rejects from sendcmd() lying around? Process them first */
2740 if (h->scsi_rejects.ncompletions == 0)
2741 return h->access.command_completed(h);
2743 struct sendcmd_reject_list *srl;
2745 srl = &h->scsi_rejects;
2746 n = --srl->ncompletions;
2747 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2749 return srl->complete[n];
2752 return h->access.command_completed(h);
2756 static inline int interrupt_pending(ctlr_info_t *h)
2758 #ifdef CONFIG_CISS_SCSI_TAPE
2759 return (h->access.intr_pending(h)
2760 || (h->scsi_rejects.ncompletions > 0));
2762 return h->access.intr_pending(h);
2766 static inline long interrupt_not_for_us(ctlr_info_t *h)
2768 #ifdef CONFIG_CISS_SCSI_TAPE
2769 return (((h->access.intr_pending(h) == 0) ||
2770 (h->interrupts_enabled == 0))
2771 && (h->scsi_rejects.ncompletions == 0));
2773 return (((h->access.intr_pending(h) == 0) ||
2774 (h->interrupts_enabled == 0)));
2778 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2780 ctlr_info_t *h = dev_id;
2781 CommandList_struct *c;
2782 unsigned long flags;
2785 if (interrupt_not_for_us(h))
2788 * If there are completed commands in the completion queue,
2789 * we had better do something about it.
2791 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2792 while (interrupt_pending(h)) {
2793 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2797 if (a2 >= h->nr_cmds) {
2799 "cciss: controller cciss%d failed, stopping.\n",
2801 fail_all_cmds(h->ctlr);
2805 c = h->cmd_pool + a2;
2810 if ((c = h->cmpQ) == NULL) {
2812 "cciss: Completion of %08x ignored\n",
2816 while (c->busaddr != a) {
2823 * If we've found the command, take it off the
2824 * completion Q and free it
2826 if (c->busaddr == a) {
2827 removeQ(&h->cmpQ, c);
2828 if (c->cmd_type == CMD_RWREQ) {
2829 complete_command(h, c, 0);
2830 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2831 complete(c->waiting);
2833 #ifdef CONFIG_CISS_SCSI_TAPE
2834 else if (c->cmd_type == CMD_SCSI)
2835 complete_scsi_command(c, 0, a1);
2842 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2847 * We cannot read the structure directly; for portability we must use
2848 * the access functions.
2849 * This is for debug only.
2852 static void print_cfg_table(CfgTable_struct *tb)
2857 printk("Controller Configuration information\n");
2858 printk("------------------------------------\n");
2859 for (i = 0; i < 4; i++)
2860 temp_name[i] = readb(&(tb->Signature[i]));
2861 temp_name[4] = '\0';
2862 printk(" Signature = %s\n", temp_name);
2863 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2864 printk(" Transport methods supported = 0x%x\n",
2865 readl(&(tb->TransportSupport)));
2866 printk(" Transport methods active = 0x%x\n",
2867 readl(&(tb->TransportActive)));
2868 printk(" Requested transport Method = 0x%x\n",
2869 readl(&(tb->HostWrite.TransportRequest)));
2870 printk(" Coalesce Interrupt Delay = 0x%x\n",
2871 readl(&(tb->HostWrite.CoalIntDelay)));
2872 printk(" Coalesce Interrupt Count = 0x%x\n",
2873 readl(&(tb->HostWrite.CoalIntCount)));
2874 printk(" Max outstanding commands = 0x%d\n",
2875 readl(&(tb->CmdsOutMax)));
2876 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2877 for (i = 0; i < 16; i++)
2878 temp_name[i] = readb(&(tb->ServerName[i]));
2879 temp_name[16] = '\0';
2880 printk(" Server Name = %s\n", temp_name);
2881 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2883 #endif /* CCISS_DEBUG */
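/*
 * The helper below translates a BAR register offset read from the controller
 * (e.g. PCI_BASE_ADDRESS_1) into a PCI resource index.  Illustrative example:
 * if BAR 0 is a plain 32-bit memory BAR it advances the running offset by 4,
 * so an offset of PCI_BASE_ADDRESS_1 - PCI_BASE_ADDRESS_0 resolves to
 * resource index 1; a 64-bit memory BAR is assumed to advance the offset by 8.
 */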
2885 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2887 int i, offset, mem_type, bar_type;
2888 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2891 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2892 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2893 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2896 mem_type = pci_resource_flags(pdev, i) &
2897 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2899 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2900 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2901 offset += 4; /* 32 bit */
2903 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2906 default: /* reserved in PCI 2.2 */
2908 "Base address is invalid\n");
2913 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2919 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2920 * controllers that are capable. If not, we use IO-APIC mode.
2923 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2924 struct pci_dev *pdev, __u32 board_id)
2926 #ifdef CONFIG_PCI_MSI
2928 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2929 {0, 2}, {0, 3} };
2932 /* Some boards advertise MSI but don't really support it */
2933 if ((board_id == 0x40700E11) ||
2934 (board_id == 0x40800E11) ||
2935 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2936 goto default_int_mode;
2938 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2939 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2941 c->intr[0] = cciss_msix_entries[0].vector;
2942 c->intr[1] = cciss_msix_entries[1].vector;
2943 c->intr[2] = cciss_msix_entries[2].vector;
2944 c->intr[3] = cciss_msix_entries[3].vector;
2949 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2950 "available\n", err);
2951 goto default_int_mode;
2953 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2955 goto default_int_mode;
2958 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2959 if (!pci_enable_msi(pdev)) {
2962 printk(KERN_WARNING "cciss: MSI init failed\n");
2966 #endif /* CONFIG_PCI_MSI */
2967 /* if we get here we're going to use the default interrupt mode */
2968 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2972 static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2974 ushort subsystem_vendor_id, subsystem_device_id, command;
2975 __u32 board_id, scratchpad = 0;
2977 __u32 cfg_base_addr;
2978 __u64 cfg_base_addr_index;
2981 /* check to see if controller has been disabled */
2982 /* BEFORE trying to enable it */
2983 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2984 if (!(command & 0x02)) {
2986 "cciss: controller appears to be disabled\n");
2990 err = pci_enable_device(pdev);
2992 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2996 err = pci_request_regions(pdev, "cciss");
2998 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
3003 subsystem_vendor_id = pdev->subsystem_vendor;
3004 subsystem_device_id = pdev->subsystem_device;
3005 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
3006 subsystem_vendor_id);
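/*
 * Worked example (illustrative): subsystem vendor 0x0e11 with subsystem
 * device 0x4070 yields board_id 0x40700e11, the form matched against the
 * products[] table and the MSI blacklist in cciss_interrupt_mode().
 */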
3009 printk("command = %x\n", command);
3010 printk("irq = %x\n", pdev->irq);
3011 printk("board_id = %x\n", board_id);
3012 #endif /* CCISS_DEBUG */
3014 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
3015 * else we use the IO-APIC interrupt assigned to us by system ROM.
3017 cciss_interrupt_mode(c, pdev, board_id);
3020 * Memory base addr is the first address; the second points to the config
3021 * table.
3024 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
3026 printk("address 0 = %x\n", c->paddr);
3027 #endif /* CCISS_DEBUG */
3028 c->vaddr = remap_pci_mem(c->paddr, 0x250);
3030 /* Wait for the board to become ready. (PCI hotplug needs this.)
3031 * We poll for up to 120 secs, once per 100ms. */
3032 for (i = 0; i < 1200; i++) {
3033 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
3034 if (scratchpad == CCISS_FIRMWARE_READY)
3036 set_current_state(TASK_INTERRUPTIBLE);
3037 schedule_timeout(HZ / 10); /* wait 100ms */
3039 if (scratchpad != CCISS_FIRMWARE_READY) {
3040 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
3042 goto err_out_free_res;
3045 /* get the address index number */
3046 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
3047 cfg_base_addr &= (__u32) 0x0000ffff;
3049 printk("cfg base address = %x\n", cfg_base_addr);
3050 #endif /* CCISS_DEBUG */
3051 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
3053 printk("cfg base address index = %x\n", cfg_base_addr_index);
3054 #endif /* CCISS_DEBUG */
3055 if (cfg_base_addr_index == -1) {
3056 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
3058 goto err_out_free_res;
3061 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
3063 printk("cfg offset = %x\n", cfg_offset);
3064 #endif /* CCISS_DEBUG */
3065 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
3066 cfg_base_addr_index) +
3067 cfg_offset, sizeof(CfgTable_struct));
3068 c->board_id = board_id;
3071 print_cfg_table(c->cfgtable);
3072 #endif /* CCISS_DEBUG */
3074 for (i = 0; i < ARRAY_SIZE(products); i++) {
3075 if (board_id == products[i].board_id) {
3076 c->product_name = products[i].product_name;
3077 c->access = *(products[i].access);
3078 c->nr_cmds = products[i].nr_cmds;
3082 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
3083 (readb(&c->cfgtable->Signature[1]) != 'I') ||
3084 (readb(&c->cfgtable->Signature[2]) != 'S') ||
3085 (readb(&c->cfgtable->Signature[3]) != 'S')) {
3086 printk("Does not appear to be a valid CISS config table\n");
3088 goto err_out_free_res;
3090 /* We didn't find the controller in our list. We know the
3091 * signature is valid. If it's an HP device let's try to
3092 * bind to the device and fire it up. Otherwise we bail.
3094 if (i == ARRAY_SIZE(products)) {
3095 if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
3096 c->product_name = products[i-1].product_name;
3097 c->access = *(products[i-1].access);
3098 c->nr_cmds = products[i-1].nr_cmds;
3099 printk(KERN_WARNING "cciss: This is an unknown "
3100 "Smart Array controller.\n"
3101 "cciss: Please update to the latest driver "
3102 "available from www.hp.com.\n");
3104 printk(KERN_WARNING "cciss: Sorry, I don't know how"
3105 " to access the Smart Array controller %08lx\n"
3106 , (unsigned long)board_id);
3108 goto err_out_free_res;
3113 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
3115 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
3117 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
3121 /* Disabling DMA prefetch and refetch for the P600.
3122 * An ASIC bug may result in accesses to invalid memory addresses.
3123 * We've disabled prefetch for some time now. Testing with XEN
3124 * kernels revealed a bug in the refetch if dom0 resides on a P600.
3126 if(board_id == 0x3225103C) {
3129 dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
3130 dma_prefetch |= 0x8000;
3131 writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
3132 pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
3134 pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
3138 printk("Trying to put board into Simple mode\n");
3139 #endif /* CCISS_DEBUG */
3140 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
3141 /* Update the field, and then ring the doorbell */
3142 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
3143 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
3145 /* under certain very rare conditions, this can take a while.
3146 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3147 * as we enter this code.) */
3148 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3149 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3151 /* delay and try again */
3152 set_current_state(TASK_INTERRUPTIBLE);
3153 schedule_timeout(10);
3157 printk(KERN_DEBUG "cciss: doorbell wait loop ended at i = %d, doorbell = %x\n", i,
3158 readl(c->vaddr + SA5_DOORBELL));
3159 #endif /* CCISS_DEBUG */
3161 print_cfg_table(c->cfgtable);
3162 #endif /* CCISS_DEBUG */
3164 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3165 printk(KERN_WARNING "cciss: unable to get board into"
3166 " simple mode\n");
3168 goto err_out_free_res;
3174 * Deliberately omit pci_disable_device(): it does something nasty to
3175 * Smart Array controllers that pci_enable_device does not undo
3177 pci_release_regions(pdev);
3182 * Gets information about the local volumes attached to the controller.
3184 static void cciss_getgeometry(int cntl_num)
3186 ReportLunData_struct *ld_buff;
3187 InquiryData_struct *inq_buff;
3192 unsigned block_size;
3193 sector_t total_size;
3195 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3196 if (ld_buff == NULL) {
3197 printk(KERN_ERR "cciss: out of memory\n");
3200 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3201 if (inq_buff == NULL) {
3202 printk(KERN_ERR "cciss: out of memory\n");
3206 /* Get the firmware version */
3207 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3208 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3210 if (return_code == IO_OK) {
3211 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3212 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3213 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3214 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3215 } else { /* send command failed */
3217 printk(KERN_WARNING "cciss: unable to determine firmware"
3218 " version of controller\n");
3220 /* Get the number of logical volumes */
3221 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3222 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3225 if (return_code == IO_OK) {
3227 printk("LUN Data\n--------------------------\n");
3228 #endif /* CCISS_DEBUG */
3231 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3233 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3235 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3236 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
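/*
 * Worked example (illustrative): a returned LUNListLength of 00 00 00 18
 * (big-endian 24) describes 24 bytes of LUN entries, i.e. 24 / 8 = 3 logical
 * volumes reported by the controller.
 */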
3237 } else { /* reading number of logical volumes failed */
3239 printk(KERN_WARNING "cciss: report logical volume"
3240 " command failed\n");
3243 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
3244 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3246 "ciss: only %d number of logical volumes supported\n",
3248 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3251 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3252 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3253 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3254 hba[cntl_num]->num_luns);
3255 #endif /* CCISS_DEBUG */
3257 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3258 for (i = 0; i < CISS_MAX_LUN; i++) {
3259 if (i < hba[cntl_num]->num_luns) {
3260 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3262 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3264 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3266 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3268 hba[cntl_num]->drv[i].LunID = lunid;
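/*
 * Note the byte order (illustrative): the LUN entry is assembled with byte 3
 * as the most significant byte, so an entry whose first four bytes are
 * 02 00 00 00 becomes LunID 0x00000002.
 */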
3271 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3272 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3273 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3274 hba[cntl_num]->drv[i].LunID);
3275 #endif /* CCISS_DEBUG */
3277 /* testing to see if 16-byte CDBs are already being used */
3278 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3279 cciss_read_capacity_16(cntl_num, i, 0,
3280 &total_size, &block_size);
3283 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3285 /* If read_capacity returns all F's, the logical drive is >2TB */
3286 /* so we switch to 16-byte CDBs for all read/write ops */
3287 if(total_size == 0xFFFFFFFFULL) {
3288 cciss_read_capacity_16(cntl_num, i, 0,
3289 &total_size, &block_size);
3290 hba[cntl_num]->cciss_read = CCISS_READ_16;
3291 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3293 hba[cntl_num]->cciss_read = CCISS_READ_10;
3294 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3297 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3298 block_size, inq_buff,
3299 &hba[cntl_num]->drv[i]);
3301 /* initialize raid_level to indicate this drive slot is free */
3302 hba[cntl_num]->drv[i].raid_level = -1;
3309 /* Function to find the first free pointer into our hba[] array */
3310 /* Returns -1 if no free entries are left. */
3311 static int alloc_cciss_hba(void)
3315 for (i = 0; i < MAX_CTLR; i++) {
3319 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3322 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
3323 if (!p->gendisk[0]) {
3331 printk(KERN_WARNING "cciss: This driver supports a maximum"
3332 " of %d controllers.\n", MAX_CTLR);
3335 printk(KERN_ERR "cciss: out of memory.\n");
3339 static void free_hba(int i)
3341 ctlr_info_t *p = hba[i];
3345 for (n = 0; n < CISS_MAX_LUN; n++)
3346 put_disk(p->gendisk[n]);
3351 * This is it. Find all the controllers and register them. I really hate
3352 * stealing all these major device numbers.
3353 * returns the number of block devices registered.
3355 static int __devinit cciss_init_one(struct pci_dev *pdev,
3356 const struct pci_device_id *ent)
3363 i = alloc_cciss_hba();
3367 hba[i]->busy_initializing = 1;
3369 if (cciss_pci_init(hba[i], pdev) != 0)
3372 sprintf(hba[i]->devname, "cciss%d", i);
3374 hba[i]->pdev = pdev;
3376 /* configure PCI DMA stuff */
3377 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3379 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3382 printk(KERN_ERR "cciss: no suitable DMA available\n");
3387 * register with the major number, or get a dynamic major number
3388 * by passing 0 as argument. This is done to support
3389 * more than 8 controllers.
3391 if (i < MAX_CTLR_ORIG)
3392 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3393 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3394 if (rc == -EBUSY || rc == -EINVAL) {
3396 "cciss: Unable to get major number %d for %s "
3397 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3400 if (i >= MAX_CTLR_ORIG)
3404 /* make sure the board interrupts are off */
3405 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3406 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3407 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3408 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3409 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3413 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3414 hba[i]->devname, pdev->device, pci_name(pdev),
3415 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3417 hba[i]->cmd_pool_bits =
3418 kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
3419 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
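/*
 * Worked example (illustrative): with nr_cmds = 128 and BITS_PER_LONG = 64,
 * the allocation above is (128 + 63) / 64 = 2 longs (16 bytes), one bit per
 * command block in cmd_pool.
 */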
3420 hba[i]->cmd_pool = (CommandList_struct *)
3421 pci_alloc_consistent(hba[i]->pdev,
3422 hba[i]->nr_cmds * sizeof(CommandList_struct),
3423 &(hba[i]->cmd_pool_dhandle));
3424 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3425 pci_alloc_consistent(hba[i]->pdev,
3426 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3427 &(hba[i]->errinfo_pool_dhandle));
3428 if ((hba[i]->cmd_pool_bits == NULL)
3429 || (hba[i]->cmd_pool == NULL)
3430 || (hba[i]->errinfo_pool == NULL)) {
3431 printk(KERN_ERR "cciss: out of memory\n");
3434 #ifdef CONFIG_CISS_SCSI_TAPE
3435 hba[i]->scsi_rejects.complete =
3436 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3437 (hba[i]->nr_cmds + 5), GFP_KERNEL);
3438 if (hba[i]->scsi_rejects.complete == NULL) {
3439 printk(KERN_ERR "cciss: out of memory\n");
3443 spin_lock_init(&hba[i]->lock);
3445 /* Initialize the pdev driver private data to point at hba[i]. */
3447 pci_set_drvdata(pdev, hba[i]);
3448 /* command and error info recs zeroed out before they are used */
3450 memset(hba[i]->cmd_pool_bits, 0,
3451 ((hba[i]->nr_cmds + BITS_PER_LONG -
3452 1) / BITS_PER_LONG) * sizeof(unsigned long));
3455 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3456 #endif /* CCISS_DEBUG */
3458 cciss_getgeometry(i);
3460 cciss_scsi_setup(i);
3462 /* Turn the interrupts on so we can service requests */
3463 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3467 hba[i]->cciss_max_sectors = 2048;
3469 hba[i]->busy_initializing = 0;
3472 drive_info_struct *drv = &(hba[i]->drv[j]);
3473 struct gendisk *disk = hba[i]->gendisk[j];
3474 struct request_queue *q;
3476 /* Check if the disk was allocated already */
3478 hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
3479 disk = hba[i]->gendisk[j];
3482 /* Check that the disk was able to be allocated */
3484 printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
3488 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3491 "cciss: unable to allocate queue for disk %d\n",
3497 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3499 /* This is a hardware imposed limit. */
3500 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3502 /* This is a limit in the driver and could be eliminated. */
3503 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3505 blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
3507 blk_queue_softirq_done(q, cciss_softirq_done);
3509 q->queuedata = hba[i];
3510 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3511 disk->major = hba[i]->major;
3512 disk->first_minor = j << NWD_SHIFT;
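/*
 * Illustrative example, assuming NWD_SHIFT is 4 (16 minors per volume) as in
 * the driver header: controller 0, volume 2 gets disk_name "cciss/c0d2" and
 * first_minor 32, leaving minors 33..47 for its partitions.
 */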
3513 disk->fops = &cciss_fops;
3515 disk->private_data = drv;
3516 disk->driverfs_dev = &pdev->dev;
3517 /* we must register the controller even if no disks exist */
3518 /* this is for the online array utilities */
3519 if (!drv->heads && j)
3520 continue;
3521 blk_queue_hardsect_size(q, drv->block_size);
3522 set_capacity(disk, drv->nr_blocks);
3525 } while (j <= hba[i]->highest_lun);
3530 #ifdef CONFIG_CISS_SCSI_TAPE
3531 kfree(hba[i]->scsi_rejects.complete);
3533 kfree(hba[i]->cmd_pool_bits);
3534 if (hba[i]->cmd_pool)
3535 pci_free_consistent(hba[i]->pdev,
3536 hba[i]->nr_cmds * sizeof(CommandList_struct),
3537 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3538 if (hba[i]->errinfo_pool)
3539 pci_free_consistent(hba[i]->pdev,
3540 hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3541 hba[i]->errinfo_pool,
3542 hba[i]->errinfo_pool_dhandle);
3543 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3545 unregister_blkdev(hba[i]->major, hba[i]->devname);
3547 hba[i]->busy_initializing = 0;
3548 /* cleanup any queues that may have been initialized */
3549 for (j=0; j <= hba[i]->highest_lun; j++){
3550 drive_info_struct *drv = &(hba[i]->drv[j]);
3552 blk_cleanup_queue(drv->queue);
3555 * Deliberately omit pci_disable_device(): it does something nasty to
3556 * Smart Array controllers that pci_enable_device does not undo
3558 pci_release_regions(pdev);
3559 pci_set_drvdata(pdev, NULL);
3564 static void cciss_shutdown(struct pci_dev *pdev)
3566 ctlr_info_t *tmp_ptr;
3571 tmp_ptr = pci_get_drvdata(pdev);
3572 if (tmp_ptr == NULL)
3578 /* Turn board interrupts off and send the flush cache command. */
3579 /* sendcmd will turn off interrupts and send the flush;
3580 * this writes all data in the battery-backed cache out to disk. */
3581 memset(flush_buf, 0, 4);
3582 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3584 if (return_code == IO_OK) {
3585 printk(KERN_INFO "Completed flushing cache on controller %d\n", i);
3587 printk(KERN_WARNING "Error flushing cache on controller %d\n", i);
3589 free_irq(hba[i]->intr[2], hba[i]);
3592 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3594 ctlr_info_t *tmp_ptr;
3597 if (pci_get_drvdata(pdev) == NULL) {
3598 printk(KERN_ERR "cciss: Unable to remove device\n");
3601 tmp_ptr = pci_get_drvdata(pdev);
3603 if (hba[i] == NULL) {
3604 printk(KERN_ERR "cciss: device appears to "
3605 "already be removed \n");
3609 remove_proc_entry(hba[i]->devname, proc_cciss);
3610 unregister_blkdev(hba[i]->major, hba[i]->devname);
3612 /* remove it from the disk list */
3613 for (j = 0; j < CISS_MAX_LUN; j++) {
3614 struct gendisk *disk = hba[i]->gendisk[j];
3616 struct request_queue *q = disk->queue;
3618 if (disk->flags & GENHD_FL_UP)
3621 blk_cleanup_queue(q);
3625 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3627 cciss_shutdown(pdev);
3629 #ifdef CONFIG_PCI_MSI
3630 if (hba[i]->msix_vector)
3631 pci_disable_msix(hba[i]->pdev);
3632 else if (hba[i]->msi_vector)
3633 pci_disable_msi(hba[i]->pdev);
3634 #endif /* CONFIG_PCI_MSI */
3636 iounmap(hba[i]->vaddr);
3638 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
3639 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3640 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
3641 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3642 kfree(hba[i]->cmd_pool_bits);
3643 #ifdef CONFIG_CISS_SCSI_TAPE
3644 kfree(hba[i]->scsi_rejects.complete);
3647 * Deliberately omit pci_disable_device(): it does something nasty to
3648 * Smart Array controllers that pci_enable_device does not undo
3650 pci_release_regions(pdev);
3651 pci_set_drvdata(pdev, NULL);
3655 static struct pci_driver cciss_pci_driver = {
3657 .probe = cciss_init_one,
3658 .remove = __devexit_p(cciss_remove_one),
3659 .id_table = cciss_pci_device_id, /* id_table */
3660 .shutdown = cciss_shutdown,
3664 * This is it. Register the PCI driver information for the cards we control;
3665 * the OS will call our registered routines when it finds one of our cards.
3667 static int __init cciss_init(void)
3669 printk(KERN_INFO DRIVER_NAME "\n");
3671 /* Register for our PCI devices */
3672 return pci_register_driver(&cciss_pci_driver);
3675 static void __exit cciss_cleanup(void)
3679 pci_unregister_driver(&cciss_pci_driver);
3680 /* double check that all controller entries have been removed */
3681 for (i = 0; i < MAX_CTLR; i++) {
3682 if (hba[i] != NULL) {
3683 printk(KERN_WARNING "cciss: had to remove"
3684 " controller %d\n", i);
3685 cciss_remove_one(hba[i]->pdev);
3688 remove_proc_entry("cciss", proc_root_driver);
3691 static void fail_all_cmds(unsigned long ctlr)
3693 /* If we get here, the board is apparently dead. */
3694 ctlr_info_t *h = hba[ctlr];
3695 CommandList_struct *c;
3696 unsigned long flags;
3698 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3699 h->alive = 0; /* the controller apparently died... */
3701 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3703 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3705 /* move everything off the request queue onto the completed queue */
3706 while ((c = h->reqQ) != NULL) {
3707 removeQ(&(h->reqQ), c);
3709 addQ(&(h->cmpQ), c);
3712 /* Now, fail everything on the completed queue with a HW error */
3713 while ((c = h->cmpQ) != NULL) {
3714 removeQ(&h->cmpQ, c);
3715 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3716 if (c->cmd_type == CMD_RWREQ) {
3717 complete_command(h, c, 0);
3718 } else if (c->cmd_type == CMD_IOCTL_PEND)
3719 complete(c->waiting);
3720 #ifdef CONFIG_CISS_SCSI_TAPE
3721 else if (c->cmd_type == CMD_SCSI)
3722 complete_scsi_command(c, 0, 0);
3725 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3729 module_init(cciss_init);
3730 module_exit(cciss_cleanup);