/*
 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
 * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 */
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/types.h>
26 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/major.h>
32 #include <linux/bio.h>
33 #include <linux/blkpg.h>
34 #include <linux/timer.h>
35 #include <linux/proc_fs.h>
36 #include <linux/init.h>
37 #include <linux/hdreg.h>
38 #include <linux/spinlock.h>
39 #include <linux/compat.h>
40 #include <linux/blktrace_api.h>
41 #include <asm/uaccess.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/blkdev.h>
46 #include <linux/genhd.h>
47 #include <linux/completion.h>
49 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50 #define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
51 #define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
53 /* Embedded module documentation macros - see modules.h */
54 MODULE_AUTHOR("Hewlett-Packard Company");
55 MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
56 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 P400 P400i E200 E200i E500");
58 MODULE_LICENSE("GPL");
60 #include "cciss_cmd.h"
62 #include <linux/cciss_ioctl.h>
64 /* define the PCI info for the cards we can control */
65 static const struct pci_device_id cciss_pci_device_id[] = {
66 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
67 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
68 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
69 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
70 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
71 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
72 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
73 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
74 {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
75 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
76 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
77 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
78 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
79 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
80 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
81 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
82 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
83 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
84 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3233},
88 MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
90 /* board_id = Subsystem Device ID & Vendor ID
91 * product = Marketing Name for the board
92 * access = Address of the struct of function pointers
94 static struct board_type products[] = {
95 {0x40700E11, "Smart Array 5300", &SA5_access},
96 {0x40800E11, "Smart Array 5i", &SA5B_access},
97 {0x40820E11, "Smart Array 532", &SA5B_access},
98 {0x40830E11, "Smart Array 5312", &SA5B_access},
99 {0x409A0E11, "Smart Array 641", &SA5_access},
100 {0x409B0E11, "Smart Array 642", &SA5_access},
101 {0x409C0E11, "Smart Array 6400", &SA5_access},
102 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
103 {0x40910E11, "Smart Array 6i", &SA5_access},
104 {0x3225103C, "Smart Array P600", &SA5_access},
105 {0x3223103C, "Smart Array P800", &SA5_access},
106 {0x3234103C, "Smart Array P400", &SA5_access},
107 {0x3235103C, "Smart Array P400i", &SA5_access},
108 {0x3211103C, "Smart Array E200i", &SA5_access},
109 {0x3212103C, "Smart Array E200", &SA5_access},
110 {0x3213103C, "Smart Array E200i", &SA5_access},
111 {0x3214103C, "Smart Array E200i", &SA5_access},
112 {0x3215103C, "Smart Array E200i", &SA5_access},
113 {0x3233103C, "Smart Array E500", &SA5_access},
116 /* How long to wait (in milliseconds) for board to go into simple mode */
117 #define MAX_CONFIG_WAIT 30000
118 #define MAX_IOCTL_CONFIG_WAIT 1000
120 /*define how many times we will try a command because of bus resets */
121 #define MAX_CMD_RETRIES 3
123 #define READ_AHEAD 1024
124 #define NR_CMDS 384 /* #commands that can be outstanding */
127 /* Originally cciss driver only supports 8 major numbers */
128 #define MAX_CTLR_ORIG 8
130 static ctlr_info_t *hba[MAX_CTLR];
132 static void do_cciss_request(request_queue_t *q);
133 static irqreturn_t do_cciss_intr(int irq, void *dev_id);
134 static int cciss_open(struct inode *inode, struct file *filep);
135 static int cciss_release(struct inode *inode, struct file *filep);
136 static int cciss_ioctl(struct inode *inode, struct file *filep,
137 unsigned int cmd, unsigned long arg);
138 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
140 static int revalidate_allvol(ctlr_info_t *host);
141 static int cciss_revalidate(struct gendisk *disk);
142 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
143 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
146 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
147 sector_t *total_size, unsigned int *block_size);
148 static void cciss_read_capacity_16(int ctlr, int logvol, int withirq,
149 sector_t *total_size, unsigned int *block_size);
150 static void cciss_geometry_inquiry(int ctlr, int logvol,
151 int withirq, sector_t total_size,
152 unsigned int block_size, InquiryData_struct *inq_buff,
153 drive_info_struct *drv);
154 static void cciss_getgeometry(int cntl_num);
155 static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
157 static void start_io(ctlr_info_t *h);
158 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size,
159 unsigned int use_unit_num, unsigned int log_unit,
160 __u8 page_code, unsigned char *scsi3addr, int cmd_type);
161 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
162 unsigned int use_unit_num, unsigned int log_unit,
163 __u8 page_code, int cmd_type);
165 static void fail_all_cmds(unsigned long ctlr);
167 #ifdef CONFIG_PROC_FS
168 static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
169 int length, int *eof, void *data);
170 static void cciss_procinit(int i);
172 static void cciss_procinit(int i)
175 #endif /* CONFIG_PROC_FS */
178 static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
181 static struct block_device_operations cciss_fops = {
182 .owner = THIS_MODULE,
184 .release = cciss_release,
185 .ioctl = cciss_ioctl,
186 .getgeo = cciss_getgeo,
188 .compat_ioctl = cciss_compat_ioctl,
190 .revalidate_disk = cciss_revalidate,
194 * Enqueuing and dequeuing functions for cmdlists.
196 static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
200 c->next = c->prev = c;
202 c->prev = (*Qptr)->prev;
204 (*Qptr)->prev->next = c;
209 static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
210 CommandList_struct *c)
212 if (c && c->next != c) {
215 c->prev->next = c->next;
216 c->next->prev = c->prev;
223 #include "cciss_scsi.c" /* For SCSI tape support */
225 #ifdef CONFIG_PROC_FS
228 * Report information about this controller.
230 #define ENG_GIG 1000000000
231 #define ENG_GIG_FACTOR (ENG_GIG/512)
232 #define RAID_UNKNOWN 6
233 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
237 static struct proc_dir_entry *proc_cciss;
/*
 * /proc read handler: report controller identity, queue statistics, and a
 * per-logical-drive capacity/RAID-level table.
 * NOTE(review): the visible source has gaps -- declarations of ctlr, size,
 * i, len, pos, flags, several braces and the return are missing.  Comments
 * below describe only what the surviving lines demonstrably do.
 */
static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
			       int length, int *eof, void *data)
	ctlr_info_t *h = (ctlr_info_t *) data;
	drive_info_struct *drv;
	sector_t vol_sz, vol_sz_frac;
	/* prevent displaying bogus info during configuration
	 * or deconfiguration of a logical volume
	 */
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
	h->busy_configuring = 1;	/* claim the controller while formatting output */
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
	/* header: controller identity plus queue/command statistics */
	size = sprintf(buffer, "%s: HP %s Controller\n"
		       "Board ID: 0x%08lx\n"
		       "Firmware Version: %c%c%c%c\n"
		       "Logical drives: %d\n"
		       "Current Q depth: %d\n"
		       "Current # commands on controller: %d\n"
		       "Max Q depth since init: %d\n"
		       "Max # commands on controller since init: %d\n"
		       "Max SG entries since init: %d\n\n",
		       (unsigned long)h->board_id,
		       h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
		       h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
		       h->num_luns, h->Qdepth, h->commands_outstanding,
		       h->maxQsinceinit, h->max_outstanding, h->maxSG);
	/* append SCSI tape info (from cciss_scsi.c), then one line per LUN */
	cciss_proc_tape_report(ctlr, buffer, &pos, &len);
	for (i = 0; i <= h->highest_lun; i++) {
		/* split capacity into whole GB and fractional part */
		vol_sz = drv->nr_blocks;
		vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
		sector_div(vol_sz_frac, ENG_GIG_FACTOR);
		/* clamp unexpected raid levels into the "unknown" label slot */
		if (drv->raid_level > 5)
			drv->raid_level = RAID_UNKNOWN;
		size = sprintf(buffer + len, "cciss/c%dd%d:"
			       "\t%4u.%02uGB\tRAID %s\n",
			       ctlr, i, (int)vol_sz, (int)vol_sz_frac,
			       raid_label[drv->raid_level]);
	*start = buffer + offset;
	h->busy_configuring = 0;
/*
 * /proc write handler.  Accepts a short command string; the only command
 * acted on here is "engage scsi" (when CISS SCSI tape support is built in).
 * NOTE(review): the return-type line, the declarations of len/rc, and the
 * closing braces are missing from the visible source.
 */
cciss_proc_write(struct file *file, const char __user *buffer,
		 unsigned long count, void *data)
	unsigned char cmd[80];
#ifdef CONFIG_CISS_SCSI_TAPE
	ctlr_info_t *h = (ctlr_info_t *) data;
	/* bound the copy so cmd can always be NUL-terminated */
	if (count > sizeof(cmd) - 1)
	if (copy_from_user(cmd, buffer, count))
	len = strlen(cmd);	// above 3 lines ensure safety
	/* strip a trailing newline before the string compare */
	if (len && cmd[len - 1] == '\n')
# ifdef CONFIG_CISS_SCSI_TAPE
	if (strcmp("engage scsi", cmd) == 0) {
		rc = cciss_engage_scsi(h->ctlr);
	/* might be nice to have "disengage" too, but it's not
	   safely possible. (only 1 module use count, lock issues.) */
/*
 * Get us a file in /proc/cciss that says something about each controller.
 * Create /proc/cciss if it doesn't exist yet.
 */
static void __devinit cciss_procinit(int i)
	struct proc_dir_entry *pde;
	/* create the top-level /proc/cciss directory once, lazily */
	if (proc_cciss == NULL) {
		proc_cciss = proc_mkdir("cciss", proc_root_driver);
	pde = create_proc_read_entry(hba[i]->devname,
				     S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
				     proc_cciss, cciss_proc_get_info, hba[i]);
	/* NOTE(review): pde is dereferenced without a NULL check here --
	 * confirm against the full source that failure cannot occur */
	pde->write_proc = cciss_proc_write;
365 #endif /* CONFIG_PROC_FS */
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  For operations that can wait for kmalloc
 * to possibly sleep, this routine can be called with get_from_pool set to 0.
 * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc was.
 */
/*
 * Allocate a command block.  get_from_pool == 0: allocate fresh DMA-coherent
 * memory for the command and its error-info buffer (usable where sleeping
 * is allowed).  Otherwise claim a slot from the controller's preallocated
 * pool via the cmd_pool_bits bitmap.
 * NOTE(review): the visible source is truncated -- declarations of i and
 * temp64, NULL checks after the allocations, the do{ } opener for the
 * bitmap retry loop, and several braces are missing.
 */
static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
	CommandList_struct *c;
	dma_addr_t cmd_dma_handle, err_dma_handle;
	if (!get_from_pool) {
		/* standalone path: coherent command block... */
		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
			sizeof(CommandList_struct), &cmd_dma_handle);
		memset(c, 0, sizeof(CommandList_struct));
		/* ...plus a matching coherent error-info buffer */
		c->err_info = (ErrorInfo_struct *)
		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
		if (c->err_info == NULL) {
			/* unwind the command block on partial failure */
			pci_free_consistent(h->pdev,
				sizeof(CommandList_struct), c, cmd_dma_handle);
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	} else {		/* get it out of the controllers pool */
		/* atomically claim a free slot; retry if raced */
		i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
		} while (test_and_set_bit
			 (i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
		memset(c, 0, sizeof(CommandList_struct));
		/* pool entries are contiguous: index i locates both the
		 * command block and its DMA bus address */
		cmd_dma_handle = h->cmd_pool_dhandle
		    + i * sizeof(CommandList_struct);
		c->err_info = h->errinfo_pool + i;
		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
		err_dma_handle = h->errinfo_pool_dhandle
		    + i * sizeof(ErrorInfo_struct);
	c->busaddr = (__u32) cmd_dma_handle;
	/* record the error buffer's bus address in the command descriptor */
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
/*
 * Frees a command block that was previously allocated with cmd_alloc().
 * got_from_pool must match the flag used at allocation time: standalone
 * blocks go back to the DMA allocator, pool blocks just have their bitmap
 * slot cleared.
 */
static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
	if (!got_from_pool) {
		/* recover the error buffer's bus address from the descriptor */
		temp64.val32.lower = c->ErrDesc.Addr.lower;
		temp64.val32.upper = c->ErrDesc.Addr.upper;
		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
				    c->err_info, (dma_addr_t) temp64.val);
		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
				    c, (dma_addr_t) c->busaddr);
	/* NOTE(review): the else branch and the computation of pool index i
	 * are missing from the visible source */
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
458 static inline ctlr_info_t *get_host(struct gendisk *disk)
460 return disk->queue->queuedata;
463 static inline drive_info_struct *get_drv(struct gendisk *disk)
465 return disk->private_data;
/*
 * Open.  Make sure the device is really there.
 * NOTE(review): the error returns, the usage-count increment and the
 * closing braces are missing from the visible source.
 */
static int cciss_open(struct inode *inode, struct file *filep)
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
	printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */
	/* refuse opens while the controller or this drive is reconfiguring */
	if (host->busy_initializing || drv->busy_configuring)
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work.  Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue IOCTL to reread the
	 * disk information.  I don't think I really like this
	 * but I'm already using way too many device nodes to claim another one
	 * for "raw controller".
	 */
	if (drv->nr_blocks == 0) {
		if (iminor(inode) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (iminor(inode) & 0x0f) {
			/* if it is, make sure we have a LUN ID */
			} else if (drv->LunID == 0) {
		if (!capable(CAP_SYS_ADMIN))
/*
 * Release: called on close of a cciss block node.
 * NOTE(review): the usage-count decrement and the return statement are
 * missing from the visible source.
 */
static int cciss_release(struct inode *inode, struct file *filep)
	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
	printk(KERN_DEBUG "cciss_release %s\n",
	       inode->i_bdev->bd_disk->disk_name);
#endif				/* CCISS_DEBUG */
/*
 * Forward a compat-path ioctl to the native handler via the file's inode.
 * NOTE(review): the braces, the declaration of ret, the surrounding
 * locking (presumably lock_kernel/unlock_kernel in this era) and the
 * return are missing from the visible source -- confirm against the
 * full file.
 */
static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
	ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
537 static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
539 static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd,
/*
 * 32-bit compat ioctl entry point.  The plain control ioctls have
 * identical layout in 32- and 64-bit userland and are forwarded as-is;
 * only the two passthru ioctls need argument translation.
 * NOTE(review): the switch (cmd) opener, default case and closing braces
 * are missing from the visible source.
 */
static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		/* layout-compatible: pass straight through to the native path */
		return do_ioctl(f, cmd, arg);
	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(f, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(f, cmd, arg);
/*
 * Translate a 32-bit CCISS_PASSTHRU into the native layout: copy the
 * fixed fields, widen the user buffer pointer with compat_ptr(), stage
 * the native struct in compat user space, run the native ioctl, then copy
 * the controller's error_info back to the 32-bit caller.
 * NOTE(review): the second signature line, err accumulation on the first
 * three copies, and the error-return checks are missing from the visible
 * source -- the unchecked copy_from_user calls here would otherwise be
 * a bug.
 */
static int cciss_ioctl32_passthru(struct file *f, unsigned cmd,
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
		       sizeof(arg64.LUN_info));
	copy_from_user(&arg64.Request, &arg32->Request,
		       sizeof(arg64.Request));
	copy_from_user(&arg64.error_info, &arg32->error_info,
		       sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);	/* widen the 32-bit user pointer */
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long)p);
	/* propagate controller error details back to the 32-bit struct */
	copy_in_user(&arg32->error_info, &p->error_info,
		     sizeof(arg32->error_info));
/*
 * Same translation as cciss_ioctl32_passthru, but for BIG_PASSTHRU which
 * additionally carries a malloc_size (per-chunk allocation size).
 * NOTE(review): the second signature line and the error checks/returns
 * are missing from the visible source.
 */
static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd,
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
		       sizeof(arg64.LUN_info));
	copy_from_user(&arg64.Request, &arg32->Request,
		       sizeof(arg64.Request));
	copy_from_user(&arg64.error_info, &arg32->error_info,
		       sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);	/* widen the 32-bit user pointer */
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long)p);
	/* propagate controller error details back to the 32-bit struct */
	copy_in_user(&arg32->error_info, &p->error_info,
		     sizeof(arg32->error_info));
653 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
655 drive_info_struct *drv = get_drv(bdev->bd_disk);
660 geo->heads = drv->heads;
661 geo->sectors = drv->sectors;
662 geo->cylinders = drv->cylinders;
/*
 * Main ioctl dispatcher for the cciss block nodes: config-table queries
 * and updates, node-name management, volume revalidation, LUN info, and
 * the raw command passthru interfaces (single- and multi-SG).
 * NOTE(review): the visible source is heavily truncated -- the
 * switch (cmd) opener, many locals (i, flags, buff, temp64, sg_used,
 * left, sz, err), the results of most copy_{to,from}_user calls, return
 * statements and most closing braces are missing.  Comments describe
 * only the surviving lines.
 */
static int cciss_ioctl(struct inode *inode, struct file *filep,
		       unsigned int cmd, unsigned long arg)
	struct block_device *bdev = inode->i_bdev;
	struct gendisk *disk = bdev->bd_disk;
	ctlr_info_t *host = get_host(disk);
	drive_info_struct *drv = get_drv(disk);
	int ctlr = host->ctlr;
	void __user *argp = (void __user *)arg;
	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
#endif				/* CCISS_DEBUG */
	/* --- simple query ioctls: snapshot a value, copy it to userland --- */
	case CCISS_GETPCIINFO:
		cciss_pci_info_struct pciinfo;
		pciinfo.domain = pci_domain_nr(host->pdev->bus);
		pciinfo.bus = host->pdev->bus->number;
		pciinfo.dev_fn = host->pdev->devfn;
		pciinfo.board_id = host->board_id;
		(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
	case CCISS_GETINTINFO:
		cciss_coalint_struct intinfo;
		/* interrupt-coalescing settings live in the config table */
		readl(&host->cfgtable->HostWrite.CoalIntDelay);
		readl(&host->cfgtable->HostWrite.CoalIntCount);
		(argp, &intinfo, sizeof(cciss_coalint_struct)))
	case CCISS_SETINTINFO:
		cciss_coalint_struct intinfo;
		if (!capable(CAP_SYS_ADMIN))
		(&intinfo, argp, sizeof(cciss_coalint_struct)))
		if ((intinfo.delay == 0) && (intinfo.count == 0))
//			printk("cciss_ioctl: delay and count cannot be 0\n");
		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
		/* Update the field, and then ring the doorbell */
		writel(intinfo.delay,
		       &(host->cfgtable->HostWrite.CoalIntDelay));
		writel(intinfo.count,
		       &(host->cfgtable->HostWrite.CoalIntCount));
		writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
		/* poll until the controller acknowledges the change request */
		for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
			if (!(readl(host->vaddr + SA5_DOORBELL)
			/* delay and try again */
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		if (i >= MAX_IOCTL_CONFIG_WAIT)
	case CCISS_GETNODENAME:
		NodeName_type NodeName;
		/* server name is 16 bytes read from the config table */
		for (i = 0; i < 16; i++)
			readb(&host->cfgtable->ServerName[i]);
		if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
	case CCISS_SETNODENAME:
		NodeName_type NodeName;
		if (!capable(CAP_SYS_ADMIN))
		(NodeName, argp, sizeof(NodeName_type)))
		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
		/* Update the field, and then ring the doorbell */
		for (i = 0; i < 16; i++)
			&host->cfgtable->ServerName[i]);
		writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
		/* poll for acknowledgement, as in SETINTINFO above */
		for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
			if (!(readl(host->vaddr + SA5_DOORBELL)
			/* delay and try again */
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		if (i >= MAX_IOCTL_CONFIG_WAIT)
	case CCISS_GETHEARTBEAT:
		Heartbeat_type heartbeat;
		heartbeat = readl(&host->cfgtable->HeartBeat);
		(argp, &heartbeat, sizeof(Heartbeat_type)))
	case CCISS_GETBUSTYPES:
		BusTypes_type BusTypes;
		BusTypes = readl(&host->cfgtable->BusTypes);
		(argp, &BusTypes, sizeof(BusTypes_type)))
	case CCISS_GETFIRMVER:
		FirmwareVer_type firmware;
		/* firmware version is 4 ASCII bytes cached at init time */
		memcpy(firmware, host->firm_ver, 4);
		(argp, firmware, sizeof(FirmwareVer_type)))
	case CCISS_GETDRIVVER:
		DriverVer_type DriverVer = DRIVER_VERSION;
		(argp, &DriverVer, sizeof(DriverVer_type)))
	case CCISS_REVALIDVOLS:
		/* only allowed on the controller's whole-disk node 0 */
		if (bdev != bdev->bd_contains || drv != host->drv)
		return revalidate_allvol(host);
	case CCISS_GETLUNINFO:{
			LogvolInfo_struct luninfo;
			luninfo.LunID = drv->LunID;
			luninfo.num_opens = drv->usage_count;
			luninfo.num_parts = 0;
			if (copy_to_user(argp, &luninfo,
					 sizeof(LogvolInfo_struct)))
	case CCISS_DEREGDISK:
		return rebuild_lun_table(host, disk);
		/* (CCISS_REGNEWD path -- its case label is missing from the
		 * visible source) */
		return rebuild_lun_table(host, NULL);
	/* --- CCISS_PASSTHRU (label missing in visible source): send one
	 * raw command, with at most a single data buffer, synchronously --- */
		IOCTL_Command_struct iocommand;
		CommandList_struct *c;
		DECLARE_COMPLETION_ONSTACK(wait);
		if (!capable(CAP_SYS_RAWIO))
		(&iocommand, argp, sizeof(IOCTL_Command_struct)))
		/* a data transfer with no buffer makes no sense */
		if ((iocommand.buf_size < 1) &&
		    (iocommand.Request.Type.Direction != XFER_NONE)) {
#if 0				/* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
		/* Check kmalloc limits */
		if (iocommand.buf_size > 128000)
		if (iocommand.buf_size > 0) {
			buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (iocommand.Request.Type.Direction == XFER_WRITE) {
			/* Copy the data into the buffer we created */
			(buff, iocommand.buf, iocommand.buf_size)) {
			memset(buff, 0, iocommand.buf_size);
		if ((c = cmd_alloc(host, 0)) == NULL) {
		// Fill in the command type
		c->cmd_type = CMD_IOCTL_PEND;
		// Fill in Command Header
		c->Header.ReplyQueue = 0;	// unused in simple mode
		if (iocommand.buf_size > 0)	// buffer to fill
			c->Header.SGList = 1;
			c->Header.SGTotal = 1;
		} else			// no buffers to fill
			c->Header.SGList = 0;
			c->Header.SGTotal = 0;
		c->Header.LUN = iocommand.LUN_info;
		c->Header.Tag.lower = c->busaddr;	// use the kernel address the cmd block for tag
		// Fill in Request block
		c->Request = iocommand.Request;
		// Fill in the scatter gather information
		if (iocommand.buf_size > 0) {
			temp64.val = pci_map_single(host->pdev, buff,
						    PCI_DMA_BIDIRECTIONAL);
			c->SG[0].Addr.lower = temp64.val32.lower;
			c->SG[0].Addr.upper = temp64.val32.upper;
			c->SG[0].Len = iocommand.buf_size;
			c->SG[0].Ext = 0;	// we are not chaining
		/* Put the request on the tail of the request queue */
		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
		addQ(&host->reqQ, c);
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		/* block until the interrupt handler completes the command */
		wait_for_completion(&wait);
		/* unlock the buffers from DMA */
		temp64.val32.lower = c->SG[0].Addr.lower;
		temp64.val32.upper = c->SG[0].Addr.upper;
		pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
				 PCI_DMA_BIDIRECTIONAL);
		/* Copy the error information out */
		iocommand.error_info = *(c->err_info);
		(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
			cmd_free(host, c, 0);
		if (iocommand.Request.Type.Direction == XFER_READ) {
			/* Copy the data out of the buffer we created */
			(iocommand.buf, buff, iocommand.buf_size)) {
				cmd_free(host, c, 0);
		cmd_free(host, c, 0);
	case CCISS_BIG_PASSTHRU:{
			/* like PASSTHRU, but the user buffer may be split
			 * across up to MAXSGENTRIES chunks of malloc_size */
			BIG_IOCTL_Command_struct *ioc;
			CommandList_struct *c;
			unsigned char **buff = NULL;
			int *buff_size = NULL;
			DECLARE_COMPLETION_ONSTACK(wait);
			BYTE __user *data_ptr;
			if (!capable(CAP_SYS_RAWIO))
			ioc = (BIG_IOCTL_Command_struct *)
			    kmalloc(sizeof(*ioc), GFP_KERNEL);
			if (copy_from_user(ioc, argp, sizeof(*ioc))) {
			if ((ioc->buf_size < 1) &&
			    (ioc->Request.Type.Direction != XFER_NONE)) {
			/* Check kmalloc limits using all SGs */
			if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
			if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
			    kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
			buff_size = (int *)kmalloc(MAXSGENTRIES * sizeof(int),
			/* carve the user buffer into per-SG kernel chunks */
			left = ioc->buf_size;
			data_ptr = ioc->buf;
			    ioc->malloc_size) ? ioc->
			buff_size[sg_used] = sz;
			buff[sg_used] = kmalloc(sz, GFP_KERNEL);
			if (buff[sg_used] == NULL) {
			if (ioc->Request.Type.Direction == XFER_WRITE) {
				(buff[sg_used], data_ptr, sz)) {
				memset(buff[sg_used], 0, sz);
			if ((c = cmd_alloc(host, 0)) == NULL) {
			c->cmd_type = CMD_IOCTL_PEND;
			c->Header.ReplyQueue = 0;
			if (ioc->buf_size > 0) {
				c->Header.SGList = sg_used;
				c->Header.SGTotal = sg_used;
				c->Header.SGList = 0;
				c->Header.SGTotal = 0;
			c->Header.LUN = ioc->LUN_info;
			c->Header.Tag.lower = c->busaddr;
			c->Request = ioc->Request;
			if (ioc->buf_size > 0) {
				/* DMA-map each chunk into its own SG slot */
				for (i = 0; i < sg_used; i++) {
					pci_map_single(host->pdev, buff[i],
						       PCI_DMA_BIDIRECTIONAL);
					c->SG[i].Addr.lower =
					c->SG[i].Addr.upper =
					c->SG[i].Len = buff_size[i];
					c->SG[i].Ext = 0;	/* we are not chaining */
			/* Put the request on the tail of the request queue */
			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
			addQ(&host->reqQ, c);
			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
			wait_for_completion(&wait);
			/* unlock the buffers from DMA */
			for (i = 0; i < sg_used; i++) {
				temp64.val32.lower = c->SG[i].Addr.lower;
				temp64.val32.upper = c->SG[i].Addr.upper;
				pci_unmap_single(host->pdev,
						 (dma_addr_t) temp64.val, buff_size[i],
						 PCI_DMA_BIDIRECTIONAL);
			/* Copy the error information out */
			ioc->error_info = *(c->err_info);
			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
				cmd_free(host, c, 0);
			if (ioc->Request.Type.Direction == XFER_READ) {
				/* Copy the data out of the buffer we created */
				BYTE __user *ptr = ioc->buf;
				for (i = 0; i < sg_used; i++) {
					(ptr, buff[i], buff_size[i])) {
						cmd_free(host, c, 0);
					ptr += buff_size[i];
			cmd_free(host, c, 0);
			/* free the per-chunk kernel buffers */
			for (i = 0; i < sg_used; i++)
/*
 * revalidate_allvol is for online array config utilities.  After a
 * utility reconfigures the drives in the array, it can use this function
 * (through an ioctl) to make the driver zap any previous disk structs for
 * that controller and get new ones.
 *
 * Right now I'm using the getgeometry() function to do this, but this
 * function should probably be finer grained and allow you to revalidate one
 * particular logical volume (instead of all of them on a particular
 * controller).
 */
/* Zap and re-read all logical-drive structures for this controller.
 * Only safe when we are the sole opener (usage_count check below).
 * NOTE(review): several braces, error returns and parts of the memset
 * argument list are missing from the visible source. */
static int revalidate_allvol(ctlr_info_t *host)
	int ctlr = host->ctlr, i;
	unsigned long flags;
	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
	if (host->usage_count > 1) {
		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
		printk(KERN_WARNING "cciss: Device busy for volume"
		       " revalidation (usage=%d)\n", host->usage_count);
	/* pin the controller open while we rebuild its state */
	host->usage_count++;
	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
	/* tear down the request queue of every currently-registered disk */
	for (i = 0; i < NWD; i++) {
		struct gendisk *disk = host->gendisk[i];
		request_queue_t *q = disk->queue;
		if (disk->flags & GENHD_FL_UP)
		blk_cleanup_queue(q);
	/*
	 * Set the partition and block size structures for all volumes
	 * on this controller to zero.  We will reread all of this data.
	 */
	memset(host->drv, 0, sizeof(drive_info_struct)
	/*
	 * Tell the array controller not to give us any interrupts while
	 * we check the new geometry.  Then turn interrupts back on when
	 * we're done.
	 */
	host->access.set_intr_mask(host, CCISS_INTR_OFF);
	cciss_getgeometry(ctlr);
	host->access.set_intr_mask(host, CCISS_INTR_ON);
	/* Loop through each real device */
	for (i = 0; i < NWD; i++) {
		struct gendisk *disk = host->gendisk[i];
		drive_info_struct *drv = &(host->drv[i]);
		/* we must register the controller even if no disks exist */
		/* this is for the online array utilities */
		if (!drv->heads && i)
		blk_queue_hardsect_size(drv->queue, drv->block_size);
		set_capacity(disk, drv->nr_blocks);
	host->usage_count--;
1224 static inline void complete_buffers(struct bio *bio, int status)
1227 struct bio *xbh = bio->bi_next;
1228 int nr_sectors = bio_sectors(bio);
1230 bio->bi_next = NULL;
1231 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
/*
 * Restart per-disk request queues round-robin, starting at next_to_run,
 * for as long as free command slots remain.
 * NOTE(review): declaration of i, several braces and the early return are
 * missing from the visible source; the locking contract (caller holds
 * h->lock, per cciss_softirq_done) should be confirmed against the full
 * file.
 */
static void cciss_check_queues(ctlr_info_t *h)
	int start_queue = h->next_to_run;
	/* check to see if we have maxed out the number of commands that can
	 * be placed on the queue.  If so then exit.  We do this check here
	 * in case the interrupt we serviced was from an ioctl and did not
	 * free any new commands.
	 */
	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
	/* We have room on the queue for more commands.  Now we need to queue
	 * them up.  We will also keep track of the next queue to run so
	 * that every queue gets a chance to be started first.
	 */
	for (i = 0; i < h->highest_lun + 1; i++) {
		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
		/* make sure the disk has been added and the drive is real
		 * because this can be called from the middle of init_one.
		 */
		if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
		blk_start_queue(h->gendisk[curr_queue]->queue);
		/* check to see if we have maxed out the number of commands
		 * that can be placed on the queue.
		 */
		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
			/* remember where to resume so queues rotate fairly */
			if (curr_queue == start_queue) {
				(start_queue + 1) % (h->highest_lun + 1);
			h->next_to_run = curr_queue;
		curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
/*
 * Softirq-context completion handler: unmap the command's scatter-gather
 * DMA, end all bios, retire the request, return the pool command, and
 * kick any stalled per-disk queues.
 * NOTE(review): declarations of i, ddir, temp64, the else keyword and
 * several braces are missing from the visible source.
 */
static void cciss_softirq_done(struct request *rq)
	CommandList_struct *cmd = rq->completion_data;
	ctlr_info_t *h = hba[cmd->ctlr];
	unsigned long flags;
	/* pick the unmap direction matching the original transfer */
	if (cmd->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
		ddir = PCI_DMA_TODEVICE;
	/* command did not need to be retried */
	/* unmap the DMA mapping for all the scatter gather elements */
	for (i = 0; i < cmd->Header.SGList; i++) {
		temp64.val32.lower = cmd->SG[i].Addr.lower;
		temp64.val32.upper = cmd->SG[i].Addr.upper;
		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
	complete_buffers(rq->bio, rq->errors);
	printk("Done with %p\n", rq);
#endif				/* CCISS_DEBUG */
	add_disk_randomness(rq->rq_disk);
	/* hold h->lock while retiring the request and freeing the command */
	spin_lock_irqsave(&h->lock, flags);
	end_that_request_last(rq, rq->errors);
	cmd_free(h, cmd, 1);
	cciss_check_queues(h);
	spin_unlock_irqrestore(&h->lock, flags);
/* This function will check the usage_count of the drive to be updated/added.
 * If the usage_count is zero then the drive information will be updated and
 * the disk will be re-registered with the kernel.  If not then it will be
 * left alone for the next reboot.  The exception to this is disk 0 which
 * will always be left registered with the kernel since it is also the
 * controller node.  Any changes to disk 0 will show up on the next
 * reboot.
 */
/*
 * Refresh capacity and geometry for one logical drive and (re)wire its
 * request queue.  Selects 10- vs 16-byte READ/WRITE CDBs based on whether
 * READ CAPACITY(10) reports the >2TB sentinel.
 * NOTE(review): declaration of ret, the in-use early return, the else
 * keyword before the 10-byte CDB branch, the add_disk path, the free of
 * inq_buff and several braces are missing from the visible source.
 */
static void cciss_update_drive_info(int ctlr, int drv_index)
	ctlr_info_t *h = hba[ctlr];
	struct gendisk *disk;
	InquiryData_struct *inq_buff = NULL;
	unsigned int block_size;
	sector_t total_size;
	unsigned long flags = 0;
	/* if the disk already exists then deregister it before proceeding */
	if (h->drv[drv_index].raid_level != -1) {
		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
		h->drv[drv_index].busy_configuring = 1;
		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
		ret = deregister_disk(h->gendisk[drv_index],
				      &h->drv[drv_index], 0);
		h->drv[drv_index].busy_configuring = 0;
	/* If the disk is in use return */
	/* Get information about the disk and modify the driver structure */
	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (inq_buff == NULL)
	cciss_read_capacity(ctlr, drv_index, 1,
			    &total_size, &block_size);
	/* total size = last LBA + 1 */
	/* FFFFFFFF + 1 = 0, cannot have a logical volume of size 0 */
	/* so we assume this volume must be >2TB in size */
	if (total_size == (__u32) 0) {
		cciss_read_capacity_16(ctlr, drv_index, 1,
				       &total_size, &block_size);
		h->cciss_read = CCISS_READ_16;
		h->cciss_write = CCISS_WRITE_16;
		/* (else branch: small volume, use 10-byte CDBs) */
		h->cciss_read = CCISS_READ_10;
		h->cciss_write = CCISS_WRITE_10;
	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
			       inq_buff, &h->drv[drv_index]);
	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index].nr_blocks);
	/* if it's the controller it's already added */
	disk->queue = blk_init_queue(do_cciss_request, &h->lock);
	/* Set up queue information */
	disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
	blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
	/* This is a hardware imposed limit. */
	blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
	/* This is a limit in the driver and could be eliminated. */
	blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
	blk_queue_max_sectors(disk->queue, 512);
	/* route completions through the softirq path defined above */
	blk_queue_softirq_done(disk->queue, cciss_softirq_done);
	disk->queue->queuedata = hba[ctlr];
	blk_queue_hardsect_size(disk->queue,
				hba[ctlr]->drv[drv_index].block_size);
	h->drv[drv_index].queue = disk->queue;
	printk(KERN_ERR "cciss: out of memory\n");
1409 /* This function will find the first index of the controllers drive array
1410 * that has a -1 for the raid_level and will return that index. This is
1411 * where new drives will be added. If the index to be returned is greater
1412 * than the highest_lun index for the controller then highest_lun is set
1413 * to this new index. If there are no available indexes then -1 is returned.
/*
 * Return the first free slot (raid_level == -1) in the controller's drive
 * array, bumping highest_lun if the slot extends the in-use range.
 * Returns -1 when all CISS_MAX_LUN slots are taken (return paths are
 * outside this excerpt).
 */
1415 static int cciss_find_free_drive_index(int ctlr)
1419 for (i = 0; i < CISS_MAX_LUN; i++) {
1420 if (hba[ctlr]->drv[i].raid_level == -1) {
1421 if (i > hba[ctlr]->highest_lun)
1422 hba[ctlr]->highest_lun = i;
1429 /* This function will add and remove logical drives from the Logical
1430 * drive array of the controller and maintain persistency of ordering
1431 * so that mount points are preserved until the next reboot. This allows
1432 * for the removal of logical drives in the middle of the drive array
1433 * without a re-ordering of those drives.
1435 * h = The controller to perform the operations on
1436 * del_disk = The disk to remove if specified. If the value given
1437 * is NULL then no disk is removed.
/*
 * Re-synchronize the driver's logical-drive array with the controller's
 * CISS_REPORT_LOG list, preserving existing slot ordering (and hence
 * device names/mount points).  If del_disk is non-NULL, only that disk
 * is deregistered and the function returns early.
 * Returns -1 at the end by design (see comment near the bottom) so the
 * array-config utility stops re-invoking it.
 */
1439 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1443 ReportLunData_struct *ld_buff = NULL;
1444 drive_info_struct *drv = NULL;
1451 unsigned long flags;
1453 /* Set busy_configuring flag for this operation */
1454 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1455 if (h->num_luns >= CISS_MAX_LUN) {
1456 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
/* Only one rebuild may run at a time; bail if another is in flight. */
1460 if (h->busy_configuring) {
1461 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1464 h->busy_configuring = 1;
1466 /* if del_disk is NULL then we are being called to add a new disk
1467 * and update the logical drive table. If it is not NULL then
1468 * we will check if the disk is in use or not.
1470 if (del_disk != NULL) {
1471 drv = get_drv(del_disk);
1472 drv->busy_configuring = 1;
1473 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1474 return_code = deregister_disk(del_disk, drv, 1);
1475 drv->busy_configuring = 0;
1476 h->busy_configuring = 0;
1479 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1480 if (!capable(CAP_SYS_RAWIO))
1483 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1484 if (ld_buff == NULL)
/* Ask the firmware for the current logical-volume list. */
1487 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1488 sizeof(ReportLunData_struct), 0,
1491 if (return_code == IO_OK) {
/* LUNListLength is big-endian; assemble it byte by byte. */
1493 (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
1496 (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
1499 (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
1502 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1503 } else { /* reading number of logical volumes failed */
1504 printk(KERN_WARNING "cciss: report logical volume"
1505 " command failed\n");
1510 num_luns = listlength / 8; /* 8 bytes per entry */
1511 if (num_luns > CISS_MAX_LUN) {
1512 num_luns = CISS_MAX_LUN;
1513 printk(KERN_WARNING "cciss: more luns configured"
1514 " on controller than can be handled by"
1518 /* Compare controller drive array to drivers drive array.
1519 * Check for updates in the drive information and any new drives
1520 * on the controller.
1522 for (i = 0; i < num_luns; i++) {
/* LUN ID bytes arrive little-endian in the report buffer. */
1528 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1530 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1532 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1533 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1535 /* Find if the LUN is already in the drive array
1536 * of the controller. If so then update its info
1537 * if not is use. If it does not exist then find
1538 * the first free index and add it.
1540 for (j = 0; j <= h->highest_lun; j++) {
1541 if (h->drv[j].LunID == lunid) {
1547 /* check if the drive was found already in the array */
1549 drv_index = cciss_find_free_drive_index(ctlr);
1550 if (drv_index == -1)
1554 h->drv[drv_index].LunID = lunid;
1555 cciss_update_drive_info(ctlr, drv_index);
1561 h->busy_configuring = 0;
1562 /* We return -1 here to tell the ACU that we have registered/updated
1563 * all of the drives that we can and to keep it from calling us
1568 printk(KERN_ERR "cciss: out of memory\n");
1572 /* This function will deregister the disk and it's queue from the
1573 * kernel. It must be called with the controller lock held and the
1574 * drv structures busy_configuring flag set. It's parameters are:
1576 * disk = This is the disk to be deregistered
1577 * drv = This is the drive_info_struct associated with the disk to be
1578 * deregistered. It contains information about the disk used
1580 * clear_all = This flag determines whether or not the disk information
1581 * is going to be completely cleared out and the highest_lun
1582 * reset. Sometimes we want to clear out information about
1583 * the disk in preparation for re-adding it. In this case
1584 * the highest_lun should be left unchanged and the LunID
1585 * should not be cleared.
/*
 * Remove a logical drive's gendisk and queue from the kernel.
 * Caller must hold the controller lock and have set drv->busy_configuring.
 * clear_all != 0 wipes the slot completely (including LunID/highest_lun);
 * clear_all == 0 keeps identity fields so the drive can be re-added.
 * Disk 0 is never deregistered because it doubles as the controller node.
 */
1587 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1590 ctlr_info_t *h = get_host(disk);
1592 if (!capable(CAP_SYS_RAWIO))
1595 /* make sure logical volume is NOT is use */
1596 if (clear_all || (h->gendisk[0] == disk)) {
/* usage_count of 1 is our own reference; >1 means someone has it open. */
1597 if (drv->usage_count > 1)
1599 } else if (drv->usage_count > 0)
1602 /* invalidate the devices and deregister the disk. If it is disk
1603 * zero do not deregister it but just zero out it's values. This
1604 * allows us to delete disk zero but keep the controller registered.
1606 if (h->gendisk[0] != disk) {
1608 request_queue_t *q = disk->queue;
1609 if (disk->flags & GENHD_FL_UP)
1612 blk_cleanup_queue(q);
1619 /* zero out the disk size info */
1621 drv->block_size = 0;
1625 drv->raid_level = -1; /* This can be used as a flag variable to
1626 * indicate that this element of the drive
1631 /* check to see if it was the last disk */
1632 if (drv == h->drv + h->highest_lun) {
1633 /* if so, find the new hightest lun */
1634 int i, newhighest = -1;
1635 for (i = 0; i < h->highest_lun; i++) {
1636 /* if the disk has size > 0, it is available */
1637 if (h->drv[i].heads)
1640 h->highest_lun = newhighest;
/*
 * Populate a CommandList_struct for either a firmware command (TYPE_CMD)
 * or a message (TYPE_MSG), set up its single scatter-gather entry via
 * pci_map_single, and select the addressing mode:
 *   use_unit_num == 0: address the controller itself,
 *   use_unit_num == 1: volume-set addressing via h->drv[log_unit].LunID,
 *   use_unit_num == 2: peripheral addressing via scsi3addr.
 * Returns IO_OK on success (error paths are outside this excerpt).
 */
1648 static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
1649 1: address logical volume log_unit,
1650 2: periph device address is scsi3addr */
1651 unsigned int log_unit, __u8 page_code,
1652 unsigned char *scsi3addr, int cmd_type)
1654 ctlr_info_t *h = hba[ctlr];
1655 u64bit buff_dma_handle;
1658 c->cmd_type = CMD_IOCTL_PEND;
1659 c->Header.ReplyQueue = 0;
/* One SG entry when a data buffer is supplied, none otherwise. */
1661 c->Header.SGList = 1;
1662 c->Header.SGTotal = 1;
1664 c->Header.SGList = 0;
1665 c->Header.SGTotal = 0;
/* Tag the command with its bus address so completions can be matched. */
1667 c->Header.Tag.lower = c->busaddr;
1669 c->Request.Type.Type = cmd_type;
1670 if (cmd_type == TYPE_CMD) {
1673 /* If the logical unit number is 0 then, this is going
1674 to controller so It's a physical command
1675 mode = 0 target = 0. So we have nothing to write.
1676 otherwise, if use_unit_num == 1,
1677 mode = 1(volume set addressing) target = LUNID
1678 otherwise, if use_unit_num == 2,
1679 mode = 0(periph dev addr) target = scsi3addr */
1680 if (use_unit_num == 1) {
1681 c->Header.LUN.LogDev.VolId =
1682 h->drv[log_unit].LunID;
1683 c->Header.LUN.LogDev.Mode = 1;
1684 } else if (use_unit_num == 2) {
1685 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
1687 c->Header.LUN.LogDev.Mode = 0;
1689 /* are we trying to read a vital product page */
1690 if (page_code != 0) {
/* EVPD bit + page code per the SCSI INQUIRY CDB layout. */
1691 c->Request.CDB[1] = 0x01;
1692 c->Request.CDB[2] = page_code;
1694 c->Request.CDBLen = 6;
1695 c->Request.Type.Attribute = ATTR_SIMPLE;
1696 c->Request.Type.Direction = XFER_READ;
1697 c->Request.Timeout = 0;
1698 c->Request.CDB[0] = CISS_INQUIRY;
1699 c->Request.CDB[4] = size & 0xFF;
1701 case CISS_REPORT_LOG:
1702 case CISS_REPORT_PHYS:
1703 /* Talking to controller so It's a physical command
1704 mode = 00 target = 0. Nothing to write.
1706 c->Request.CDBLen = 12;
1707 c->Request.Type.Attribute = ATTR_SIMPLE;
1708 c->Request.Type.Direction = XFER_READ;
1709 c->Request.Timeout = 0;
1710 c->Request.CDB[0] = cmd;
/* Allocation length, big-endian, in CDB bytes 6-9. */
1711 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1712 c->Request.CDB[7] = (size >> 16) & 0xFF;
1713 c->Request.CDB[8] = (size >> 8) & 0xFF;
1714 c->Request.CDB[9] = size & 0xFF;
1717 case CCISS_READ_CAPACITY:
1718 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1719 c->Header.LUN.LogDev.Mode = 1;
1720 c->Request.CDBLen = 10;
1721 c->Request.Type.Attribute = ATTR_SIMPLE;
1722 c->Request.Type.Direction = XFER_READ;
1723 c->Request.Timeout = 0;
1724 c->Request.CDB[0] = cmd;
1726 case CCISS_READ_CAPACITY_16:
1727 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1728 c->Header.LUN.LogDev.Mode = 1;
1729 c->Request.CDBLen = 16;
1730 c->Request.Type.Attribute = ATTR_SIMPLE;
1731 c->Request.Type.Direction = XFER_READ;
1732 c->Request.Timeout = 0;
1733 c->Request.CDB[0] = cmd;
/* 0x10 = SERVICE ACTION: READ CAPACITY(16). */
1734 c->Request.CDB[1] = 0x10;
1735 c->Request.CDB[10] = (size >> 24) & 0xFF;
1736 c->Request.CDB[11] = (size >> 16) & 0xFF;
1737 c->Request.CDB[12] = (size >> 8) & 0xFF;
1738 c->Request.CDB[13] = size & 0xFF;
/* NOTE(review): the next two lines duplicate the Timeout/CDB[0]
 * assignments already made above in this case — redundant but
 * harmless; candidate for removal. */
1739 c->Request.Timeout = 0;
1740 c->Request.CDB[0] = cmd;
1742 case CCISS_CACHE_FLUSH:
1743 c->Request.CDBLen = 12;
1744 c->Request.Type.Attribute = ATTR_SIMPLE;
1745 c->Request.Type.Direction = XFER_WRITE;
1746 c->Request.Timeout = 0;
1747 c->Request.CDB[0] = BMIC_WRITE;
1748 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1752 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1755 } else if (cmd_type == TYPE_MSG) {
1757 case 0: /* ABORT message */
1758 c->Request.CDBLen = 12;
1759 c->Request.Type.Attribute = ATTR_SIMPLE;
1760 c->Request.Type.Direction = XFER_WRITE;
1761 c->Request.Timeout = 0;
1762 c->Request.CDB[0] = cmd; /* abort */
1763 c->Request.CDB[1] = 0; /* abort a command */
1764 /* buff contains the tag of the command to abort */
1765 memcpy(&c->Request.CDB[4], buff, 8);
1767 case 1: /* RESET message */
1768 c->Request.CDBLen = 12;
1769 c->Request.Type.Attribute = ATTR_SIMPLE;
1770 c->Request.Type.Direction = XFER_WRITE;
1771 c->Request.Timeout = 0;
1772 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
1773 c->Request.CDB[0] = cmd; /* reset */
1774 c->Request.CDB[1] = 0x04; /* reset a LUN */
1776 case 3: /* No-Op message */
1777 c->Request.CDBLen = 1;
1778 c->Request.Type.Attribute = ATTR_SIMPLE;
1779 c->Request.Type.Direction = XFER_WRITE;
1780 c->Request.Timeout = 0;
1781 c->Request.CDB[0] = cmd;
1785 "cciss%d: unknown message type %d\n", ctlr, cmd);
1790 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1793 /* Fill in the scatter gather information */
/* Bidirectional map because the caller's transfer direction varies. */
1795 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1797 PCI_DMA_BIDIRECTIONAL);
1798 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1799 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1800 c->SG[0].Len = size;
1801 c->SG[0].Ext = 0; /* we are not chaining */
/*
 * Send a single command to the controller and sleep (interrupt-driven,
 * via an on-stack completion) until it finishes.  Retries unsolicited
 * aborts up to MAX_CMD_RETRIES.  Returns IO_OK or IO_ERROR.
 * Must not be called from init/poll context — use sendcmd() there.
 */
1806 static int sendcmd_withirq(__u8 cmd,
1810 unsigned int use_unit_num,
1811 unsigned int log_unit, __u8 page_code, int cmd_type)
1813 ctlr_info_t *h = hba[ctlr];
1814 CommandList_struct *c;
1815 u64bit buff_dma_handle;
1816 unsigned long flags;
1818 DECLARE_COMPLETION_ONSTACK(wait);
1820 if ((c = cmd_alloc(h, 0)) == NULL)
1822 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1823 log_unit, page_code, NULL, cmd_type);
1824 if (return_status != IO_OK) {
1826 return return_status;
1831 /* Put the request on the tail of the queue and send it */
1832 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1836 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
/* Blocks here until the interrupt handler signals completion. */
1838 wait_for_completion(&wait);
1840 if (c->err_info->CommandStatus != 0) { /* an error has occurred */
1841 switch (c->err_info->CommandStatus) {
1842 case CMD_TARGET_STATUS:
1843 printk(KERN_WARNING "cciss: cmd %p has "
1844 " completed with errors\n", c);
1845 if (c->err_info->ScsiStatus) {
1846 printk(KERN_WARNING "cciss: cmd %p "
1847 "has SCSI Status = %x\n",
1848 c, c->err_info->ScsiStatus);
1852 case CMD_DATA_UNDERRUN:
1853 case CMD_DATA_OVERRUN:
1854 /* expected for inquire and report lun commands */
1857 printk(KERN_WARNING "cciss: Cmd %p is "
1858 "reported invalid\n", c);
1859 return_status = IO_ERROR;
1861 case CMD_PROTOCOL_ERR:
1862 printk(KERN_WARNING "cciss: cmd %p has "
1863 "protocol error \n", c);
1864 return_status = IO_ERROR;
1866 case CMD_HARDWARE_ERR:
1867 printk(KERN_WARNING "cciss: cmd %p had "
1868 " hardware error\n", c);
1869 return_status = IO_ERROR;
1871 case CMD_CONNECTION_LOST:
1872 printk(KERN_WARNING "cciss: cmd %p had "
1873 "connection lost\n", c);
1874 return_status = IO_ERROR;
1877 printk(KERN_WARNING "cciss: cmd %p was "
1879 return_status = IO_ERROR;
1881 case CMD_ABORT_FAILED:
1882 printk(KERN_WARNING "cciss: cmd %p reports "
1883 "abort failed\n", c);
1884 return_status = IO_ERROR;
1886 case CMD_UNSOLICITED_ABORT:
1888 "cciss%d: unsolicited abort %p\n", ctlr, c);
/* Unsolicited abort is retryable: reset state and resubmit. */
1889 if (c->retry_count < MAX_CMD_RETRIES) {
1891 "cciss%d: retrying %p\n", ctlr, c);
1893 /* erase the old error information */
1894 memset(c->err_info, 0,
1895 sizeof(ErrorInfo_struct));
1896 return_status = IO_OK;
/* Re-arm the on-stack completion before resubmitting. */
1897 INIT_COMPLETION(wait);
1900 return_status = IO_ERROR;
1903 printk(KERN_WARNING "cciss: cmd %p returned "
1904 "unknown status %x\n", c,
1905 c->err_info->CommandStatus);
1906 return_status = IO_ERROR;
1909 /* unlock the buffers from DMA */
1910 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1911 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1912 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
1913 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1915 return return_status;
/*
 * Read the vendor-specific geometry VPD page (0xC1) for a logical volume
 * and fill in heads/sectors/cylinders/raid_level in *drv.  Falls back to
 * a synthetic geometry (computed from total_size) when the volume does
 * not report one.  withirq selects interrupt-driven vs. polled send.
 */
1918 static void cciss_geometry_inquiry(int ctlr, int logvol,
1919 int withirq, sector_t total_size,
1920 unsigned int block_size,
1921 InquiryData_struct *inq_buff,
1922 drive_info_struct *drv)
1927 memset(inq_buff, 0, sizeof(InquiryData_struct));
1929 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1930 inq_buff, sizeof(*inq_buff), 1,
1931 logvol, 0xC1, TYPE_CMD);
1933 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1934 sizeof(*inq_buff), 1, logvol, 0xC1, NULL,
1936 if (return_code == IO_OK) {
/* 0xFF in the raid_level byte means geometry is not supported. */
1937 if (inq_buff->data_byte[8] == 0xFF) {
1939 "cciss: reading geometry failed, volume "
1940 "does not support reading geometry\n");
1942 drv->sectors = 32; // Sectors per track
1944 drv->heads = inq_buff->data_byte[6];
1945 drv->sectors = inq_buff->data_byte[7];
1946 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1947 drv->cylinders += inq_buff->data_byte[5];
1948 drv->raid_level = inq_buff->data_byte[8];
1950 drv->block_size = block_size;
1951 drv->nr_blocks = total_size;
/* Derive cylinders from capacity: cylinders = blocks / (heads*sectors). */
1952 t = drv->heads * drv->sectors;
1954 unsigned rem = sector_div(total_size, t);
1957 drv->cylinders = total_size;
1959 } else { /* Get geometry failed */
1960 printk(KERN_WARNING "cciss: reading geometry failed\n");
1962 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
1963 drv->heads, drv->sectors, drv->cylinders);
/*
 * Issue a 10-byte READ CAPACITY for a logical volume and return
 * *total_size = last LBA + 1 and *block_size in bytes.  On failure
 * block_size falls back to BLOCK_SIZE (total_size handling is in a
 * line outside this excerpt).  withirq selects the send path.
 */
1967 cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
1968 unsigned int *block_size)
1970 ReadCapdata_struct *buf;
1972 buf = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
1974 printk(KERN_WARNING "cciss: out of memory\n");
1977 memset(buf, 0, sizeof(ReadCapdata_struct));
1979 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1980 ctlr, buf, sizeof(ReadCapdata_struct),
1981 1, logvol, 0, TYPE_CMD);
1983 return_code = sendcmd(CCISS_READ_CAPACITY,
1984 ctlr, buf, sizeof(ReadCapdata_struct),
1985 1, logvol, 0, NULL, TYPE_CMD);
1986 if (return_code == IO_OK) {
/* Device reports the last LBA big-endian; +1 converts to a count. */
1987 *total_size = be32_to_cpu(*(__u32 *) buf->total_size)+1;
1988 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
1989 } else { /* read capacity command failed */
1990 printk(KERN_WARNING "cciss: read capacity failed\n");
1992 *block_size = BLOCK_SIZE;
/* total_size == 0 here signals a >2TB volume to the caller. */
1994 if (*total_size != (__u32) 0)
1995 printk(KERN_INFO " blocks= %llu block_size= %d\n",
1996 (unsigned long long)*total_size, *block_size);
/*
 * 16-byte READ CAPACITY variant for volumes larger than 2TB: returns
 * *total_size = last LBA + 1 (64-bit) and *block_size in bytes.
 * Same fallback behavior as cciss_read_capacity on failure.
 */
2002 cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size)
2004 ReadCapdata_struct_16 *buf;
2006 buf = kmalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
2008 printk(KERN_WARNING "cciss: out of memory\n");
2011 memset(buf, 0, sizeof(ReadCapdata_struct_16));
2013 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
2014 ctlr, buf, sizeof(ReadCapdata_struct_16),
2015 1, logvol, 0, TYPE_CMD);
2018 return_code = sendcmd(CCISS_READ_CAPACITY_16,
2019 ctlr, buf, sizeof(ReadCapdata_struct_16),
2020 1, logvol, 0, NULL, TYPE_CMD);
2022 if (return_code == IO_OK) {
/* 64-bit big-endian last LBA; +1 converts to a block count. */
2023 *total_size = be64_to_cpu(*(__u64 *) buf->total_size)+1;
2024 *block_size = be32_to_cpu(*(__u32 *) buf->block_size);
2025 } else { /* read capacity command failed */
2026 printk(KERN_WARNING "cciss: read capacity failed\n");
2028 *block_size = BLOCK_SIZE;
2030 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2031 (unsigned long long)*total_size, *block_size);
/*
 * Block-device revalidate hook: re-read a disk's capacity and geometry
 * from the controller and push the new size/sector-size into the queue.
 * Locates the drive's logvol index by matching LunID first.
 */
2036 static int cciss_revalidate(struct gendisk *disk)
2038 ctlr_info_t *h = get_host(disk);
2039 drive_info_struct *drv = get_drv(disk);
2042 unsigned int block_size;
2043 sector_t total_size;
2044 InquiryData_struct *inq_buff = NULL;
2046 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2047 if (h->drv[logvol].LunID == drv->LunID) {
2056 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
2057 if (inq_buff == NULL) {
2058 printk(KERN_WARNING "cciss: out of memory\n");
/* Use the CDB size the controller was put in during initial scan. */
2061 if (h->cciss_read == CCISS_READ_10) {
2062 cciss_read_capacity(h->ctlr, logvol, 1,
2063 &total_size, &block_size);
2065 cciss_read_capacity_16(h->ctlr, logvol, 1,
2066 &total_size, &block_size);
2068 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
2071 blk_queue_hardsect_size(drv->queue, drv->block_size);
2072 set_capacity(disk, drv->nr_blocks);
2079 * Wait polling for a command to complete.
2080 * The memory mapped FIFO is polled for the completion.
2081 * Used only at init time, interrupts from the HBA are disabled.
/*
 * Poll the controller's completion FIFO for up to ~20 seconds.
 * Init-time only: controller interrupts are disabled here.
 * Returns the completed command's tag, or (per the comment below, the
 * return itself is outside this excerpt) a sentinel on timeout.
 */
2083 static unsigned long pollcomplete(int ctlr)
2088 /* Wait (up to 20 seconds) for a command to complete */
2090 for (i = 20 * HZ; i > 0; i--) {
2091 done = hba[ctlr]->access.command_completed(hba[ctlr]);
/* Nothing completed yet: sleep one jiffy and poll again. */
2092 if (done == FIFO_EMPTY)
2093 schedule_timeout_uninterruptible(1);
2097 /* Invalid address to tell caller we ran out of time */
/*
 * Stash an unexpected completion seen while sendcmd() was polling, so
 * the interrupt handler can process it later.  Only expected while the
 * SCSI tape error-handling path is issuing aborts/resets; otherwise an
 * unexpected completion is logged as an error.
 */
2101 static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
2103 /* We get in here if sendcmd() is polling for completions
2104 and gets some command back that it wasn't expecting --
2105 something other than that which it just sent down.
2106 Ordinarily, that shouldn't happen, but it can happen when
2107 the scsi tape stuff gets into error handling mode, and
2108 starts using sendcmd() to try to abort commands and
2109 reset tape drives. In that case, sendcmd may pick up
2110 completions of commands that were sent to logical drives
2111 through the block i/o system, or cciss ioctls completing, etc.
2112 In that case, we need to save those completions for later
2113 processing by the interrupt handler.
2116 #ifdef CONFIG_CISS_SCSI_TAPE
2117 struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects;
2119 /* If it's not the scsi tape stuff doing error handling, (abort */
2120 /* or reset) then we don't expect anything weird. */
2121 if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) {
2123 printk(KERN_WARNING "cciss cciss%d: SendCmd "
2124 "Invalid command list address returned! (%lx)\n",
2126 /* not much we can do. */
2127 #ifdef CONFIG_CISS_SCSI_TAPE
2131 /* We've sent down an abort or reset, but something else
2133 if (srl->ncompletions >= (NR_CMDS + 2)) {
2134 /* Uh oh. No room to save it for later... */
2135 printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
2136 "reject list overflow, command lost!\n", ctlr);
2139 /* Save it for later */
2140 srl->complete[srl->ncompletions] = complete;
2141 srl->ncompletions++;
2147 * Send a command to the controller, and wait for it to complete.
2148 * Only used at init time.
/*
 * Polled (no-interrupt) command submission, used at init time and by the
 * SCSI tape error path.  Disables controller interrupts, waits for FIFO
 * space, submits, then polls for the matching completion; unexpected
 * completions are deferred via add_sendcmd_reject().  Retries
 * unsolicited aborts up to MAX_CMD_RETRIES.
 */
2150 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller,
2151 1: address logical volume log_unit,
2152 2: periph device address is scsi3addr */
2153 unsigned int log_unit,
2154 __u8 page_code, unsigned char *scsi3addr, int cmd_type)
2156 CommandList_struct *c;
2158 unsigned long complete;
2159 ctlr_info_t *info_p = hba[ctlr];
2160 u64bit buff_dma_handle;
2161 int status, done = 0;
2163 if ((c = cmd_alloc(info_p, 1)) == NULL) {
2164 printk(KERN_WARNING "cciss: unable to get memory");
2167 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
2168 log_unit, page_code, scsi3addr, cmd_type);
2169 if (status != IO_OK) {
2170 cmd_free(info_p, c, 1);
2178 printk(KERN_DEBUG "cciss: turning intr off\n");
2179 #endif /* CCISS_DEBUG */
/* Polling mode: mask controller interrupts for the duration. */
2180 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
2182 /* Make sure there is room in the command FIFO */
2183 /* Actually it should be completely empty at this time */
2184 /* unless we are in here doing error handling for the scsi */
2185 /* tape side of the driver. */
2186 for (i = 200000; i > 0; i--) {
2187 /* if fifo isn't full go */
2188 if (!(info_p->access.fifo_full(info_p))) {
2193 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
2194 " waiting!\n", ctlr);
2199 info_p->access.submit_command(info_p, c);
2202 complete = pollcomplete(ctlr);
2205 printk(KERN_DEBUG "cciss: command completed\n");
2206 #endif /* CCISS_DEBUG */
/* pollcomplete() returns 1 on timeout (invalid tag sentinel). */
2208 if (complete == 1) {
2210 "cciss cciss%d: SendCmd Timeout out, "
2211 "No command list address returned!\n", ctlr);
2217 /* This will need to change for direct lookup completions */
2218 if ((complete & CISS_ERROR_BIT)
2219 && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
2220 /* if data overrun or underun on Report command
2223 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
2224 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
2225 (c->Request.CDB[0] == CISS_INQUIRY)) &&
2226 ((c->err_info->CommandStatus ==
2227 CMD_DATA_OVERRUN) ||
2228 (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
/* Over/underrun on these query commands is benign: treat as success. */
2230 complete = c->busaddr;
2232 if (c->err_info->CommandStatus ==
2233 CMD_UNSOLICITED_ABORT) {
2234 printk(KERN_WARNING "cciss%d: "
2235 "unsolicited abort %p\n",
2237 if (c->retry_count < MAX_CMD_RETRIES) {
2239 "cciss%d: retrying %p\n",
2242 /* erase the old error */
2244 memset(c->err_info, 0,
2246 (ErrorInfo_struct));
2250 "cciss%d: retried %p too "
2251 "many times\n", ctlr, c);
2255 } else if (c->err_info->CommandStatus ==
2258 "cciss%d: command could not be aborted.\n",
2263 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2264 " Error %x \n", ctlr,
2265 c->err_info->CommandStatus);
2266 printk(KERN_WARNING "ciss ciss%d: sendcmd"
2268 " size %x\n num %x value %x\n",
2270 c->err_info->MoreErrInfo.Invalid_Cmd.
2272 c->err_info->MoreErrInfo.Invalid_Cmd.
2274 c->err_info->MoreErrInfo.Invalid_Cmd.
2280 /* This will need changing for direct lookup completions */
/* Completion for someone else's command: defer it for the irq handler. */
2281 if (complete != c->busaddr) {
2282 if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
2283 BUG(); /* we are pretty much hosed if we get here. */
2291 /* unlock the data buffer from DMA */
2292 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2293 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
2294 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
2295 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
2296 #ifdef CONFIG_CISS_SCSI_TAPE
2297 /* if we saved some commands for later, process them now. */
2298 if (info_p->scsi_rejects.ncompletions > 0)
2299 do_cciss_intr(0, info_p);
2301 cmd_free(info_p, c, 1);
2306 * Map (physical) PCI mem into (virtual) kernel space
/*
 * ioremap() a physical PCI memory range, handling a base address that is
 * not page-aligned: maps from the containing page boundary and returns
 * the virtual address offset back to the requested base, or NULL on
 * ioremap failure.
 */
2308 static void __iomem *remap_pci_mem(ulong base, ulong size)
2310 ulong page_base = ((ulong) base) & PAGE_MASK;
2311 ulong page_offs = ((ulong) base) - page_base;
2312 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2314 return page_remapped ? (page_remapped + page_offs) : NULL;
2318 * Takes jobs of the Q and sends them to the hardware, then puts it on
2319 * the Q to wait for completion.
/*
 * Drain the software request queue (h->reqQ) into the controller FIFO,
 * moving each submitted command onto the completion queue (h->cmpQ).
 * Stops early if the hardware FIFO fills up.
 * NOTE(review): caller is expected to hold the controller lock —
 * confirm against the full source (lock handling not visible here).
 */
2321 static void start_io(ctlr_info_t *h)
2323 CommandList_struct *c;
2325 while ((c = h->reqQ) != NULL) {
2326 /* can't do anything if fifo is full */
2327 if ((h->access.fifo_full(h))) {
2328 printk(KERN_WARNING "cciss: fifo full\n");
2332 /* Get the first entry from the Request Q */
2333 removeQ(&(h->reqQ), c);
2336 /* Tell the controller execute command */
2337 h->access.submit_command(h, c);
2339 /* Put job onto the completed Q */
2340 addQ(&(h->cmpQ), c);
2344 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2345 /* Zeros out the error record and then resends the command back */
2346 /* to the controller */
2344 /* Assumes that CCISS_LOCK(h->ctlr) is held. */
2345 /* Zeros out the error record and then resends the command back */
2346 /* to the controller */
2347 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
2349 /* erase the old error information */
2350 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
2352 /* add it to software queue and then send it to the controller */
2353 addQ(&(h->reqQ), c);
/* Track the high-water mark of queue depth for diagnostics. */
2355 if (h->Qdepth > h->maxQsinceinit)
2356 h->maxQsinceinit = h->Qdepth;
2361 /* checks the status of the job and calls complete buffers to mark all
2362 * buffers for the completed job. Note that this function does not need
2363 * to hold the hba/queue lock.
/*
 * Inspect a completed block-layer command's error record, decide whether
 * to retry (unsolicited abort) or fail it, then hand the request to the
 * block layer for softirq completion (cciss_softirq_done).  Does not
 * need the hba/queue lock.
 */
2365 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
2374 if (cmd->err_info->CommandStatus != 0) { /* an error has occurred */
2375 switch (cmd->err_info->CommandStatus) {
2376 unsigned char sense_key;
2377 case CMD_TARGET_STATUS:
/* ScsiStatus 0x02 == CHECK CONDITION: inspect the sense key. */
2380 if (cmd->err_info->ScsiStatus == 0x02) {
2381 printk(KERN_WARNING "cciss: cmd %p "
2382 "has CHECK CONDITION "
2383 " byte 2 = 0x%x\n", cmd,
2384 cmd->err_info->SenseInfo[2]
2386 /* check the sense key */
2387 sense_key = 0xf & cmd->err_info->SenseInfo[2];
2388 /* no status or recovered error */
2389 if ((sense_key == 0x0) || (sense_key == 0x1)) {
2393 printk(KERN_WARNING "cciss: cmd %p "
2394 "has SCSI Status 0x%x\n",
2395 cmd, cmd->err_info->ScsiStatus);
2398 case CMD_DATA_UNDERRUN:
2399 printk(KERN_WARNING "cciss: cmd %p has"
2400 " completed with data underrun "
2403 case CMD_DATA_OVERRUN:
2404 printk(KERN_WARNING "cciss: cmd %p has"
2405 " completed with data overrun "
2409 printk(KERN_WARNING "cciss: cmd %p is "
2410 "reported invalid\n", cmd);
2413 case CMD_PROTOCOL_ERR:
2414 printk(KERN_WARNING "cciss: cmd %p has "
2415 "protocol error \n", cmd);
2418 case CMD_HARDWARE_ERR:
2419 printk(KERN_WARNING "cciss: cmd %p had "
2420 " hardware error\n", cmd);
2423 case CMD_CONNECTION_LOST:
2424 printk(KERN_WARNING "cciss: cmd %p had "
2425 "connection lost\n", cmd);
2429 printk(KERN_WARNING "cciss: cmd %p was "
2433 case CMD_ABORT_FAILED:
2434 printk(KERN_WARNING "cciss: cmd %p reports "
2435 "abort failed\n", cmd);
2438 case CMD_UNSOLICITED_ABORT:
2439 printk(KERN_WARNING "cciss%d: unsolicited "
2440 "abort %p\n", h->ctlr, cmd);
/* Retryable condition: requeue instead of failing the request. */
2441 if (cmd->retry_count < MAX_CMD_RETRIES) {
2444 "cciss%d: retrying %p\n", h->ctlr, cmd);
2448 "cciss%d: %p retried too "
2449 "many times\n", h->ctlr, cmd);
2453 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
2457 printk(KERN_WARNING "cciss: cmd %p returned "
2458 "unknown status %x\n", cmd,
2459 cmd->err_info->CommandStatus);
2463 /* We need to return this command */
2465 resend_cciss_cmd(h, cmd);
/* Hand off to the block layer; DMA unmap happens in the softirq path. */
2469 cmd->rq->completion_data = cmd;
2470 cmd->rq->errors = status;
2471 blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
2472 blk_complete_request(cmd->rq);
2476 * Get a request and submit it to the controller.
/*
 * Block-layer request function: pull requests off the queue, build a
 * READ(10)/WRITE(10) or READ(16)/WRITE(16) CDB, map the request's
 * scatter-gather list for DMA, and queue the command to the controller.
 * Called with q->queue_lock held; the lock is dropped around the DMA
 * mapping work and re-taken before queueing.
 */
2478 static void do_cciss_request(request_queue_t *q)
2480 ctlr_info_t *h = q->queuedata;
2481 CommandList_struct *c;
2484 struct request *creq;
2486 struct scatterlist tmp_sg[MAXSGENTRIES];
2487 drive_info_struct *drv;
2490 /* We call start_io here in case there is a command waiting on the
2491 * queue that has not been sent.
2493 if (blk_queue_plugged(q))
2497 creq = elv_next_request(q);
2501 BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
2503 if ((c = cmd_alloc(h, 1)) == NULL)
2506 blkdev_dequeue_request(creq);
/* Drop the queue lock while doing DMA mapping and CDB construction. */
2508 spin_unlock_irq(q->queue_lock);
2510 c->cmd_type = CMD_RWREQ;
2513 /* fill in the request */
2514 drv = creq->rq_disk->private_data;
2515 c->Header.ReplyQueue = 0; // unused in simple mode
2516 /* got command from pool, so use the command block index instead */
2517 /* for direct lookups. */
2518 /* The first 2 bits are reserved for controller error reporting. */
2519 c->Header.Tag.lower = (c->cmdindex << 3);
2520 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2521 c->Header.LUN.LogDev.VolId = drv->LunID;
2522 c->Header.LUN.LogDev.Mode = 1;
2523 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2524 c->Request.Type.Type = TYPE_CMD; // It is a command.
2525 c->Request.Type.Attribute = ATTR_SIMPLE;
2526 c->Request.Type.Direction =
2527 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2528 c->Request.Timeout = 0; // Don't time out
2530 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
2531 start_blk = creq->sector;
2533 printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
2534 (int)creq->nr_sectors);
2535 #endif /* CCISS_DEBUG */
/* Collapse the request's bios into a bounded scatterlist. */
2537 seg = blk_rq_map_sg(q, creq, tmp_sg);
2539 /* get the DMA records for the setup */
2540 if (c->Request.Type.Direction == XFER_READ)
2541 dir = PCI_DMA_FROMDEVICE;
2543 dir = PCI_DMA_TODEVICE;
2545 for (i = 0; i < seg; i++) {
2546 c->SG[i].Len = tmp_sg[i].length;
2547 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2549 tmp_sg[i].length, dir);
2550 c->SG[i].Addr.lower = temp64.val32.lower;
2551 c->SG[i].Addr.upper = temp64.val32.upper;
2552 c->SG[i].Ext = 0; // we are not chaining
2554 /* track how many SG entries we are using */
2559 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n",
2560 creq->nr_sectors, seg);
2561 #endif /* CCISS_DEBUG */
2563 c->Header.SGList = c->Header.SGTotal = seg;
/* Build the LBA/length fields: 10-byte CDB for <=2TB, else 16-byte. */
2564 if(h->cciss_read == CCISS_READ_10) {
2565 c->Request.CDB[1] = 0;
2566 c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB
2567 c->Request.CDB[3] = (start_blk >> 16) & 0xff;
2568 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
2569 c->Request.CDB[5] = start_blk & 0xff;
2570 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
2571 c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
2572 c->Request.CDB[8] = creq->nr_sectors & 0xff;
2573 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2575 c->Request.CDBLen = 16;
2576 c->Request.CDB[1]= 0;
2577 c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
2578 c->Request.CDB[3]= (start_blk >> 48) & 0xff;
2579 c->Request.CDB[4]= (start_blk >> 40) & 0xff;
2580 c->Request.CDB[5]= (start_blk >> 32) & 0xff;
2581 c->Request.CDB[6]= (start_blk >> 24) & 0xff;
2582 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
2583 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
2584 c->Request.CDB[9]= start_blk & 0xff;
2585 c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
2586 c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
2587 c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
2588 c->Request.CDB[13]= creq->nr_sectors & 0xff;
2589 c->Request.CDB[14] = c->Request.CDB[15] = 0;
/* Re-acquire the queue lock before touching the software queues. */
2592 spin_lock_irq(q->queue_lock);
2594 addQ(&(h->reqQ), c);
2596 if (h->Qdepth > h->maxQsinceinit)
2597 h->maxQsinceinit = h->Qdepth;
2603 /* We will already have the driver lock here so no need
2609 static inline unsigned long get_next_completion(ctlr_info_t *h)
2611 #ifdef CONFIG_CISS_SCSI_TAPE
2612 /* Any rejects from sendcmd() lying around? Process them first */
2613 if (h->scsi_rejects.ncompletions == 0)
2614 return h->access.command_completed(h);
2616 struct sendcmd_reject_list *srl;
2618 srl = &h->scsi_rejects;
2619 n = --srl->ncompletions;
2620 /* printk("cciss%d: processing saved reject\n", h->ctlr); */
2622 return srl->complete[n];
2625 return h->access.command_completed(h);
2629 static inline int interrupt_pending(ctlr_info_t *h)
2631 #ifdef CONFIG_CISS_SCSI_TAPE
2632 return (h->access.intr_pending(h)
2633 || (h->scsi_rejects.ncompletions > 0));
2635 return h->access.intr_pending(h);
2639 static inline long interrupt_not_for_us(ctlr_info_t *h)
2641 #ifdef CONFIG_CISS_SCSI_TAPE
2642 return (((h->access.intr_pending(h) == 0) ||
2643 (h->interrupts_enabled == 0))
2644 && (h->scsi_rejects.ncompletions == 0));
2646 return (((h->access.intr_pending(h) == 0) ||
2647 (h->interrupts_enabled == 0)));
2651 static irqreturn_t do_cciss_intr(int irq, void *dev_id)
2653 ctlr_info_t *h = dev_id;
2654 CommandList_struct *c;
2655 unsigned long flags;
2658 if (interrupt_not_for_us(h))
2661 * If there are completed commands in the completion queue,
2662 * we had better do something about it.
2664 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2665 while (interrupt_pending(h)) {
2666 while ((a = get_next_completion(h)) != FIFO_EMPTY) {
2670 if (a2 >= NR_CMDS) {
2672 "cciss: controller cciss%d failed, stopping.\n",
2674 fail_all_cmds(h->ctlr);
2678 c = h->cmd_pool + a2;
2683 if ((c = h->cmpQ) == NULL) {
2685 "cciss: Completion of %08x ignored\n",
2689 while (c->busaddr != a) {
2696 * If we've found the command, take it off the
2697 * completion Q and free it
2699 if (c->busaddr == a) {
2700 removeQ(&h->cmpQ, c);
2701 if (c->cmd_type == CMD_RWREQ) {
2702 complete_command(h, c, 0);
2703 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2704 complete(c->waiting);
2706 # ifdef CONFIG_CISS_SCSI_TAPE
2707 else if (c->cmd_type == CMD_SCSI)
2708 complete_scsi_command(c, 0, a1);
2715 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2720 * We cannot read the structure directly; for portability we must use
2722 * This is for debug only.
2725 static void print_cfg_table(CfgTable_struct *tb)
2730 printk("Controller Configuration information\n");
2731 printk("------------------------------------\n");
2732 for (i = 0; i < 4; i++)
2733 temp_name[i] = readb(&(tb->Signature[i]));
2734 temp_name[4] = '\0';
2735 printk(" Signature = %s\n", temp_name);
2736 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2737 printk(" Transport methods supported = 0x%x\n",
2738 readl(&(tb->TransportSupport)));
2739 printk(" Transport methods active = 0x%x\n",
2740 readl(&(tb->TransportActive)));
2741 printk(" Requested transport Method = 0x%x\n",
2742 readl(&(tb->HostWrite.TransportRequest)));
2743 printk(" Coalesce Interrupt Delay = 0x%x\n",
2744 readl(&(tb->HostWrite.CoalIntDelay)));
2745 printk(" Coalesce Interrupt Count = 0x%x\n",
2746 readl(&(tb->HostWrite.CoalIntCount)));
2747 printk(" Max outstanding commands = 0x%d\n",
2748 readl(&(tb->CmdsOutMax)));
2749 printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
2750 for (i = 0; i < 16; i++)
2751 temp_name[i] = readb(&(tb->ServerName[i]));
2752 temp_name[16] = '\0';
2753 printk(" Server Name = %s\n", temp_name);
2754 printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
2756 #endif /* CCISS_DEBUG */
2758 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
2760 int i, offset, mem_type, bar_type;
2761 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2764 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
2765 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
2766 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2769 mem_type = pci_resource_flags(pdev, i) &
2770 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2772 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2773 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2774 offset += 4; /* 32 bit */
2776 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2779 default: /* reserved in PCI 2.2 */
2781 "Base address is invalid\n");
2786 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2792 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
2793 * controllers that are capable. If not, we use IO-APIC mode.
2796 static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
2797 struct pci_dev *pdev, __u32 board_id)
2799 #ifdef CONFIG_PCI_MSI
2801 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
2805 /* Some boards advertise MSI but don't really support it */
2806 if ((board_id == 0x40700E11) ||
2807 (board_id == 0x40800E11) ||
2808 (board_id == 0x40820E11) || (board_id == 0x40830E11))
2809 goto default_int_mode;
2811 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
2812 err = pci_enable_msix(pdev, cciss_msix_entries, 4);
2814 c->intr[0] = cciss_msix_entries[0].vector;
2815 c->intr[1] = cciss_msix_entries[1].vector;
2816 c->intr[2] = cciss_msix_entries[2].vector;
2817 c->intr[3] = cciss_msix_entries[3].vector;
2822 printk(KERN_WARNING "cciss: only %d MSI-X vectors "
2823 "available\n", err);
2825 printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
2829 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
2830 if (!pci_enable_msi(pdev)) {
2831 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2835 printk(KERN_WARNING "cciss: MSI init failed\n");
2836 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2841 #endif /* CONFIG_PCI_MSI */
2842 /* if we get here we're going to use the default interrupt mode */
2843 c->intr[SIMPLE_MODE_INT] = pdev->irq;
2847 static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2849 ushort subsystem_vendor_id, subsystem_device_id, command;
2850 __u32 board_id, scratchpad = 0;
2852 __u32 cfg_base_addr;
2853 __u64 cfg_base_addr_index;
2856 /* check to see if controller has been disabled */
2857 /* BEFORE trying to enable it */
2858 (void)pci_read_config_word(pdev, PCI_COMMAND, &command);
2859 if (!(command & 0x02)) {
2861 "cciss: controller appears to be disabled\n");
2865 err = pci_enable_device(pdev);
2867 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2871 err = pci_request_regions(pdev, "cciss");
2873 printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
2875 goto err_out_disable_pdev;
2878 subsystem_vendor_id = pdev->subsystem_vendor;
2879 subsystem_device_id = pdev->subsystem_device;
2880 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2881 subsystem_vendor_id);
2884 printk("command = %x\n", command);
2885 printk("irq = %x\n", pdev->irq);
2886 printk("board_id = %x\n", board_id);
2887 #endif /* CCISS_DEBUG */
2889 /* If the kernel supports MSI/MSI-X we will try to enable that functionality,
2890 * else we use the IO-APIC interrupt assigned to us by system ROM.
2892 cciss_interrupt_mode(c, pdev, board_id);
2895 * Memory base addr is first addr , the second points to the config
2899 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2901 printk("address 0 = %x\n", c->paddr);
2902 #endif /* CCISS_DEBUG */
2903 c->vaddr = remap_pci_mem(c->paddr, 200);
2905 /* Wait for the board to become ready. (PCI hotplug needs this.)
2906 * We poll for up to 120 secs, once per 100ms. */
2907 for (i = 0; i < 1200; i++) {
2908 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2909 if (scratchpad == CCISS_FIRMWARE_READY)
2911 set_current_state(TASK_INTERRUPTIBLE);
2912 schedule_timeout(HZ / 10); /* wait 100ms */
2914 if (scratchpad != CCISS_FIRMWARE_READY) {
2915 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2917 goto err_out_free_res;
2920 /* get the address index number */
2921 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2922 cfg_base_addr &= (__u32) 0x0000ffff;
2924 printk("cfg base address = %x\n", cfg_base_addr);
2925 #endif /* CCISS_DEBUG */
2926 cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
2928 printk("cfg base address index = %x\n", cfg_base_addr_index);
2929 #endif /* CCISS_DEBUG */
2930 if (cfg_base_addr_index == -1) {
2931 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2933 goto err_out_free_res;
2936 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2938 printk("cfg offset = %x\n", cfg_offset);
2939 #endif /* CCISS_DEBUG */
2940 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2941 cfg_base_addr_index) +
2942 cfg_offset, sizeof(CfgTable_struct));
2943 c->board_id = board_id;
2946 print_cfg_table(c->cfgtable);
2947 #endif /* CCISS_DEBUG */
2949 for (i = 0; i < ARRAY_SIZE(products); i++) {
2950 if (board_id == products[i].board_id) {
2951 c->product_name = products[i].product_name;
2952 c->access = *(products[i].access);
2956 if (i == ARRAY_SIZE(products)) {
2957 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2958 " to access the Smart Array controller %08lx\n",
2959 (unsigned long)board_id);
2961 goto err_out_free_res;
2963 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
2964 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2965 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2966 (readb(&c->cfgtable->Signature[3]) != 'S')) {
2967 printk("Does not appear to be a valid CISS config table\n");
2969 goto err_out_free_res;
2973 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2975 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2977 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2982 printk("Trying to put board into Simple mode\n");
2983 #endif /* CCISS_DEBUG */
2984 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2985 /* Update the field, and then ring the doorbell */
2986 writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
2987 writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2989 /* under certain very rare conditions, this can take awhile.
2990 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2991 * as we enter this code.) */
2992 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
2993 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2995 /* delay and try again */
2996 set_current_state(TASK_INTERRUPTIBLE);
2997 schedule_timeout(10);
3001 printk(KERN_DEBUG "I counter got to %d %x\n", i,
3002 readl(c->vaddr + SA5_DOORBELL));
3003 #endif /* CCISS_DEBUG */
3005 print_cfg_table(c->cfgtable);
3006 #endif /* CCISS_DEBUG */
3008 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3009 printk(KERN_WARNING "cciss: unable to get board into"
3012 goto err_out_free_res;
3017 pci_release_regions(pdev);
3019 err_out_disable_pdev:
3020 pci_disable_device(pdev);
3025 * Gets information about the local volumes attached to the controller.
3027 static void cciss_getgeometry(int cntl_num)
3029 ReportLunData_struct *ld_buff;
3030 InquiryData_struct *inq_buff;
3036 sector_t total_size;
3038 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
3039 if (ld_buff == NULL) {
3040 printk(KERN_ERR "cciss: out of memory\n");
3043 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
3044 if (inq_buff == NULL) {
3045 printk(KERN_ERR "cciss: out of memory\n");
3049 /* Get the firmware version */
3050 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
3051 sizeof(InquiryData_struct), 0, 0, 0, NULL,
3053 if (return_code == IO_OK) {
3054 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
3055 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
3056 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
3057 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
3058 } else { /* send command failed */
3060 printk(KERN_WARNING "cciss: unable to determine firmware"
3061 " version of controller\n");
3063 /* Get the number of logical volumes */
3064 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
3065 sizeof(ReportLunData_struct), 0, 0, 0, NULL,
3068 if (return_code == IO_OK) {
3070 printk("LUN Data\n--------------------------\n");
3071 #endif /* CCISS_DEBUG */
3074 (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
3076 (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
3078 (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
3079 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
3080 } else { /* reading number of logical volumes failed */
3082 printk(KERN_WARNING "cciss: report logical volume"
3083 " command failed\n");
3086 hba[cntl_num]->num_luns = listlength / 8; // 8 bytes pre entry
3087 if (hba[cntl_num]->num_luns > CISS_MAX_LUN) {
3089 "ciss: only %d number of logical volumes supported\n",
3091 hba[cntl_num]->num_luns = CISS_MAX_LUN;
3094 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n",
3095 ld_buff->LUNListLength[0], ld_buff->LUNListLength[1],
3096 ld_buff->LUNListLength[2], ld_buff->LUNListLength[3],
3097 hba[cntl_num]->num_luns);
3098 #endif /* CCISS_DEBUG */
3100 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns - 1;
3101 for (i = 0; i < CISS_MAX_LUN; i++) {
3102 if (i < hba[cntl_num]->num_luns) {
3103 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
3105 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
3107 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
3109 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
3111 hba[cntl_num]->drv[i].LunID = lunid;
3114 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
3115 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
3116 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
3117 hba[cntl_num]->drv[i].LunID);
3118 #endif /* CCISS_DEBUG */
3120 /* testing to see if 16-byte CDBs are already being used */
3121 if(hba[cntl_num]->cciss_read == CCISS_READ_16) {
3122 cciss_read_capacity_16(cntl_num, i, 0,
3123 &total_size, &block_size);
3126 cciss_read_capacity(cntl_num, i, 0, &total_size, &block_size);
3128 /* total_size = last LBA + 1 */
3129 if(total_size == (__u32) 0) {
3130 cciss_read_capacity_16(cntl_num, i, 0,
3131 &total_size, &block_size);
3132 hba[cntl_num]->cciss_read = CCISS_READ_16;
3133 hba[cntl_num]->cciss_write = CCISS_WRITE_16;
3135 hba[cntl_num]->cciss_read = CCISS_READ_10;
3136 hba[cntl_num]->cciss_write = CCISS_WRITE_10;
3139 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
3140 block_size, inq_buff,
3141 &hba[cntl_num]->drv[i]);
3143 /* initialize raid_level to indicate a free space */
3144 hba[cntl_num]->drv[i].raid_level = -1;
3151 /* Function to find the first free pointer into our hba[] array */
3152 /* Returns -1 if no free entries are left. */
3153 static int alloc_cciss_hba(void)
3155 struct gendisk *disk[NWD];
3157 for (n = 0; n < NWD; n++) {
3158 disk[n] = alloc_disk(1 << NWD_SHIFT);
3163 for (i = 0; i < MAX_CTLR; i++) {
3166 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
3169 for (n = 0; n < NWD; n++)
3170 p->gendisk[n] = disk[n];
3175 printk(KERN_WARNING "cciss: This driver supports a maximum"
3176 " of %d controllers.\n", MAX_CTLR);
3179 printk(KERN_ERR "cciss: out of memory.\n");
3186 static void free_hba(int i)
3188 ctlr_info_t *p = hba[i];
3192 for (n = 0; n < NWD; n++)
3193 put_disk(p->gendisk[n]);
3198 * This is it. Find all the controllers and register them. I really hate
3199 * stealing all these major device numbers.
3200 * returns the number of block devices registered.
3202 static int __devinit cciss_init_one(struct pci_dev *pdev,
3203 const struct pci_device_id *ent)
3211 i = alloc_cciss_hba();
3215 hba[i]->busy_initializing = 1;
3217 if (cciss_pci_init(hba[i], pdev) != 0)
3220 sprintf(hba[i]->devname, "cciss%d", i);
3222 hba[i]->pdev = pdev;
3224 /* configure PCI DMA stuff */
3225 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
3227 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
3230 printk(KERN_ERR "cciss: no suitable DMA available\n");
3235 * register with the major number, or get a dynamic major number
3236 * by passing 0 as argument. This is done for greater than
3237 * 8 controller support.
3239 if (i < MAX_CTLR_ORIG)
3240 hba[i]->major = COMPAQ_CISS_MAJOR + i;
3241 rc = register_blkdev(hba[i]->major, hba[i]->devname);
3242 if (rc == -EBUSY || rc == -EINVAL) {
3244 "cciss: Unable to get major number %d for %s "
3245 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
3248 if (i >= MAX_CTLR_ORIG)
3252 /* make sure the board interrupts are off */
3253 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
3254 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
3255 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
3256 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
3257 hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
3261 printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
3262 hba[i]->devname, pdev->device, pci_name(pdev),
3263 hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
3265 hba[i]->cmd_pool_bits =
3266 kmalloc(((NR_CMDS + BITS_PER_LONG -
3267 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
3268 hba[i]->cmd_pool = (CommandList_struct *)
3269 pci_alloc_consistent(hba[i]->pdev,
3270 NR_CMDS * sizeof(CommandList_struct),
3271 &(hba[i]->cmd_pool_dhandle));
3272 hba[i]->errinfo_pool = (ErrorInfo_struct *)
3273 pci_alloc_consistent(hba[i]->pdev,
3274 NR_CMDS * sizeof(ErrorInfo_struct),
3275 &(hba[i]->errinfo_pool_dhandle));
3276 if ((hba[i]->cmd_pool_bits == NULL)
3277 || (hba[i]->cmd_pool == NULL)
3278 || (hba[i]->errinfo_pool == NULL)) {
3279 printk(KERN_ERR "cciss: out of memory");
3282 #ifdef CONFIG_CISS_SCSI_TAPE
3283 hba[i]->scsi_rejects.complete =
3284 kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
3285 (NR_CMDS + 5), GFP_KERNEL);
3286 if (hba[i]->scsi_rejects.complete == NULL) {
3287 printk(KERN_ERR "cciss: out of memory");
3291 spin_lock_init(&hba[i]->lock);
3293 /* Initialize the pdev driver private data.
3294 have it point to hba[i]. */
3295 pci_set_drvdata(pdev, hba[i]);
3296 /* command and error info recs zeroed out before
3298 memset(hba[i]->cmd_pool_bits, 0,
3299 ((NR_CMDS + BITS_PER_LONG -
3300 1) / BITS_PER_LONG) * sizeof(unsigned long));
3303 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n", i);
3304 #endif /* CCISS_DEBUG */
3306 cciss_getgeometry(i);
3308 cciss_scsi_setup(i);
3310 /* Turn the interrupts on so we can service requests */
3311 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
3314 hba[i]->busy_initializing = 0;
3316 for (j = 0; j < NWD; j++) { /* mfm */
3317 drive_info_struct *drv = &(hba[i]->drv[j]);
3318 struct gendisk *disk = hba[i]->gendisk[j];
3320 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
3323 "cciss: unable to allocate queue for disk %d\n",
3329 q->backing_dev_info.ra_pages = READ_AHEAD;
3330 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3332 /* This is a hardware imposed limit. */
3333 blk_queue_max_hw_segments(q, MAXSGENTRIES);
3335 /* This is a limit in the driver and could be eliminated. */
3336 blk_queue_max_phys_segments(q, MAXSGENTRIES);
3338 blk_queue_max_sectors(q, 512);
3340 blk_queue_softirq_done(q, cciss_softirq_done);
3342 q->queuedata = hba[i];
3343 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
3344 disk->major = hba[i]->major;
3345 disk->first_minor = j << NWD_SHIFT;
3346 disk->fops = &cciss_fops;
3348 disk->private_data = drv;
3349 disk->driverfs_dev = &pdev->dev;
3350 /* we must register the controller even if no disks exist */
3351 /* this is for the online array utilities */
3352 if (!drv->heads && j)
3354 blk_queue_hardsect_size(q, drv->block_size);
3355 set_capacity(disk, drv->nr_blocks);
3362 #ifdef CONFIG_CISS_SCSI_TAPE
3363 kfree(hba[i]->scsi_rejects.complete);
3365 kfree(hba[i]->cmd_pool_bits);
3366 if (hba[i]->cmd_pool)
3367 pci_free_consistent(hba[i]->pdev,
3368 NR_CMDS * sizeof(CommandList_struct),
3369 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3370 if (hba[i]->errinfo_pool)
3371 pci_free_consistent(hba[i]->pdev,
3372 NR_CMDS * sizeof(ErrorInfo_struct),
3373 hba[i]->errinfo_pool,
3374 hba[i]->errinfo_pool_dhandle);
3375 free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
3377 unregister_blkdev(hba[i]->major, hba[i]->devname);
3379 hba[i]->busy_initializing = 0;
3384 static void __devexit cciss_remove_one(struct pci_dev *pdev)
3386 ctlr_info_t *tmp_ptr;
3391 if (pci_get_drvdata(pdev) == NULL) {
3392 printk(KERN_ERR "cciss: Unable to remove device \n");
3395 tmp_ptr = pci_get_drvdata(pdev);
3397 if (hba[i] == NULL) {
3398 printk(KERN_ERR "cciss: device appears to "
3399 "already be removed \n");
3402 /* Turn board interrupts off and send the flush cache command */
3403 /* sendcmd will turn off interrupt, and send the flush...
3404 * To write all data in the battery backed cache to disks */
3405 memset(flush_buf, 0, 4);
3406 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
3408 if (return_code != IO_OK) {
3409 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
3412 free_irq(hba[i]->intr[2], hba[i]);
3414 #ifdef CONFIG_PCI_MSI
3415 if (hba[i]->msix_vector)
3416 pci_disable_msix(hba[i]->pdev);
3417 else if (hba[i]->msi_vector)
3418 pci_disable_msi(hba[i]->pdev);
3419 #endif /* CONFIG_PCI_MSI */
3421 iounmap(hba[i]->vaddr);
3422 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
3423 unregister_blkdev(hba[i]->major, hba[i]->devname);
3424 remove_proc_entry(hba[i]->devname, proc_cciss);
3426 /* remove it from the disk list */
3427 for (j = 0; j < NWD; j++) {
3428 struct gendisk *disk = hba[i]->gendisk[j];
3430 request_queue_t *q = disk->queue;
3432 if (disk->flags & GENHD_FL_UP)
3435 blk_cleanup_queue(q);
3439 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
3440 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
3441 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
3442 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
3443 kfree(hba[i]->cmd_pool_bits);
3444 #ifdef CONFIG_CISS_SCSI_TAPE
3445 kfree(hba[i]->scsi_rejects.complete);
3447 pci_release_regions(pdev);
3448 pci_disable_device(pdev);
3449 pci_set_drvdata(pdev, NULL);
3453 static struct pci_driver cciss_pci_driver = {
3455 .probe = cciss_init_one,
3456 .remove = __devexit_p(cciss_remove_one),
3457 .id_table = cciss_pci_device_id, /* id_table */
3461 * This is it. Register the PCI driver information for the cards we control
3462 * the OS will call our registered routines when it finds one of our cards.
3464 static int __init cciss_init(void)
3466 printk(KERN_INFO DRIVER_NAME "\n");
3468 /* Register for our PCI devices */
3469 return pci_register_driver(&cciss_pci_driver);
3472 static void __exit cciss_cleanup(void)
3476 pci_unregister_driver(&cciss_pci_driver);
3477 /* double check that all controller entrys have been removed */
3478 for (i = 0; i < MAX_CTLR; i++) {
3479 if (hba[i] != NULL) {
3480 printk(KERN_WARNING "cciss: had to remove"
3481 " controller %d\n", i);
3482 cciss_remove_one(hba[i]->pdev);
3485 remove_proc_entry("cciss", proc_root_driver);
3488 static void fail_all_cmds(unsigned long ctlr)
3490 /* If we get here, the board is apparently dead. */
3491 ctlr_info_t *h = hba[ctlr];
3492 CommandList_struct *c;
3493 unsigned long flags;
3495 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3496 h->alive = 0; /* the controller apparently died... */
3498 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3500 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3502 /* move everything off the request queue onto the completed queue */
3503 while ((c = h->reqQ) != NULL) {
3504 removeQ(&(h->reqQ), c);
3506 addQ(&(h->cmpQ), c);
3509 /* Now, fail everything on the completed queue with a HW error */
3510 while ((c = h->cmpQ) != NULL) {
3511 removeQ(&h->cmpQ, c);
3512 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3513 if (c->cmd_type == CMD_RWREQ) {
3514 complete_command(h, c, 0);
3515 } else if (c->cmd_type == CMD_IOCTL_PEND)
3516 complete(c->waiting);
3517 #ifdef CONFIG_CISS_SCSI_TAPE
3518 else if (c->cmd_type == CMD_SCSI)
3519 complete_scsi_command(c, 0, 0);
3522 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3526 module_init(cciss_init);
3527 module_exit(cciss_cleanup);