/* 6.10.00 - Remove 1G Addressing Limitations */
/* 6.11.xx - Get VersionInfo buffer off the stack ! DDTS 60401 */
/* 6.11.xx - Make Logical Drive Info structure safe for DMA DDTS 60639 */
-/* 7.10.xx - Add highmem_io flag in SCSI Templete for 2.4 kernels */
+/* 7.10.18 - Add highmem_io flag in SCSI Template for 2.4 kernels */
/* - Fix path/name for scsi_hosts.h include for 2.6 kernels */
/* - Fix sort order of 7k */
/* - Remove 3 unused "inline" functions */
+/* 7.12.xx - Use STATIC functions wherever possible */
+/* - Clean up deprecated MODULE_PARM calls */
/*****************************************************************************/
/*
* DRIVER_VER
*/
-#define IPS_VERSION_HIGH "7.10"
-#define IPS_VERSION_LOW ".18 "
+#define IPS_VERSION_HIGH "7.12"
+#define IPS_VERSION_LOW ".02 "
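+/* The two halves are emitted back to back wherever the full driver       */
+/* version is reported (e.g. a "%s%s" format in the proc host info),      */
+/* yielding "7.12.02 ".                                                   */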
#if !defined(__i386__) && !defined(__ia64__) && !defined(__x86_64__)
#warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
#include <linux/blk.h>
#include "sd.h"
-#define IPS_SG_ADDRESS(sg) ((sg)->address)
#define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags)
#define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags)
#ifndef __devexit_p
#define __devexit_p(x) x
#endif
#else
-#define IPS_SG_ADDRESS(sg) (page_address((sg)->page) ? \
- page_address((sg)->page)+(sg)->offset : NULL)
#define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0)
#define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0)
#endif
#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
- SCSI_DATA_NONE == scb->scsi_cmd->sc_data_direction) ? \
+ DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
PCI_DMA_BIDIRECTIONAL : \
- scsi_to_pci_dma_dir(scb->scsi_cmd->sc_data_direction))
+ scb->scsi_cmd->sc_data_direction)
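+/* Note: 2.6 kernels carry the generic enum dma_data_direction values     */
+/* (DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE, ...) in sc_data_direction,  */
+/* and the PCI DMA mapping API accepts those values directly, so the old  */
+/* scsi_to_pci_dma_dir() translation step is no longer needed.            */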
#ifdef IPS_DEBUG
#define METHOD_TRACE(s, i) if (ips_debug >= (i+10)) printk(KERN_NOTICE s "\n");
static int ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr);
static int ips_register_scsi(int index);
+static int ips_poll_for_flush_complete(ips_ha_t * ha);
+static void ips_flush_and_reset(ips_ha_t *ha);
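+/* Forward declarations for the new recovery path: if an interrupt is     */
+/* already pending while the adapter is initializing, its state is        */
+/* unknown and it is flushed and reset (see the routine headers below).   */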
+
/*
* global variables
*/
ips_ha_t *ha;
ips_copp_wait_item_t *item;
int ret;
+ unsigned long cpu_flags;
+ struct Scsi_Host *host;
METHOD_TRACE("ips_eh_abort", 1);
if (!SC)
return (FAILED);
+ host = SC->device->host;
ha = (ips_ha_t *) SC->device->host->hostdata;
	if (!ha)
		return (FAILED);

	if (!ha->active)
		return (FAILED);
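+
+	/* The midlayer no longer enters the error handlers with the host */
+	/* lock held, so the handler takes it around the waitlist scan.   */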
+ IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+
/* See if the command is on the copp queue */
item = ha->copp_waitlist.head;
while ((item) && (item->scsi_cmd != SC))
/* command must have already been sent */
ret = (FAILED);
}
+
+ IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
return ret;
}
/* */
/****************************************************************************/
static int
-ips_eh_reset(Scsi_Cmnd * SC)
+__ips_eh_reset(Scsi_Cmnd * SC)
{
int ret;
int i;
}
+static int
+ips_eh_reset(Scsi_Cmnd * SC)
+{
+ int rc;
+
+ spin_lock_irq(SC->device->host->host_lock);
+ rc = __ips_eh_reset(SC);
+ spin_unlock_irq(SC->device->host->host_lock);
+
+ return rc;
+}
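+/* Lock-taking wrapper: __ips_eh_reset() still assumes the host lock is   */
+/* held, as older kernels guaranteed on entry to the error handler, so    */
+/* ips_eh_reset() acquires it before delegating.                          */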
+
/****************************************************************************/
/* */
/* Routine Name: ips_queue */
SC->device->channel, SC->device->id, SC->device->lun);
/* Check for command to initiator IDs */
- if ((SC->device->channel > 0)
- && (SC->device->id == ha->ha_id[SC->device->channel])) {
+ if ((scmd_channel(SC) > 0)
+ && (scmd_id(SC) == ha->ha_id[scmd_channel(SC)])) {
SC->result = DID_NO_CONNECT << 16;
done(SC);
static int
ips_is_passthru(Scsi_Cmnd * SC)
{
+ unsigned long flags;
+
METHOD_TRACE("ips_is_passthru", 1);
if (!SC)
return 1;
else if (SC->use_sg) {
struct scatterlist *sg = SC->request_buffer;
- char *buffer = IPS_SG_ADDRESS(sg);
+ char *buffer;
+
+ /* kmap_atomic() ensures addressability of the user buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
- buffer[2] == 'P' && buffer[3] == 'P')
+ buffer[2] == 'P' && buffer[3] == 'P') {
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ local_irq_restore(flags);
return 1;
+ }
+ kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+ local_irq_restore(flags);
}
}
return 0;
p = ha->scb_waitlist.head;
while ((p) && (scb = ips_getscb(ha))) {
-		if ((p->device->channel > 0)
-		    && (ha->dcdb_active[p->device->channel - 1]
-			& (1 << p->device->id))) {
+		if ((scmd_channel(p) > 0)
+		    && (ha->dcdb_active[scmd_channel(p) - 1]
+			& (1 << scmd_id(p)))) {
ips_freescb(ha, scb);
p = (Scsi_Cmnd *) p->host_scribble;
continue;
sg = SC->request_buffer;
scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
-					   scsi_to_pci_dma_dir(SC->sc_data_direction));
+					   SC->sc_data_direction);
scb->flags |= IPS_SCB_MAP_SG;
for (i = 0; i < scb->sg_count; i++) {
if (ips_fill_scb_sg_single
pci_map_single(ha->pcidev,
SC->request_buffer,
SC->request_bufflen,
-				   scsi_to_pci_dma_dir(SC->sc_data_direction));
+				   SC->sc_data_direction);
scb->flags |= IPS_SCB_MAP_SINGLE;
ips_fill_scb_sg_single(ha, scb->data_busaddr,
scb, 0,
int i;
unsigned int min_cnt, xfer_cnt;
char *cdata = (char *) data;
+ unsigned char *buffer;
+ unsigned long flags;
struct scatterlist *sg = scmd->request_buffer;
for (i = 0, xfer_cnt = 0;
(i < scmd->use_sg) && (xfer_cnt < count); i++) {
- if (!IPS_SG_ADDRESS(&sg[i]))
- return;
min_cnt = min(count - xfer_cnt, sg[i].length);
- memcpy(IPS_SG_ADDRESS(&sg[i]), &cdata[xfer_cnt],
- min_cnt);
+
+ /* kmap_atomic() ensures addressability of the data buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ memcpy(buffer, &cdata[xfer_cnt], min_cnt);
+ kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+ local_irq_restore(flags);
+
xfer_cnt += min_cnt;
}
int i;
unsigned int min_cnt, xfer_cnt;
char *cdata = (char *) data;
+ unsigned char *buffer;
+ unsigned long flags;
struct scatterlist *sg = scmd->request_buffer;
for (i = 0, xfer_cnt = 0;
(i < scmd->use_sg) && (xfer_cnt < count); i++) {
- if (!IPS_SG_ADDRESS(&sg[i]))
- return;
min_cnt = min(count - xfer_cnt, sg[i].length);
- memcpy(&cdata[xfer_cnt], IPS_SG_ADDRESS(&sg[i]),
- min_cnt);
+
+ /* kmap_atomic() ensures addressability of the data buffer.*/
+ /* local_irq_save() protects the KM_IRQ0 address slot. */
+ local_irq_save(flags);
+ buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ memcpy(&cdata[xfer_cnt], buffer, min_cnt);
+ kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+ local_irq_restore(flags);
+
xfer_cnt += min_cnt;
}
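+/*
+ * Illustrative sketch only (not part of this patch): the map/copy/unmap
+ * pattern used by ips_scmd_buf_write()/ips_scmd_buf_read() above, factored
+ * into a helper.  Assumes the 2.6-era highmem API, where kmap_atomic()
+ * takes a KM_* slot and the caller disables local interrupts to protect
+ * the per-CPU KM_IRQ0 slot.  The helper name is hypothetical.
+ */
+#if 0
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+
+static void ips_sg_copy_in(struct scatterlist *sg, const char *src,
+			   unsigned int len)
+{
+	unsigned long flags;
+	char *dst;
+
+	local_irq_save(flags);	/* KM_IRQ0 must not be re-entered from IRQ */
+	dst = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	memcpy(dst, src, len);	/* len must not run past this sg element */
+	kunmap_atomic(dst - sg->offset, KM_IRQ0);	/* unmap the page base */
+	local_irq_restore(flags);
+}
+#endif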
ha->enq = NULL;
}
- if (ha->conf) {
- kfree(ha->conf);
- ha->conf = NULL;
- }
+ kfree(ha->conf);
+ ha->conf = NULL;
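+	/* (kfree(NULL) is a no-op, so the NULL guards dropped here and    */
+	/* below were redundant.)                                          */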
if (ha->adapt) {
pci_free_consistent(ha->pcidev,
ha->logical_drive_info = NULL;
}
- if (ha->nvram) {
- kfree(ha->nvram);
- ha->nvram = NULL;
- }
+ kfree(ha->nvram);
+ ha->nvram = NULL;
- if (ha->subsys) {
- kfree(ha->subsys);
- ha->subsys = NULL;
- }
+ kfree(ha->subsys);
+ ha->subsys = NULL;
if (ha->ioctl_data) {
pci_free_consistent(ha->pcidev, ha->ioctl_len,
uint32_t bits;
METHOD_TRACE("ips_is_init_morpheus", 1);
+
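+	/* An interrupt already pending at init time means the adapter     */
+	/* state is unknown; flush the cache and reset before trusting it. */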
+ if (ips_isintr_morpheus(ha))
+ ips_flush_and_reset(ha);
post = readl(ha->mem_ptr + IPS_REG_I960_MSG0);
bits = readl(ha->mem_ptr + IPS_REG_I2O_HIR);
return (1);
}
+/****************************************************************************/
+/* */
+/* Routine Name: ips_flush_and_reset */
+/* */
+/* Routine Description: */
+/* */
+/* Perform cleanup ( FLUSH and RESET ) when the adapter is in an unknown */
+/* state ( was trying to INIT and an interrupt was already pending ) ... */
+/* */
+/****************************************************************************/
+static void
+ips_flush_and_reset(ips_ha_t *ha)
+{
+ ips_scb_t *scb;
+ int ret;
+ int time;
+ int done;
+ dma_addr_t command_dma;
+
+	/* Create a usable SCB */
+ scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
+ if (scb) {
+ memset(scb, 0, sizeof(ips_scb_t));
+ ips_init_scb(ha, scb);
+ scb->scb_busaddr = command_dma;
+
+ scb->timeout = ips_cmd_timeout;
+ scb->cdb[0] = IPS_CMD_FLUSH;
+
+ scb->cmd.flush_cache.op_code = IPS_CMD_FLUSH;
+ scb->cmd.flush_cache.command_id = IPS_MAX_CMDS; /* Use an ID that would otherwise not exist */
+ scb->cmd.flush_cache.state = IPS_NORM_STATE;
+ scb->cmd.flush_cache.reserved = 0;
+ scb->cmd.flush_cache.reserved2 = 0;
+ scb->cmd.flush_cache.reserved3 = 0;
+ scb->cmd.flush_cache.reserved4 = 0;
+
+ ret = ips_send_cmd(ha, scb); /* Send the Flush Command */
+
+ if (ret == IPS_SUCCESS) {
+ time = 60 * IPS_ONE_SEC; /* Max Wait time is 60 seconds */
+ done = 0;
+
+ while ((time > 0) && (!done)) {
+ done = ips_poll_for_flush_complete(ha);
+				/* This may look evil, but it's only done during extremely rare start-up conditions! */
+ udelay(1000);
+ time--;
+ }
+ }
+ }
+
+ /* Now RESET and INIT the adapter */
+ (*ha->func.reset) (ha);
+
+	/* scb may be NULL if the allocation above failed */
+	if (scb)
+		pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb,
+				    command_dma);
+ return;
+}
+
+/****************************************************************************/
+/* */
+/* Routine Name: ips_poll_for_flush_complete */
+/* */
+/* Routine Description: */
+/* */
+/* Poll for the Flush Command issued by ips_flush_and_reset() to complete */
+/* All other responses are just taken off the queue and ignored */
+/* */
+/****************************************************************************/
+static int
+ips_poll_for_flush_complete(ips_ha_t * ha)
+{
+ IPS_STATUS cstatus;
+
+ while (TRUE) {
+ cstatus.value = (*ha->func.statupd) (ha);
+
+ if (cstatus.value == 0xffffffff) /* If No Interrupt to process */
+ break;
+
+ /* Success is when we see the Flush Command ID */
+		if (cstatus.fields.command_id == IPS_MAX_CMDS)
+ return 1;
+ }
+
+ return 0;
+}
+
/****************************************************************************/
/* */
/* Routine Name: ips_enable_int_copperhead */