/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2007 Emulex.  All rights reserved.          *
+ * Copyright (C) 2004-2008 Emulex.  All rights reserved.          *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
-		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth =
						sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
							  new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
-	lpfc_destroy_vport_work_array(vports);
+	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
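
/*
 * Illustrative sketch, not part of the patch: the vport work-array
 * pattern used above.  lpfc_create_vport_work_array() returns a
 * NULL-terminated array sized from the HBA's vpi limit, which is why
 * the loop bound changed from the compile-time LPFC_MAX_VPORTS to
 * phba->max_vpi, and why lpfc_destroy_vport_work_array() now takes the
 * phba as well (presumably so the destroy path can walk the array by
 * the same bound when releasing it).  example_walk_vports() is a
 * hypothetical helper shown only to isolate the pattern.
 */
static void example_walk_vports(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
			; /* operate on vports[i] here */
	lpfc_destroy_vport_work_array(phba, vports);
}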
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
-		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
+		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
+				if (vports[i]->cfg_lun_queue_depth <=
+				    sdev->queue_depth)
+					continue;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
			}
		}
-	lpfc_destroy_vport_work_array(vports);
+	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
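
/*
 * Illustrative sketch, not part of the patch: the effect of the new
 * guard in the ramp-up loop above, isolated into a hypothetical
 * helper.  Depth only grows while the LUN is still below the
 * administrator-configured cfg_lun_queue_depth cap.
 */
static void example_ramp_up_one(struct lpfc_vport *vport,
				struct scsi_device *sdev)
{
	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;		/* already at (or above) the cap */
	scsi_adjust_queue_depth(sdev,
				sdev->ordered_tags ? MSG_ORDERED_TAG
						   : MSG_SIMPLE_TAG,
				sdev->queue_depth + 1);
}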
	iocb_cmd->un.fcpi64.bdl.bdeSize =
			(num_bde * sizeof (struct ulp_bde64));
iocb_cmd->ulpBdeCount = 1;
iocb_cmd->ulpLe = 1;
- fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
+ fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
return 0;
}
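
/*
 * Illustrative sketch, not part of the patch: why the fcpDl fix
 * matters.  fcpDl is a big-endian wire field, so a CPU-order byte
 * count must be stored with cpu_to_be32(); be32_to_cpu() converts in
 * the opposite direction and only appeared correct because both are
 * no-ops on big-endian hosts.  The bug surfaces on little-endian
 * machines.
 */
	u32 len = scsi_bufflen(scsi_cmnd);	/* CPU byte order */
	fcp_cmnd->fcpDl = cpu_to_be32(len);	/* wire (big-endian) order */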
		    lpfc_cmd->result == IOERR_NO_RESOURCES ||
		    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
			cmd->result = ScsiResult(DID_REQUEUE, 0);
			break;
		} /* else: fall through */
	default:
		cmd->result = ScsiResult(DID_ERROR, 0);
		break;
	}
- if ((pnode == NULL )
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
} else {
result = cmd->result;
sdev = cmd->device;
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+ spin_lock_irqsave(sdev->host->host_lock, flags);
+ lpfc_cmd->pCmd = NULL; /* This must be done before scsi_done */
+ spin_unlock_irqrestore(sdev->host->host_lock, flags);
cmd->scsi_done(cmd);
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
spin_lock_irqsave(sdev->host->host_lock, flags);
- lpfc_cmd->pCmd = NULL;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock_irqrestore(sdev->host->host_lock, flags);
if (!result)
lpfc_rampup_queue_depth(vport, sdev);
- if (!result && pnode != NULL &&
+ if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
((jiffies - pnode->last_ramp_up_time) >
LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
((jiffies - pnode->last_q_full_time) >
	/*
	 * Check for queue full. If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
- if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
+ if (result == SAM_STAT_TASK_SET_FULL && pnode &&
+ NLP_CHK_NODE_ACT(pnode)) {
pnode->last_q_full_time = jiffies;
shost_for_each_device(tmp_sdev, sdev->host) {
	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
spin_lock_irqsave(sdev->host->host_lock, flags);
- lpfc_cmd->pCmd = NULL;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock_irqrestore(sdev->host->host_lock, flags);
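
/*
 * Illustrative ordering sketch, not part of the patch: pCmd is now
 * cleared under the host lock *before* scsi_done() runs, because the
 * midlayer may recycle the scsi_cmnd as soon as it completes.  The
 * abort and reset handlers locate commands through pCmd, so they must
 * never observe a pointer to a command that has already been returned.
 */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	lpfc_cmd->pCmd = NULL;		/* detach from the scsi_cmnd */
	spin_unlock_irqrestore(sdev->host->host_lock, flags);
	cmd->scsi_done(cmd);		/* cmd may be reused after this */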
int datadir = scsi_cmnd->sc_data_direction;
char tag[2];
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ return;
+
lpfc_cmd->fcp_rsp->rspSnsLen = 0;
/* clear task management bits */
lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
piocbq->iocb.ulpContext = pnode->nlp_rpi;
if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
piocbq->iocb.ulpFCP2Rcvy = 1;
+ else
+ piocbq->iocb.ulpFCP2Rcvy = 0;
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
piocbq->context1 = lpfc_cmd;
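
/*
 * Illustrative sketch, not part of the patch: cur_iocbq is recycled
 * from command to command, so fields set only conditionally must be
 * written on both paths.  Without the new else, an IOCB last used for
 * an FCP-2 device could carry a stale ulpFCP2Rcvy bit to a non-FCP-2
 * target.  An equivalent branchless form:
 */
	piocbq->iocb.ulpFCP2Rcvy =
		(pnode->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;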
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *ndlp = rdata->pnode;
- if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ ndlp->nlp_state != NLP_STE_MAPPED_NODE)
return 0;
- }
piocbq = &(lpfc_cmd->cur_iocbq);
piocbq->vport = vport;
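
/*
 * Illustrative sketch, not part of the patch: the assumed shape of the
 * activity test threaded through this change.  NLP_CHK_NODE_ACT()
 * reads the node's usage map, so a node that discovery has started to
 * recover or free fails the test even while the pointer itself is
 * still non-NULL, roughly:
 *
 *	#define NLP_CHK_NODE_ACT(ndlp) \
 *		((ndlp)->nlp_usg_map & NLP_USG_NODE_ACT_BIT)
 *
 * Every dereference of an ndlp that can change state behind the SCSI
 * path is therefore gated on both a NULL test and this activity test.
 */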
struct lpfc_iocbq *iocbqrsp;
int ret;
- if (!rdata->pnode)
+ if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
return FAILED;
lpfc_cmd->rdata = rdata;
* Catch race where our node has transitioned, but the
* transport is still transitioning.
*/
- if (!ndlp) {
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
goto out_fail_command;
}
* target is rediscovered or devloss timeout expires.
*/
while (1) {
- if (!pnode)
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
goto out;
if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
goto out;
}
pnode = rdata->pnode;
- if (!pnode)
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
goto out;
}
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
match = 0;
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
i == ndlp->nlp_sid &&
ndlp->rport) {
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
- .sg_tablesize = LPFC_SG_SEG_CNT,
- .use_sg_chaining = ENABLE_SG_CHAINING,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = lpfc_hba_attrs,
.slave_destroy = lpfc_slave_destroy,
.scan_finished = lpfc_scan_finished,
.this_id = -1,
- .sg_tablesize = LPFC_SG_SEG_CNT,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.use_clustering = ENABLE_CLUSTERING,
- .use_sg_chaining = ENABLE_SG_CHAINING,
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
};
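
/*
 * Illustrative note, not part of the patch: use_sg_chaining disappears
 * because scatter-gather chaining became unconditional in the SCSI
 * midlayer, and sg_tablesize now starts from LPFC_DEFAULT_SG_SEG_CNT
 * rather than a hard compile-time limit, so the driver can raise it at
 * attach time, e.g. (assumed config field):
 *
 *	shost->sg_tablesize = phba->cfg_sg_seg_cnt;
 */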