X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=drivers%2Fscsi%2Flpfc%2Flpfc_scsi.c;h=0910a9ab76a506014073c7af98ad61df8a2a6100;hb=dd32f7effdd2f3f348ef91ca1649d78a0ab2b103;hp=70255c11d3adde772d88f8288441c8fed324b98e;hpb=0a3abcf75bf391fec4e32356ab5ddb8f5d2e6b41;p=linux-2.6

diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 70255c11d3..0910a9ab76 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -169,6 +169,9 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 	for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
 		shost = lpfc_shost_from_vport(vports[i]);
 		shost_for_each_device(sdev, shost) {
+			if (vports[i]->cfg_lun_queue_depth <=
+			    sdev->queue_depth)
+				continue;
 			if (sdev->ordered_tags)
 				scsi_adjust_queue_depth(sdev,
 							MSG_ORDERED_TAG,
@@ -578,14 +581,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
 			    lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
 				cmd->result = ScsiResult(DID_REQUEUE, 0);
-			break;
-			} /* else: fall through */
+				break;
+			} /* else: fall through */
 		default:
 			cmd->result = ScsiResult(DID_ERROR, 0);
 			break;
 		}
 
-		if ((pnode == NULL )
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
 			cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
 	} else {
@@ -606,6 +609,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	result = cmd->result;
 	sdev = cmd->device;
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
+	spin_lock_irqsave(sdev->host->host_lock, flags);
+	lpfc_cmd->pCmd = NULL;	/* This must be done before scsi_done */
+	spin_unlock_irqrestore(sdev->host->host_lock, flags);
 	cmd->scsi_done(cmd);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
@@ -614,7 +620,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		 * wake up the thread.
 		 */
 		spin_lock_irqsave(sdev->host->host_lock, flags);
-		lpfc_cmd->pCmd = NULL;
 		if (lpfc_cmd->waitq)
 			wake_up(lpfc_cmd->waitq);
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -626,7 +631,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	if (!result)
 		lpfc_rampup_queue_depth(vport, sdev);
 
-	if (!result && pnode != NULL &&
+	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
 	   ((jiffies - pnode->last_ramp_up_time) >
 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
 	   ((jiffies - pnode->last_q_full_time) >
@@ -654,7 +659,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	 * Check for queue full.  If the lun is reporting queue full, then
 	 * back off the lun queue depth to prevent target overloads.
 	 */
-	if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
+	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
+	    NLP_CHK_NODE_ACT(pnode)) {
 		pnode->last_q_full_time = jiffies;
 
 		shost_for_each_device(tmp_sdev, sdev->host) {
@@ -684,7 +690,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 		 * wake up the thread.
 		 */
 		spin_lock_irqsave(sdev->host->host_lock, flags);
-		lpfc_cmd->pCmd = NULL;
 		if (lpfc_cmd->waitq)
 			wake_up(lpfc_cmd->waitq);
 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
@@ -704,6 +709,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	int datadir = scsi_cmnd->sc_data_direction;
 	char tag[2];
 
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return;
+
 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
 	/* clear task management bits */
 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
@@ -785,9 +793,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
 
-	if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
 		return 0;
-	}
 
 	piocbq = &(lpfc_cmd->cur_iocbq);
 	piocbq->vport = vport;
@@ -842,7 +850,7 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
 	struct lpfc_iocbq *iocbqrsp;
 	int ret;
 
-	if (!rdata->pnode)
+	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
 		return FAILED;
 	lpfc_cmd->rdata = rdata;
 
@@ -959,7 +967,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	 * Catch race where our node has transitioned, but the
 	 * transport is still transitioning.
 	 */
-	if (!ndlp) {
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
 		goto out_fail_command;
 	}
@@ -1146,7 +1154,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	 * target is rediscovered or devloss timeout expires.
 	 */
 	while (1) {
-		if (!pnode)
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 			goto out;
 
 		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
@@ -1162,7 +1170,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 			goto out;
 		}
 		pnode = rdata->pnode;
-		if (!pnode)
+		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 			goto out;
 	}
 	if (pnode->nlp_state == NLP_STE_MAPPED_NODE)