linux-2.6: drivers/scsi/lpfc/lpfc_scsi.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_tcq.h>
30 #include <scsi/scsi_transport_fc.h>
31
32 #include "lpfc_version.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_logmsg.h"
39 #include "lpfc_crtn.h"
40 #include "lpfc_vport.h"
41
42 #define LPFC_RESET_WAIT  2
43 #define LPFC_ABORT_WAIT  2
44
45 /*
46  * This function is called with no lock held when there is a resource
47  * error in driver or in firmware.
48  */
49 void
50 lpfc_adjust_queue_depth(struct lpfc_hba *phba)
51 {
52         unsigned long flags;
53
54         spin_lock_irqsave(&phba->hbalock, flags);
55         atomic_inc(&phba->num_rsrc_err);
56         phba->last_rsrc_error_time = jiffies;
57
58         if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
59                 spin_unlock_irqrestore(&phba->hbalock, flags);
60                 return;
61         }
62
63         phba->last_ramp_down_time = jiffies;
64
65         spin_unlock_irqrestore(&phba->hbalock, flags);
66
67         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
68         if ((phba->pport->work_port_events &
69                 WORKER_RAMP_DOWN_QUEUE) == 0) {
70                 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
71         }
72         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
73
74         spin_lock_irqsave(&phba->hbalock, flags);
75         if (phba->work_wait)
76                 wake_up(phba->work_wait);
77         spin_unlock_irqrestore(&phba->hbalock, flags);
78
79         return;
80 }
81
82 /*
83  * This function is called with no lock held when there is a successful
84  * SCSI command completion.
85  */
86 static inline void
87 lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
88                         struct scsi_device *sdev)
89 {
90         unsigned long flags;
91         struct lpfc_hba *phba = vport->phba;
92         atomic_inc(&phba->num_cmd_success);
93
94         if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
95                 return;
96         spin_lock_irqsave(&phba->hbalock, flags);
97         if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
98          ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
99                 spin_unlock_irqrestore(&phba->hbalock, flags);
100                 return;
101         }
102         phba->last_ramp_up_time = jiffies;
103         spin_unlock_irqrestore(&phba->hbalock, flags);
104
105         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
106         if ((phba->pport->work_port_events &
107                 WORKER_RAMP_UP_QUEUE) == 0) {
108                 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
109         }
110         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
111
112         spin_lock_irqsave(&phba->hbalock, flags);
113         if (phba->work_wait)
114                 wake_up(phba->work_wait);
115         spin_unlock_irqrestore(&phba->hbalock, flags);
116 }
117
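/*
 * Worker-thread handler for WORKER_RAMP_DOWN_QUEUE.  Lowers the queue
 * depth of every scsi device on every vport in proportion to the number
 * of resource errors seen since the last adjustment, then clears the
 * resource-error and command-success counters.
 */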
118 void
119 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
120 {
121         struct lpfc_vport **vports;
122         struct Scsi_Host  *shost;
123         struct scsi_device *sdev;
124         unsigned long new_queue_depth;
125         unsigned long num_rsrc_err, num_cmd_success;
126         int i;
127
128         num_rsrc_err = atomic_read(&phba->num_rsrc_err);
129         num_cmd_success = atomic_read(&phba->num_cmd_success);
130
131         vports = lpfc_create_vport_work_array(phba);
132         if (vports != NULL)
133                 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
134                         shost = lpfc_shost_from_vport(vports[i]);
135                         shost_for_each_device(sdev, shost) {
136                                 new_queue_depth =
137                                         sdev->queue_depth * num_rsrc_err /
138                                         (num_rsrc_err + num_cmd_success);
139                                 if (!new_queue_depth)
140                                         new_queue_depth = sdev->queue_depth - 1;
141                                 else
142                                         new_queue_depth = sdev->queue_depth -
143                                                                 new_queue_depth;
144                                 if (sdev->ordered_tags)
145                                         scsi_adjust_queue_depth(sdev,
146                                                         MSG_ORDERED_TAG,
147                                                         new_queue_depth);
148                                 else
149                                         scsi_adjust_queue_depth(sdev,
150                                                         MSG_SIMPLE_TAG,
151                                                         new_queue_depth);
152                         }
153                 }
154         lpfc_destroy_vport_work_array(vports);
155         atomic_set(&phba->num_rsrc_err, 0);
156         atomic_set(&phba->num_cmd_success, 0);
157 }
158
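/*
 * Worker-thread handler for WORKER_RAMP_UP_QUEUE.  Increases the queue
 * depth of every scsi device on every vport by one and clears the
 * resource-error and command-success counters.
 */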
159 void
160 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
161 {
162         struct lpfc_vport **vports;
163         struct Scsi_Host  *shost;
164         struct scsi_device *sdev;
165         int i;
166
167         vports = lpfc_create_vport_work_array(phba);
168         if (vports != NULL)
169                 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
170                         shost = lpfc_shost_from_vport(vports[i]);
171                         shost_for_each_device(sdev, shost) {
172                                 if (sdev->ordered_tags)
173                                         scsi_adjust_queue_depth(sdev,
174                                                         MSG_ORDERED_TAG,
175                                                         sdev->queue_depth+1);
176                                 else
177                                         scsi_adjust_queue_depth(sdev,
178                                                         MSG_SIMPLE_TAG,
179                                                         sdev->queue_depth+1);
180                         }
181                 }
182         lpfc_destroy_vport_work_array(vports);
183         atomic_set(&phba->num_rsrc_err, 0);
184         atomic_set(&phba->num_cmd_success, 0);
185 }
186
187 /*
188  * This routine allocates a scsi buffer, which contains all the
189  * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
190  * contains information to build the IOCB.  The DMAable region contains
191  * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
192  * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
193  * and the BPL BDE is setup in the IOCB.
194  */
195 static struct lpfc_scsi_buf *
196 lpfc_new_scsi_buf(struct lpfc_vport *vport)
197 {
198         struct lpfc_hba *phba = vport->phba;
199         struct lpfc_scsi_buf *psb;
200         struct ulp_bde64 *bpl;
201         IOCB_t *iocb;
202         dma_addr_t pdma_phys;
203         uint16_t iotag;
204
205         psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
206         if (!psb)
207                 return NULL;
208
209         /*
210          * Get memory from the pci pool to map the virt space to pci bus space
211          * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
212          * struct fcp_rsp and the number of bde's necessary to support the
213          * sg_tablesize.
214          */
215         psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
216                                                         &psb->dma_handle);
217         if (!psb->data) {
218                 kfree(psb);
219                 return NULL;
220         }
221
222         /* Initialize virtual ptrs to dma_buf region. */
223         memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
224
225         /* Allocate iotag for psb->cur_iocbq. */
226         iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
227         if (iotag == 0) {
228                 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
229                               psb->data, psb->dma_handle);
230                 kfree (psb);
231                 return NULL;
232         }
233         psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
234
235         psb->fcp_cmnd = psb->data;
236         psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
237         psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
238                                                         sizeof(struct fcp_rsp);
239
240         /* Initialize local short-hand pointers. */
241         bpl = psb->fcp_bpl;
242         pdma_phys = psb->dma_handle;
243
244         /*
245          * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
246          * list bdes.  Initialize the first two and leave the rest for
247          * queuecommand.
248          */
249         bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
250         bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
251         bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
252         bpl->tus.f.bdeFlags = BUFF_USE_CMND;
253         bpl->tus.w = le32_to_cpu(bpl->tus.w);
254         bpl++;
255
256         /* Setup the physical region for the FCP RSP */
257         pdma_phys += sizeof (struct fcp_cmnd);
258         bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
259         bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
260         bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
261         bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
262         bpl->tus.w = le32_to_cpu(bpl->tus.w);
263
264         /*
265          * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
266          * initialize it with all known data now.
267          */
268         pdma_phys += (sizeof (struct fcp_rsp));
269         iocb = &psb->cur_iocbq.iocb;
270         iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
271         iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
272         iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
273         iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
274         iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
275         iocb->ulpBdeCount = 1;
276         iocb->ulpClass = CLASS3;
277
278         return psb;
279 }
280
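/*
 * Remove and return the first scsi buffer from the HBA's pre-allocated
 * lpfc_scsi_buf_list, or NULL if the list is empty.
 */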
281 static struct lpfc_scsi_buf*
282 lpfc_get_scsi_buf(struct lpfc_hba * phba)
283 {
284         struct  lpfc_scsi_buf * lpfc_cmd = NULL;
285         struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
286         unsigned long iflag = 0;
287
288         spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
289         list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
290         if (lpfc_cmd) {
291                 lpfc_cmd->seg_cnt = 0;
292                 lpfc_cmd->nonsg_phys = 0;
293         }
294         spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
295         return  lpfc_cmd;
296 }
297
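/*
 * Return a scsi buffer to the tail of the HBA's lpfc_scsi_buf_list so it
 * can be reused for a later command.
 */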
298 static void
299 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
300 {
301         unsigned long iflag = 0;
302
303         spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
304         psb->pCmd = NULL;
305         list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
306         spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
307 }
308
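/*
 * DMA-map the scatter-gather list of a scsi command (if it has one) and
 * build one BDE per mapped segment in the BPL that follows the FCP CMND
 * and FCP RSP entries.  Returns 0 on success, 1 if the mapping fails or
 * exceeds the configured segment limit.
 */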
309 static int
310 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
311 {
312         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
313         struct scatterlist *sgel = NULL;
314         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
315         struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
316         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
317         dma_addr_t physaddr;
318         uint32_t i, num_bde = 0;
319         int nseg, datadir = scsi_cmnd->sc_data_direction;
320
321         /*
322          * There are three possibilities here - use scatter-gather segment, use
323          * the single mapping, or neither.  Start the lpfc command prep by
324          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
325          * data bde entry.
326          */
327         bpl += 2;
328         if (scsi_sg_count(scsi_cmnd)) {
329                 /*
330                  * The driver stores the segment count returned from dma_map_sg
331                  * because this is a count of dma-mappings used to map the use_sg
332                  * pages.  They are not guaranteed to be the same for those
333                  * architectures that implement an IOMMU.
334                  */
335
336                 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
337                                   scsi_sg_count(scsi_cmnd), datadir);
338                 if (unlikely(!nseg))
339                         return 1;
340
341                 lpfc_cmd->seg_cnt = nseg;
342                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
343                         printk(KERN_ERR "%s: Too many sg segments from "
344                                "dma_map_sg.  Config %d, seg_cnt %d\n",
345                                __FUNCTION__, phba->cfg_sg_seg_cnt,
346                                lpfc_cmd->seg_cnt);
347                         scsi_dma_unmap(scsi_cmnd);
348                         return 1;
349                 }
350
351                 /*
352                  * The driver established a maximum scatter-gather segment count
353                  * during probe that limits the number of sg elements in any
354                  * single scsi command.  Just run through the seg_cnt and format
355                  * the bde's.
356                  */
357                 scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
358                         physaddr = sg_dma_address(sgel);
359                         bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
360                         bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
361                         bpl->tus.f.bdeSize = sg_dma_len(sgel);
362                         if (datadir == DMA_TO_DEVICE)
363                                 bpl->tus.f.bdeFlags = 0;
364                         else
365                                 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
366                         bpl->tus.w = le32_to_cpu(bpl->tus.w);
367                         bpl++;
368                         num_bde++;
369                 }
370         }
371
372         /*
373          * Finish initializing those IOCB fields that are dependent on the
374          * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
375          * reinitialized since all iocb memory resources are used many times
376          * for transmit, receive, and continuation bpl's.
377          */
378         iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
379         iocb_cmd->un.fcpi64.bdl.bdeSize +=
380                 (num_bde * sizeof (struct ulp_bde64));
381         iocb_cmd->ulpBdeCount = 1;
382         iocb_cmd->ulpLe = 1;
383         fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
384         return 0;
385 }
386
387 static void
388 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
389 {
390         /*
391          * There are only two special cases to consider.  (1) the scsi command
392          * requested scatter-gather usage or (2) the scsi command allocated
393          * a request buffer, but did not request use_sg.  There is a third
394          * case, but it does not require resource deallocation.
395          */
396         if (psb->seg_cnt > 0)
397                 scsi_dma_unmap(psb->pCmd);
398 }
399
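/*
 * Examine the FCP RSP payload of a command that completed with
 * IOSTAT_FCP_RSP_ERROR: copy any sense data, check for residual
 * underrun/overrun and read-check conditions, and set cmnd->result
 * accordingly.
 */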
400 static void
401 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
402                     struct lpfc_iocbq *rsp_iocb)
403 {
404         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
405         struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
406         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
407         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
408         uint32_t resp_info = fcprsp->rspStatus2;
409         uint32_t scsi_status = fcprsp->rspStatus3;
410         uint32_t *lp;
411         uint32_t host_status = DID_OK;
412         uint32_t rsplen = 0;
413         uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
414
415         /*
416          *  If this is a task management command, there is no
417          *  scsi packet associated with this lpfc_cmd.  The driver
418          *  consumes it.
419          */
420         if (fcpcmd->fcpCntl2) {
421                 scsi_status = 0;
422                 goto out;
423         }
424
425         if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
426                 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
427                 if (snslen > SCSI_SENSE_BUFFERSIZE)
428                         snslen = SCSI_SENSE_BUFFERSIZE;
429
430                 if (resp_info & RSP_LEN_VALID)
431                   rsplen = be32_to_cpu(fcprsp->rspRspLen);
432                 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
433         }
434         lp = (uint32_t *)cmnd->sense_buffer;
435
436         if (!scsi_status && (resp_info & RESID_UNDER))
437                 logit = LOG_FCP;
438
439         lpfc_printf_vlog(vport, KERN_WARNING, logit,
440                          "0730 FCP command x%x failed: x%x SNS x%x x%x "
441                          "Data: x%x x%x x%x x%x x%x\n",
442                          cmnd->cmnd[0], scsi_status,
443                          be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
444                          be32_to_cpu(fcprsp->rspResId),
445                          be32_to_cpu(fcprsp->rspSnsLen),
446                          be32_to_cpu(fcprsp->rspRspLen),
447                          fcprsp->rspInfo3);
448
449         if (resp_info & RSP_LEN_VALID) {
450                 rsplen = be32_to_cpu(fcprsp->rspRspLen);
451                 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
452                     (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
453                         host_status = DID_ERROR;
454                         goto out;
455                 }
456         }
457
458         scsi_set_resid(cmnd, 0);
459         if (resp_info & RESID_UNDER) {
460                 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
461
462                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
463                                  "0716 FCP Read Underrun, expected %d, "
464                                  "residual %d Data: x%x x%x x%x\n",
465                                  be32_to_cpu(fcpcmd->fcpDl),
466                                  scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
467                                  cmnd->underflow);
468
469                 /*
470                  * If there is an underrun, check whether the underrun reported
471                  * by the storage array matches the underrun reported by the HBA.
472                  * If they do not match, a frame was dropped.
473                  */
474                 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
475                         fcpi_parm &&
476                         (scsi_get_resid(cmnd) != fcpi_parm)) {
477                         lpfc_printf_vlog(vport, KERN_WARNING,
478                                          LOG_FCP | LOG_FCP_ERROR,
479                                          "0735 FCP Read Check Error "
480                                          "and Underrun Data: x%x x%x x%x x%x\n",
481                                          be32_to_cpu(fcpcmd->fcpDl),
482                                          scsi_get_resid(cmnd), fcpi_parm,
483                                          cmnd->cmnd[0]);
484                         scsi_set_resid(cmnd, scsi_bufflen(cmnd));
485                         host_status = DID_ERROR;
486                 }
487                 /*
488                  * The cmnd->underflow is the minimum number of bytes that must
489                  * be transferred for this command.  Provided a sense condition
490                  * is not present, make sure the actual amount transferred is at
491                  * least the underflow value or fail.
492                  */
493                 if (!(resp_info & SNS_LEN_VALID) &&
494                     (scsi_status == SAM_STAT_GOOD) &&
495                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
496                      < cmnd->underflow)) {
497                         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
498                                          "0717 FCP command x%x residual "
499                                          "underrun converted to error "
500                                          "Data: x%x x%x x%x\n",
501                                          cmnd->cmnd[0], scsi_bufflen(cmnd),
502                                          scsi_get_resid(cmnd), cmnd->underflow);
503                         host_status = DID_ERROR;
504                 }
505         } else if (resp_info & RESID_OVER) {
506                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
507                                  "0720 FCP command x%x residual overrun error. "
508                                  "Data: x%x x%x\n", cmnd->cmnd[0],
509                                  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
510                 host_status = DID_ERROR;
511
512         /*
513          * Check the SLI validation that the entire transfer was actually
514          * done (fcpi_parm should be zero).  Apply this check only to reads.
515          */
516         } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
517                         (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
518                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
519                                  "0734 FCP Read Check Error Data: "
520                                  "x%x x%x x%x x%x\n",
521                                  be32_to_cpu(fcpcmd->fcpDl),
522                                  be32_to_cpu(fcprsp->rspResId),
523                                  fcpi_parm, cmnd->cmnd[0]);
524                 host_status = DID_ERROR;
525                 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
526         }
527
528  out:
529         cmnd->result = ScsiResult(host_status, scsi_status);
530 }
531
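/*
 * IOCB completion handler for FCP commands.  Translates the IOCB status
 * into a midlayer result, completes the command, and then performs
 * queue-depth ramp-up or queue-full handling before releasing the scsi
 * buffer.
 */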
532 static void
533 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
534                         struct lpfc_iocbq *pIocbOut)
535 {
536         struct lpfc_scsi_buf *lpfc_cmd =
537                 (struct lpfc_scsi_buf *) pIocbIn->context1;
538         struct lpfc_vport      *vport = pIocbIn->vport;
539         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
540         struct lpfc_nodelist *pnode = rdata->pnode;
541         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
542         int result;
543         struct scsi_device *sdev, *tmp_sdev;
544         int depth = 0;
545         unsigned long flags;
546
547         lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
548         lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
549
550         if (lpfc_cmd->status) {
551                 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
552                     (lpfc_cmd->result & IOERR_DRVR_MASK))
553                         lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
554                 else if (lpfc_cmd->status >= IOSTAT_CNT)
555                         lpfc_cmd->status = IOSTAT_DEFAULT;
556
557                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
558                                  "0729 FCP cmd x%x failed <%d/%d> "
559                                  "status: x%x result: x%x Data: x%x x%x\n",
560                                  cmd->cmnd[0],
561                                  cmd->device ? cmd->device->id : 0xffff,
562                                  cmd->device ? cmd->device->lun : 0xffff,
563                                  lpfc_cmd->status, lpfc_cmd->result,
564                                  pIocbOut->iocb.ulpContext,
565                                  lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
566
567                 switch (lpfc_cmd->status) {
568                 case IOSTAT_FCP_RSP_ERROR:
569                         /* Call FCP RSP handler to determine result */
570                         lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
571                         break;
572                 case IOSTAT_NPORT_BSY:
573                 case IOSTAT_FABRIC_BSY:
574                         cmd->result = ScsiResult(DID_BUS_BUSY, 0);
575                         break;
576                 case IOSTAT_LOCAL_REJECT:
577                         if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
578                             lpfc_cmd->result == IOERR_NO_RESOURCES ||
579                             lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
580                                 cmd->result = ScsiResult(DID_REQUEUE, 0);
581                                 break;
582                         } /* else: fall through */
583                 default:
584                         cmd->result = ScsiResult(DID_ERROR, 0);
585                         break;
586                 }
587
588                 if ((pnode == NULL )
589                     || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
590                         cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
591         } else {
592                 cmd->result = ScsiResult(DID_OK, 0);
593         }
594
595         if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
596                 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
597
598                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
599                                  "0710 Iodone <%d/%d> cmd %p, error "
600                                  "x%x SNS x%x x%x Data: x%x x%x\n",
601                                  cmd->device->id, cmd->device->lun, cmd,
602                                  cmd->result, *lp, *(lp + 3), cmd->retries,
603                                  scsi_get_resid(cmd));
604         }
605
606         result = cmd->result;
607         sdev = cmd->device;
608         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
609         cmd->scsi_done(cmd);
610
611         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
612                 /*
613                  * If there is a thread waiting for command completion,
614                  * wake up the thread.
615                  */
616                 spin_lock_irqsave(sdev->host->host_lock, flags);
617                 lpfc_cmd->pCmd = NULL;
618                 if (lpfc_cmd->waitq)
619                         wake_up(lpfc_cmd->waitq);
620                 spin_unlock_irqrestore(sdev->host->host_lock, flags);
621                 lpfc_release_scsi_buf(phba, lpfc_cmd);
622                 return;
623         }
624
625
626         if (!result)
627                 lpfc_rampup_queue_depth(vport, sdev);
628
629         if (!result && pnode != NULL &&
630            ((jiffies - pnode->last_ramp_up_time) >
631                 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
632            ((jiffies - pnode->last_q_full_time) >
633                 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
634            (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
635                 shost_for_each_device(tmp_sdev, sdev->host) {
636                         if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
637                                 if (tmp_sdev->id != sdev->id)
638                                         continue;
639                                 if (tmp_sdev->ordered_tags)
640                                         scsi_adjust_queue_depth(tmp_sdev,
641                                                 MSG_ORDERED_TAG,
642                                                 tmp_sdev->queue_depth+1);
643                                 else
644                                         scsi_adjust_queue_depth(tmp_sdev,
645                                                 MSG_SIMPLE_TAG,
646                                                 tmp_sdev->queue_depth+1);
647
648                                 pnode->last_ramp_up_time = jiffies;
649                         }
650                 }
651         }
652
653         /*
654          * Check for queue full.  If the lun is reporting queue full, then
655          * back off the lun queue depth to prevent target overloads.
656          */
657         if (result == SAM_STAT_TASK_SET_FULL && pnode != NULL) {
658                 pnode->last_q_full_time = jiffies;
659
660                 shost_for_each_device(tmp_sdev, sdev->host) {
661                         if (tmp_sdev->id != sdev->id)
662                                 continue;
663                         depth = scsi_track_queue_full(tmp_sdev,
664                                         tmp_sdev->queue_depth - 1);
665                 }
666                 /*
667                  * The queue depth cannot be lowered any more.
668                  * Modify the returned error code to store
669                  * the final depth value set by
670                  * scsi_track_queue_full.
671                  */
672                 if (depth == -1)
673                         depth = sdev->host->cmd_per_lun;
674
675                 if (depth) {
676                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
677                                          "0711 detected queue full - lun queue "
678                                          "depth adjusted to %d.\n", depth);
679                 }
680         }
681
682         /*
683          * If there is a thread waiting for command completion
684          * If there is a thread waiting for command completion,
685          */
686         spin_lock_irqsave(sdev->host->host_lock, flags);
687         lpfc_cmd->pCmd = NULL;
688         if (lpfc_cmd->waitq)
689                 wake_up(lpfc_cmd->waitq);
690         spin_unlock_irqrestore(sdev->host->host_lock, flags);
691
692         lpfc_release_scsi_buf(phba, lpfc_cmd);
693 }
694
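/*
 * Build the FCP CMND payload and IOCB for a midlayer scsi command: LUN,
 * CDB, task attribute, the command type for the data direction, and the
 * RPI, class and FCP2 recovery bits taken from the node.
 */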
695 static void
696 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
697                     struct lpfc_nodelist *pnode)
698 {
699         struct lpfc_hba *phba = vport->phba;
700         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
701         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
702         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
703         struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
704         int datadir = scsi_cmnd->sc_data_direction;
705         char tag[2];
706
707         lpfc_cmd->fcp_rsp->rspSnsLen = 0;
708         /* clear task management bits */
709         lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
710
711         int_to_scsilun(lpfc_cmd->pCmd->device->lun,
712                         &lpfc_cmd->fcp_cmnd->fcp_lun);
713
714         memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
715
716         if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
717                 switch (tag[0]) {
718                 case HEAD_OF_QUEUE_TAG:
719                         fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
720                         break;
721                 case ORDERED_QUEUE_TAG:
722                         fcp_cmnd->fcpCntl1 = ORDERED_Q;
723                         break;
724                 default:
725                         fcp_cmnd->fcpCntl1 = SIMPLE_Q;
726                         break;
727                 }
728         } else
729                 fcp_cmnd->fcpCntl1 = 0;
730
731         /*
732          * There are three possibilities here - use scatter-gather segment, use
733          * the single mapping, or neither.  Pick the IOCB command type, the
734          * read-check parameter and the FCP control byte according to the data
735          * direction of the command.
736          */
737         if (scsi_sg_count(scsi_cmnd)) {
738                 if (datadir == DMA_TO_DEVICE) {
739                         iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
740                         iocb_cmd->un.fcpi.fcpi_parm = 0;
741                         iocb_cmd->ulpPU = 0;
742                         fcp_cmnd->fcpCntl3 = WRITE_DATA;
743                         phba->fc4OutputRequests++;
744                 } else {
745                         iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
746                         iocb_cmd->ulpPU = PARM_READ_CHECK;
747                         iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
748                         fcp_cmnd->fcpCntl3 = READ_DATA;
749                         phba->fc4InputRequests++;
750                 }
751         } else {
752                 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
753                 iocb_cmd->un.fcpi.fcpi_parm = 0;
754                 iocb_cmd->ulpPU = 0;
755                 fcp_cmnd->fcpCntl3 = 0;
756                 phba->fc4ControlRequests++;
757         }
758
759         /*
760          * Finish initializing those IOCB fields that are independent
761          * of the scsi_cmnd request_buffer
762          */
763         piocbq->iocb.ulpContext = pnode->nlp_rpi;
764         if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
765                 piocbq->iocb.ulpFCP2Rcvy = 1;
766
767         piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
768         piocbq->context1  = lpfc_cmd;
769         piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
770         piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
771         piocbq->vport = vport;
772 }
773
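/*
 * Build an FCP task management IOCB (e.g. target reset) for the given
 * LUN.  Returns 1 on success, or 0 if the node is not logged in and
 * mapped.
 */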
774 static int
775 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
776                              struct lpfc_scsi_buf *lpfc_cmd,
777                              unsigned int lun,
778                              uint8_t task_mgmt_cmd)
779 {
780         struct lpfc_iocbq *piocbq;
781         IOCB_t *piocb;
782         struct fcp_cmnd *fcp_cmnd;
783         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
784         struct lpfc_nodelist *ndlp = rdata->pnode;
785
786         if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
787                 return 0;
788         }
789
790         piocbq = &(lpfc_cmd->cur_iocbq);
791         piocbq->vport = vport;
792
793         piocb = &piocbq->iocb;
794
795         fcp_cmnd = lpfc_cmd->fcp_cmnd;
796         int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
797         fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
798
799         piocb->ulpCommand = CMD_FCP_ICMND64_CR;
800
801         piocb->ulpContext = ndlp->nlp_rpi;
802         if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
803                 piocb->ulpFCP2Rcvy = 1;
804         }
805         piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
806
807         /* ulpTimeout is only one byte */
808         if (lpfc_cmd->timeout > 0xff) {
809                 /*
810                  * Do not timeout the command at the firmware level.
811                  * The driver will provide the timeout mechanism.
812                  */
813                 piocb->ulpTimeout = 0;
814         } else {
815                 piocb->ulpTimeout = lpfc_cmd->timeout;
816         }
817
818         return 1;
819 }
820
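/*
 * Deferred completion handler used when a task management IOCB times
 * out; it releases the associated scsi buffer once the firmware finally
 * completes the command.
 */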
821 static void
822 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
823                         struct lpfc_iocbq *cmdiocbq,
824                         struct lpfc_iocbq *rspiocbq)
825 {
826         struct lpfc_scsi_buf *lpfc_cmd =
827                 (struct lpfc_scsi_buf *) cmdiocbq->context1;
828         if (lpfc_cmd)
829                 lpfc_release_scsi_buf(phba, lpfc_cmd);
830         return;
831 }
832
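/*
 * Issue an FCP target reset to the given target and wait for it to
 * complete.  Returns SUCCESS, FAILED, or the raw IOCB return code when
 * the issue/wait itself fails.
 */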
833 static int
834 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
835                     unsigned  tgt_id, unsigned int lun,
836                     struct lpfc_rport_data *rdata)
837 {
838         struct lpfc_hba   *phba = vport->phba;
839         struct lpfc_iocbq *iocbq;
840         struct lpfc_iocbq *iocbqrsp;
841         int ret;
842
843         if (!rdata->pnode)
844                 return FAILED;
845
846         lpfc_cmd->rdata = rdata;
847         ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
848                                            FCP_TARGET_RESET);
849         if (!ret)
850                 return FAILED;
851
852         iocbq = &lpfc_cmd->cur_iocbq;
853         iocbqrsp = lpfc_sli_get_iocbq(phba);
854
855         if (!iocbqrsp)
856                 return FAILED;
857
858         /* Issue Target Reset to TGT <num> */
859         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
860                          "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
861                          tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
862         ret = lpfc_sli_issue_iocb_wait(phba,
863                                        &phba->sli.ring[phba->sli.fcp_ring],
864                                        iocbq, iocbqrsp, lpfc_cmd->timeout);
865         if (ret != IOCB_SUCCESS) {
866                 if (ret == IOCB_TIMEDOUT)
867                         iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
868                 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
869         } else {
870                 ret = SUCCESS;
871                 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
872                 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
873                 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
874                         (lpfc_cmd->result & IOERR_DRVR_MASK))
875                                 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
876         }
877
878         lpfc_sli_release_iocbq(phba, iocbqrsp);
879         return ret;
880 }
881
882 const char *
883 lpfc_info(struct Scsi_Host *host)
884 {
885         struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
886         struct lpfc_hba   *phba = vport->phba;
887         int len;
888         static char  lpfcinfobuf[384];
889
890         memset(lpfcinfobuf,0,384);
891         if (phba && phba->pcidev){
892                 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
893                 len = strlen(lpfcinfobuf);
894                 snprintf(lpfcinfobuf + len,
895                         384-len,
896                         " on PCI bus %02x device %02x irq %d",
897                         phba->pcidev->bus->number,
898                         phba->pcidev->devfn,
899                         phba->pcidev->irq);
900                 len = strlen(lpfcinfobuf);
901                 if (phba->Port[0]) {
902                         snprintf(lpfcinfobuf + len,
903                                  384-len,
904                                  " port %s",
905                                  phba->Port);
906                 }
907         }
908         return lpfcinfobuf;
909 }
910
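/*
 * Re-arm the FCP ring polling timer for cfg_poll_tmo milliseconds from
 * now, but only while commands are still outstanding on the FCP ring.
 */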
911 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
912 {
913         unsigned long  poll_tmo_expires =
914                 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
915
916         if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
917                 mod_timer(&phba->fcp_poll_timer,
918                           poll_tmo_expires);
919 }
920
921 void lpfc_poll_start_timer(struct lpfc_hba * phba)
922 {
923         lpfc_poll_rearm_timer(phba);
924 }
925
926 void lpfc_poll_timeout(unsigned long ptr)
927 {
928         struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
929
930         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
931                 lpfc_sli_poll_fcp_ring (phba);
932                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
933                         lpfc_poll_rearm_timer(phba);
934         }
935 }
936
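/*
 * queuecommand entry point called by the SCSI midlayer.  Allocates a
 * scsi buffer, maps the command for DMA, builds the IOCB and issues it
 * on the FCP ring; returns SCSI_MLQUEUE_HOST_BUSY when resources are
 * exhausted.
 */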
937 static int
938 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
939 {
940         struct Scsi_Host  *shost = cmnd->device->host;
941         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
942         struct lpfc_hba   *phba = vport->phba;
943         struct lpfc_sli   *psli = &phba->sli;
944         struct lpfc_rport_data *rdata = cmnd->device->hostdata;
945         struct lpfc_nodelist *ndlp = rdata->pnode;
946         struct lpfc_scsi_buf *lpfc_cmd;
947         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
948         int err;
949
950         err = fc_remote_port_chkready(rport);
951         if (err) {
952                 cmnd->result = err;
953                 goto out_fail_command;
954         }
955
956         /*
957          * Catch race where our node has transitioned, but the
958          * transport is still transitioning.
959          */
960         if (!ndlp) {
961                 cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
962                 goto out_fail_command;
963         }
964         lpfc_cmd = lpfc_get_scsi_buf(phba);
965         if (lpfc_cmd == NULL) {
966                 lpfc_adjust_queue_depth(phba);
967
968                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
969                                  "0707 driver's buffer pool is empty, "
970                                  "IO busied\n");
971                 goto out_host_busy;
972         }
973
974         /*
975          * Store the midlayer's command structure for the completion phase
976          * and complete the command initialization.
977          */
978         lpfc_cmd->pCmd  = cmnd;
979         lpfc_cmd->rdata = rdata;
980         lpfc_cmd->timeout = 0;
981         cmnd->host_scribble = (unsigned char *)lpfc_cmd;
982         cmnd->scsi_done = done;
983
984         err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
985         if (err)
986                 goto out_host_busy_free_buf;
987
988         lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
989
990         err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
991                                   &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
992         if (err)
993                 goto out_host_busy_free_buf;
994
995         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
996                 lpfc_sli_poll_fcp_ring(phba);
997                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
998                         lpfc_poll_rearm_timer(phba);
999         }
1000
1001         return 0;
1002
1003  out_host_busy_free_buf:
1004         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1005         lpfc_release_scsi_buf(phba, lpfc_cmd);
1006  out_host_busy:
1007         return SCSI_MLQUEUE_HOST_BUSY;
1008
1009  out_fail_command:
1010         done(cmnd);
1011         return 0;
1012 }
1013
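/*
 * Delay entry into the error handler while the remote port is blocked,
 * polling once a second until the FC transport unblocks the rport.
 */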
1014 static void
1015 lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1016 {
1017         struct Scsi_Host *shost = cmnd->device->host;
1018         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1019
1020         spin_lock_irq(shost->host_lock);
1021         while (rport->port_state == FC_PORTSTATE_BLOCKED) {
1022                 spin_unlock_irq(shost->host_lock);
1023                 msleep(1000);
1024                 spin_lock_irq(shost->host_lock);
1025         }
1026         spin_unlock_irq(shost->host_lock);
1027         return;
1028 }
1029
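/*
 * eh_abort_handler entry point.  Issues an ABTS (or a close when the
 * link is down) for the outstanding IOCB carrying the command and waits
 * up to twice the devloss timeout for the abort to complete.
 */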
1030 static int
1031 lpfc_abort_handler(struct scsi_cmnd *cmnd)
1032 {
1033         struct Scsi_Host  *shost = cmnd->device->host;
1034         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1035         struct lpfc_hba   *phba = vport->phba;
1036         struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
1037         struct lpfc_iocbq *iocb;
1038         struct lpfc_iocbq *abtsiocb;
1039         struct lpfc_scsi_buf *lpfc_cmd;
1040         IOCB_t *cmd, *icmd;
1041         int ret = SUCCESS;
1042         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
1043
1044         lpfc_block_error_handler(cmnd);
1045         lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
1046         BUG_ON(!lpfc_cmd);
1047
1048         /*
1049          * If pCmd field of the corresponding lpfc_scsi_buf structure
1050          * points to a different SCSI command, then the driver has
1051          * already completed this command, but the midlayer did not
1052          * see the completion before the eh fired.  Just return
1053          * SUCCESS.
1054          */
1055         iocb = &lpfc_cmd->cur_iocbq;
1056         if (lpfc_cmd->pCmd != cmnd)
1057                 goto out;
1058
1059         BUG_ON(iocb->context1 != lpfc_cmd);
1060
1061         abtsiocb = lpfc_sli_get_iocbq(phba);
1062         if (abtsiocb == NULL) {
1063                 ret = FAILED;
1064                 goto out;
1065         }
1066
1067         /*
1068          * The scsi command cannot be in the txq, and it is in flight because
1069          * pCmd is still pointing at the SCSI command we have to abort. There
1070          * is no need to search the txcmplq. Just send an abort to the FW.
1071          */
1072
1073         cmd = &iocb->iocb;
1074         icmd = &abtsiocb->iocb;
1075         icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
1076         icmd->un.acxri.abortContextTag = cmd->ulpContext;
1077         icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
1078
1079         icmd->ulpLe = 1;
1080         icmd->ulpClass = cmd->ulpClass;
1081         if (lpfc_is_link_up(phba))
1082                 icmd->ulpCommand = CMD_ABORT_XRI_CN;
1083         else
1084                 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
1085
1086         abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
1087         abtsiocb->vport = vport;
1088         if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
1089                 lpfc_sli_release_iocbq(phba, abtsiocb);
1090                 ret = FAILED;
1091                 goto out;
1092         }
1093
1094         if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1095                 lpfc_sli_poll_fcp_ring (phba);
1096
1097         lpfc_cmd->waitq = &waitq;
1098         /* Wait for abort to complete */
1099         wait_event_timeout(waitq,
1100                           (lpfc_cmd->pCmd != cmnd),
1101                            (2*vport->cfg_devloss_tmo*HZ));
1102
1103         spin_lock_irq(shost->host_lock);
1104         lpfc_cmd->waitq = NULL;
1105         spin_unlock_irq(shost->host_lock);
1106
1107         if (lpfc_cmd->pCmd == cmnd) {
1108                 ret = FAILED;
1109                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1110                                  "0748 abort handler timed out waiting "
1111                                  "for abort to complete: ret %#x, ID %d, "
1112                                  "LUN %d, snum %#lx\n",
1113                                  ret, cmnd->device->id, cmnd->device->lun,
1114                                  cmnd->serial_number);
1115         }
1116
1117  out:
1118         lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1119                          "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
1120                          "LUN %d snum %#lx\n", ret, cmnd->device->id,
1121                          cmnd->device->lun, cmnd->serial_number);
1122         return ret;
1123 }
1124
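/*
 * eh_device_reset_handler entry point.  Waits for the target node to be
 * mapped, sends an FCP target reset for the command's LUN, and then
 * flushes any I/O still outstanding to that LUN.
 */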
1125 static int
1126 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1127 {
1128         struct Scsi_Host  *shost = cmnd->device->host;
1129         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1130         struct lpfc_hba   *phba = vport->phba;
1131         struct lpfc_scsi_buf *lpfc_cmd;
1132         struct lpfc_iocbq *iocbq, *iocbqrsp;
1133         struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1134         struct lpfc_nodelist *pnode = rdata->pnode;
1135         uint32_t cmd_result = 0, cmd_status = 0;
1136         int ret = FAILED;
1137         int iocb_status = IOCB_SUCCESS;
1138         int cnt, loopcnt;
1139
1140         lpfc_block_error_handler(cmnd);
1141         loopcnt = 0;
1142         /*
1143          * If the target is not in a MAPPED state, delay the reset until the
1144          * target is rediscovered or the devloss timeout expires.
1145          */
1146         while (1) {
1147                 if (!pnode)
1148                         goto out;
1149
1150                 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1151                         schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1152                         loopcnt++;
1153                         rdata = cmnd->device->hostdata;
1154                         if (!rdata ||
1155                                 (loopcnt > ((vport->cfg_devloss_tmo * 2) + 1))){
1156                                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1157                                                  "0721 LUN Reset rport "
1158                                                  "failure: cnt x%x rdata x%p\n",
1159                                                  loopcnt, rdata);
1160                                 goto out;
1161                         }
1162                         pnode = rdata->pnode;
1163                         if (!pnode)
1164                                 goto out;
1165                 }
1166                 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
1167                         break;
1168         }
1169
1170         lpfc_cmd = lpfc_get_scsi_buf(phba);
1171         if (lpfc_cmd == NULL)
1172                 goto out;
1173
1174         lpfc_cmd->timeout = 60;
1175         lpfc_cmd->rdata = rdata;
1176
1177         ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
1178                                            FCP_TARGET_RESET);
1179         if (!ret)
1180                 goto out_free_scsi_buf;
1181
1182         iocbq = &lpfc_cmd->cur_iocbq;
1183
1184         /* get a buffer for this IOCB command response */
1185         iocbqrsp = lpfc_sli_get_iocbq(phba);
1186         if (iocbqrsp == NULL)
1187                 goto out_free_scsi_buf;
1188
1189         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1190                          "0703 Issue target reset to TGT %d LUN %d "
1191                          "rpi x%x nlp_flag x%x\n", cmnd->device->id,
1192                          cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1193         iocb_status = lpfc_sli_issue_iocb_wait(phba,
1194                                        &phba->sli.ring[phba->sli.fcp_ring],
1195                                        iocbq, iocbqrsp, lpfc_cmd->timeout);
1196
1197         if (iocb_status == IOCB_TIMEDOUT)
1198                 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
1199
1200         if (iocb_status == IOCB_SUCCESS)
1201                 ret = SUCCESS;
1202         else
1203                 ret = iocb_status;
1204
1205         cmd_result = iocbqrsp->iocb.un.ulpWord[4];
1206         cmd_status = iocbqrsp->iocb.ulpStatus;
1207
1208         lpfc_sli_release_iocbq(phba, iocbqrsp);
1209
1210         /*
1211          * All outstanding txcmplq I/Os should have been aborted by the device.
1212          * Unfortunately, some targets do not abide by this, forcing the driver
1213          * to double check.
1214          */
1215         cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
1216                                 LPFC_CTX_LUN);
1217         if (cnt)
1218                 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1219                                     cmnd->device->id, cmnd->device->lun,
1220                                     LPFC_CTX_LUN);
1221         loopcnt = 0;
1222         while(cnt) {
1223                 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1224
1225                 if (++loopcnt
1226                     > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1227                         break;
1228
1229                 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
1230                                         cmnd->device->lun, LPFC_CTX_LUN);
1231         }
1232
1233         if (cnt) {
1234                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1235                                  "0719 device reset I/O flush failure: "
1236                                  "cnt x%x\n", cnt);
1237                 ret = FAILED;
1238         }
1239
1240 out_free_scsi_buf:
1241         if (iocb_status != IOCB_TIMEDOUT) {
1242                 lpfc_release_scsi_buf(phba, lpfc_cmd);
1243         }
1244         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1245                          "0713 SCSI layer issued device reset (%d, %d) "
1246                          "return x%x status x%x result x%x\n",
1247                          cmnd->device->id, cmnd->device->lun, ret,
1248                          cmd_status, cmd_result);
1249 out:
1250         return ret;
1251 }
1252
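/*
 * eh_bus_reset_handler entry point.  Issues a target reset to every
 * mapped target known to the driver and then waits for all outstanding
 * I/O on the host to be flushed.
 */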
1253 static int
1254 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1255 {
1256         struct Scsi_Host  *shost = cmnd->device->host;
1257         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1258         struct lpfc_hba   *phba = vport->phba;
1259         struct lpfc_nodelist *ndlp = NULL;
1260         int match;
1261         int ret = FAILED, i, err_count = 0;
1262         int cnt, loopcnt;
1263         struct lpfc_scsi_buf * lpfc_cmd;
1264
1265         lpfc_block_error_handler(cmnd);
1266
1267         lpfc_cmd = lpfc_get_scsi_buf(phba);
1268         if (lpfc_cmd == NULL)
1269                 goto out;
1270
1271         /* The lpfc_cmd storage is reused.  Set all loop invariants. */
1272         lpfc_cmd->timeout = 60;
1273
1274         /*
1275          * Since the driver manages a single bus device, reset all
1276          * targets known to the driver.  Should any target reset
1277          * fail, this routine returns failure to the midlayer.
1278          */
1279         for (i = 0; i < LPFC_MAX_TARGET; i++) {
1280                 /* Search for mapped node by target ID */
1281                 match = 0;
1282                 spin_lock_irq(shost->host_lock);
1283                 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1284                         if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1285                             i == ndlp->nlp_sid &&
1286                             ndlp->rport) {
1287                                 match = 1;
1288                                 break;
1289                         }
1290                 }
1291                 spin_unlock_irq(shost->host_lock);
1292                 if (!match)
1293                         continue;
1294
1295                 ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
1296                                           cmnd->device->lun,
1297                                           ndlp->rport->dd_data);
1298                 if (ret != SUCCESS) {
1299                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1300                                          "0700 Bus Reset on target %d failed\n",
1301                                          i);
1302                         err_count++;
1303                         break;
1304                 }
1305         }
1306
1307         if (ret != IOCB_TIMEDOUT)
1308                 lpfc_release_scsi_buf(phba, lpfc_cmd);
1309
1310         if (err_count == 0)
1311                 ret = SUCCESS;
1312         else
1313                 ret = FAILED;
1314
1315         /*
1316          * All outstanding txcmplq I/Os should have been aborted by
1317          * the targets.  Unfortunately, some targets do not abide by
1318          * this, forcing the driver to double check.
1319          */
1320         cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
1321         if (cnt)
1322                 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1323                                     0, 0, LPFC_CTX_HOST);
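        /*
         * Poll until the aborted I/O completes, sleeping LPFC_RESET_WAIT
         * seconds per pass and giving up after roughly twice the
         * configured devloss timeout.
         */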
1324         loopcnt = 0;
1325         while (cnt) {
1326                 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1327
1328                 if (++loopcnt
1329                     > (2 * vport->cfg_devloss_tmo)/LPFC_RESET_WAIT)
1330                         break;
1331
1332                 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
1333         }
1334
1335         if (cnt) {
1336                 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1337                                  "0715 Bus Reset I/O flush failure: "
1338                                  "cnt x%x left x%x\n", cnt, i);
1339                 ret = FAILED;
1340         }
1341
1342         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1343                          "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
1344 out:
1345         return ret;
1346 }
1347
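/*
 * slave_alloc entry point.  Verifies the remote port is usable, stashes
 * the rport data in the scsi_device, and pre-allocates scsi_bufs onto
 * the HBA-wide buffer list without exceeding the configured HBA queue
 * depth.
 */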
1348 static int
1349 lpfc_slave_alloc(struct scsi_device *sdev)
1350 {
1351         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1352         struct lpfc_hba   *phba = vport->phba;
1353         struct lpfc_scsi_buf *scsi_buf = NULL;
1354         struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1355         uint32_t total = 0, i;
1356         uint32_t num_to_alloc = 0;
1357         unsigned long flags;
1358
1359         if (!rport || fc_remote_port_chkready(rport))
1360                 return -ENXIO;
1361
1362         sdev->hostdata = rport->dd_data;
1363
1364         /*
1365          * Populate this host's globally available list of scsi buffers with
1366          * enough scsi_bufs for this LUN.  Don't allocate more than the
1367          * HBA limit conveyed to the midlayer via the host structure.  The
1368          * formula accounts for the lun_queue_depth + error handlers + 1
1369          * extra.  This list of scsi bufs exists for the lifetime of the driver.
1370          */
1371         total = phba->total_scsi_bufs;
1372         num_to_alloc = vport->cfg_lun_queue_depth + 2;
1373
1374         /* Always keep some exchanges available so discovery can complete */
1375         if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
1376                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1377                                  "0704 At limitation of %d preallocated "
1378                                  "command buffers\n", total);
1379                 return 0;
1380         /* Reduce the request so the total stays within the HBA limit */
1381         } else if (total + num_to_alloc >
1382                 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
1383                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1384                                  "0705 Allocation request of %d "
1385                                  "command buffers will exceed max of %d.  "
1386                                  "Reducing allocation request to %d.\n",
1387                                  num_to_alloc, phba->cfg_hba_queue_depth,
1388                                  (phba->cfg_hba_queue_depth - total));
1389                 num_to_alloc = phba->cfg_hba_queue_depth - total;
1390         }
1391
1392         for (i = 0; i < num_to_alloc; i++) {
1393                 scsi_buf = lpfc_new_scsi_buf(vport);
1394                 if (!scsi_buf) {
1395                         lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1396                                          "0706 Failed to allocate "
1397                                          "command buffer\n");
1398                         break;
1399                 }
1400
1401                 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
1402                 phba->total_scsi_bufs++;
1403                 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
1404                 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
1405         }
1406         return 0;
1407 }
1408
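/*
 * slave_configure entry point.  Sets the device queue depth and tagged
 * queueing mode, propagates the configured dev_loss_tmo to the remote
 * port, and services the FCP ring when polling mode is enabled.
 */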
1409 static int
1410 lpfc_slave_configure(struct scsi_device *sdev)
1411 {
1412         struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1413         struct lpfc_hba   *phba = vport->phba;
1414         struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);
1415
1416         if (sdev->tagged_supported)
1417                 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
1418         else
1419                 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
1420
1421         /*
1422          * Initialize the fc transport attributes for the target
1423          * containing this scsi device.  Also note that the driver's
1424          * target pointer is stored in the starget_data for the
1425          * driver's sysfs entry point functions.
1426          */
1427         rport->dev_loss_tmo = vport->cfg_devloss_tmo;
1428
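        /*
         * In polled mode, service the FCP ring now and re-arm the poll
         * timer if ring interrupts are disabled.
         */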
1429         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1430                 lpfc_sli_poll_fcp_ring(phba);
1431                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1432                         lpfc_poll_rearm_timer(phba);
1433         }
1434
1435         return 0;
1436 }
1437
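/*
 * slave_destroy entry point.  Clears the cached rport data so no stale
 * pointer is left behind when the scsi_device is torn down.
 */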
1438 static void
1439 lpfc_slave_destroy(struct scsi_device *sdev)
1440 {
1441         sdev->hostdata = NULL;
1442         return;
1443 }
1444
1445
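/*
 * The two host templates below differ only in the shost_attrs group
 * exposed through sysfs: lpfc_hba_attrs for the physical port template
 * and lpfc_vport_attrs for the vport template.
 */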
1446 struct scsi_host_template lpfc_template = {
1447         .module                 = THIS_MODULE,
1448         .name                   = LPFC_DRIVER_NAME,
1449         .info                   = lpfc_info,
1450         .queuecommand           = lpfc_queuecommand,
1451         .eh_abort_handler       = lpfc_abort_handler,
1452         .eh_device_reset_handler= lpfc_device_reset_handler,
1453         .eh_bus_reset_handler   = lpfc_bus_reset_handler,
1454         .slave_alloc            = lpfc_slave_alloc,
1455         .slave_configure        = lpfc_slave_configure,
1456         .slave_destroy          = lpfc_slave_destroy,
1457         .scan_finished          = lpfc_scan_finished,
1458         .this_id                = -1,
1459         .sg_tablesize           = LPFC_SG_SEG_CNT,
1460         .use_sg_chaining        = ENABLE_SG_CHAINING,
1461         .cmd_per_lun            = LPFC_CMD_PER_LUN,
1462         .use_clustering         = ENABLE_CLUSTERING,
1463         .shost_attrs            = lpfc_hba_attrs,
1464         .max_sectors            = 0xFFFF,
1465 };
1466
1467 struct scsi_host_template lpfc_vport_template = {
1468         .module                 = THIS_MODULE,
1469         .name                   = LPFC_DRIVER_NAME,
1470         .info                   = lpfc_info,
1471         .queuecommand           = lpfc_queuecommand,
1472         .eh_abort_handler       = lpfc_abort_handler,
1473         .eh_device_reset_handler= lpfc_device_reset_handler,
1474         .eh_bus_reset_handler   = lpfc_bus_reset_handler,
1475         .slave_alloc            = lpfc_slave_alloc,
1476         .slave_configure        = lpfc_slave_configure,
1477         .slave_destroy          = lpfc_slave_destroy,
1478         .scan_finished          = lpfc_scan_finished,
1479         .this_id                = -1,
1480         .sg_tablesize           = LPFC_SG_SEG_CNT,
1481         .cmd_per_lun            = LPFC_CMD_PER_LUN,
1482         .use_clustering         = ENABLE_CLUSTERING,
1483         .use_sg_chaining        = ENABLE_SG_CHAINING,
1484         .shost_attrs            = lpfc_vport_attrs,
1485         .max_sectors            = 0xFFFF,
1486 };