linux-2.6: drivers/scsi/lpfc/lpfc_sli.c
[SCSI] lpfc: NPIV: add NPIV support on top of SLI-3
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
32
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_compat.h"
41
42 /*
43  * Define macro to log: Mailbox command x%x cannot issue Data
44  * This allows multiple uses of lpfc_msgBlk0311
45  * w/o perturbing log msg utility.
46  */
47 #define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
48                         lpfc_printf_log(phba, \
49                                 KERN_INFO, \
50                                 LOG_MBOX | LOG_SLI, \
51                                 "%d (%d):0311 Mailbox command x%x cannot " \
52                                 "issue Data: x%x x%x x%x\n", \
53                                 phba->brd_no, \
54                                 pmbox->vport ? pmbox->vport->vpi : 0, \
55                                 pmbox->mb.mbxCommand,           \
56                                 phba->pport->port_state,        \
57                                 psli->sli_flag, \
58                                 flag)
59
60
61 /* There are only four IOCB completion types. */
62 typedef enum _lpfc_iocb_type {
63         LPFC_UNKNOWN_IOCB,
64         LPFC_UNSOL_IOCB,
65         LPFC_SOL_IOCB,
66         LPFC_ABORT_IOCB
67 } lpfc_iocb_type;
68
69 /* SLI-2/SLI-3 provide different sized iocbs.  Given a pointer
70  * to the start of the ring, and the slot number of the
71  * desired iocb entry, calculate a pointer to that entry.
72  */
73 static inline IOCB_t *
74 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
75 {
76         return (IOCB_t *) (((char *) pring->cmdringaddr) +
77                            pring->cmdidx * phba->iocb_cmd_size);
78 }
79
80 static inline IOCB_t *
81 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
82 {
83         return (IOCB_t *) (((char *) pring->rspringaddr) +
84                            pring->rspidx * phba->iocb_rsp_size);
85 }
86
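/*
 * __lpfc_sli_get_iocbq: take an iocb object off the driver's free list
 * (phba->lpfc_iocb_list), returning NULL if the list is empty.  Callers
 * are expected to hold phba->hbalock; lpfc_sli_get_iocbq below is the
 * locking wrapper for callers that do not.
 */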
87 static struct lpfc_iocbq *
88 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
89 {
90         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
91         struct lpfc_iocbq * iocbq = NULL;
92
93         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
94         return iocbq;
95 }
96
97 struct lpfc_iocbq *
98 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
99 {
100         struct lpfc_iocbq * iocbq = NULL;
101         unsigned long iflags;
102
103         spin_lock_irqsave(&phba->hbalock, iflags);
104         iocbq = __lpfc_sli_get_iocbq(phba);
105         spin_unlock_irqrestore(&phba->hbalock, iflags);
106         return iocbq;
107 }
108
109 void
110 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
111 {
112         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
113
114         /*
115          * Clean all volatile data fields, preserve iotag and node struct.
116          */
117         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
118         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
119 }
120
121 void
122 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
123 {
124         unsigned long iflags;
125
126         /*
127          * Clean all volatile data fields, preserve iotag and node struct.
128          */
129         spin_lock_irqsave(&phba->hbalock, iflags);
130         __lpfc_sli_release_iocbq(phba, iocbq);
131         spin_unlock_irqrestore(&phba->hbalock, iflags);
132 }
133
134 /*
135  * Translate the iocb command to an iocb command type used to decide the final
136  * disposition of each completed IOCB.
137  */
138 static lpfc_iocb_type
139 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
140 {
141         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
142
143         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
144                 return 0;
145
146         switch (iocb_cmnd) {
147         case CMD_XMIT_SEQUENCE_CR:
148         case CMD_XMIT_SEQUENCE_CX:
149         case CMD_XMIT_BCAST_CN:
150         case CMD_XMIT_BCAST_CX:
151         case CMD_ELS_REQUEST_CR:
152         case CMD_ELS_REQUEST_CX:
153         case CMD_CREATE_XRI_CR:
154         case CMD_CREATE_XRI_CX:
155         case CMD_GET_RPI_CN:
156         case CMD_XMIT_ELS_RSP_CX:
157         case CMD_GET_RPI_CR:
158         case CMD_FCP_IWRITE_CR:
159         case CMD_FCP_IWRITE_CX:
160         case CMD_FCP_IREAD_CR:
161         case CMD_FCP_IREAD_CX:
162         case CMD_FCP_ICMND_CR:
163         case CMD_FCP_ICMND_CX:
164         case CMD_FCP_TSEND_CX:
165         case CMD_FCP_TRSP_CX:
166         case CMD_FCP_TRECEIVE_CX:
167         case CMD_FCP_AUTO_TRSP_CX:
168         case CMD_ADAPTER_MSG:
169         case CMD_ADAPTER_DUMP:
170         case CMD_XMIT_SEQUENCE64_CR:
171         case CMD_XMIT_SEQUENCE64_CX:
172         case CMD_XMIT_BCAST64_CN:
173         case CMD_XMIT_BCAST64_CX:
174         case CMD_ELS_REQUEST64_CR:
175         case CMD_ELS_REQUEST64_CX:
176         case CMD_FCP_IWRITE64_CR:
177         case CMD_FCP_IWRITE64_CX:
178         case CMD_FCP_IREAD64_CR:
179         case CMD_FCP_IREAD64_CX:
180         case CMD_FCP_ICMND64_CR:
181         case CMD_FCP_ICMND64_CX:
182         case CMD_FCP_TSEND64_CX:
183         case CMD_FCP_TRSP64_CX:
184         case CMD_FCP_TRECEIVE64_CX:
185         case CMD_GEN_REQUEST64_CR:
186         case CMD_GEN_REQUEST64_CX:
187         case CMD_XMIT_ELS_RSP64_CX:
188                 type = LPFC_SOL_IOCB;
189                 break;
190         case CMD_ABORT_XRI_CN:
191         case CMD_ABORT_XRI_CX:
192         case CMD_CLOSE_XRI_CN:
193         case CMD_CLOSE_XRI_CX:
194         case CMD_XRI_ABORTED_CX:
195         case CMD_ABORT_MXRI64_CN:
196                 type = LPFC_ABORT_IOCB;
197                 break;
198         case CMD_RCV_SEQUENCE_CX:
199         case CMD_RCV_ELS_REQ_CX:
200         case CMD_RCV_SEQUENCE64_CX:
201         case CMD_RCV_ELS_REQ64_CX:
202         case CMD_IOCB_RCV_SEQ64_CX:
203         case CMD_IOCB_RCV_ELS64_CX:
204         case CMD_IOCB_RCV_CONT64_CX:
205                 type = LPFC_UNSOL_IOCB;
206                 break;
207         default:
208                 type = LPFC_UNKNOWN_IOCB;
209                 break;
210         }
211
212         return type;
213 }
214
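/*
 * lpfc_sli_ring_map: issue a CONFIG_RING mailbox command, in polled
 * (MBX_POLL) mode, for every ring the SLI layer knows about.  Returns 0 on
 * success, -ENOMEM if no mailbox buffer could be allocated, or -ENXIO
 * (with the HBA marked LPFC_HBA_ERROR) if any CONFIG_RING command fails.
 */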
215 static int
216 lpfc_sli_ring_map(struct lpfc_hba *phba)
217 {
218         struct lpfc_sli *psli = &phba->sli;
219         LPFC_MBOXQ_t *pmb;
220         MAILBOX_t *pmbox;
221         int i, rc, ret = 0;
222
223         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
224         if (!pmb)
225                 return -ENOMEM;
226         pmbox = &pmb->mb;
227         phba->link_state = LPFC_INIT_MBX_CMDS;
228         for (i = 0; i < psli->num_rings; i++) {
229                 lpfc_config_ring(phba, i, pmb);
230                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
231                 if (rc != MBX_SUCCESS) {
232                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
233                                         "%d:0446 Adapter failed to init (%d), "
234                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
235                                         "ring %d\n",
236                                         phba->brd_no, rc,
237                                         pmbox->mbxCommand,
238                                         pmbox->mbxStatus,
239                                         i);
240                         phba->link_state = LPFC_HBA_ERROR;
241                         ret = -ENXIO;
242                         break;
243                 }
244         }
245         mempool_free(pmb, phba->mbox_mem_pool);
246         return ret;
247 }
248
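/*
 * lpfc_sli_ringtxcmpl_put: queue a command iocb on the ring's txcmplq to
 * wait for its completion from the HBA.  For ELS ring commands other than
 * ABORT_XRI/CLOSE_XRI the owning vport's ELS timeout timer is pushed out
 * by twice phba->fc_ratov; an ELS command without a vport is a driver bug
 * and triggers BUG().
 */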
249 static int
250 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
251                         struct lpfc_iocbq *piocb)
252 {
253         list_add_tail(&piocb->list, &pring->txcmplq);
254         pring->txcmplq_cnt++;
255         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
256            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
257            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
258                 if (!piocb->vport)
259                         BUG();
260                 else
261                         mod_timer(&piocb->vport->els_tmofunc,
262                                   jiffies + HZ * (phba->fc_ratov << 1));
263         }
264
265
266         return 0;
267 }
268
269 static struct lpfc_iocbq *
270 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
271 {
272         struct list_head *dlp;
273         struct lpfc_iocbq *cmd_iocb;
274
275         dlp = &pring->txq;
276         cmd_iocb = NULL;
277         list_remove_head((&pring->txq), cmd_iocb,
278                          struct lpfc_iocbq,
279                          list);
280         if (cmd_iocb) {
281                 /* If the first ptr is not equal to the list header,
282                  * dequeue the IOCBQ_t and return it.
283                  */
284                 pring->txq_cnt--;
285         }
286         return cmd_iocb;
287 }
288
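/*
 * lpfc_sli_next_iocb_slot: return a pointer to the next free command iocb
 * entry on the ring, or NULL if the ring is full.  When the cached get
 * index suggests the ring is full it is refreshed from the port get/put
 * block (SLI-2 and SLI-3 keep it in different places).  A get index beyond
 * the ring size is treated as an adapter error: HA_ERATT/HS_FFER3 are
 * posted and the worker thread is woken.  Called with hbalock held.
 */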
289 static IOCB_t *
290 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
291 {
292         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
293                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
294                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
295         uint32_t  max_cmd_idx = pring->numCiocb;
296
297         if ((pring->next_cmdidx == pring->cmdidx) &&
298            (++pring->next_cmdidx >= max_cmd_idx))
299                 pring->next_cmdidx = 0;
300
301         if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
302
303                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
304
305                 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
306                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
307                                         "%d:0315 Ring %d issue: portCmdGet %d "
308                                         "is bigger than cmd ring %d\n",
309                                         phba->brd_no, pring->ringno,
310                                         pring->local_getidx, max_cmd_idx);
311
312                         phba->link_state = LPFC_HBA_ERROR;
313                         /*
314                          * All error attention handlers are posted to
315                          * worker thread
316                          */
317                         phba->work_ha |= HA_ERATT;
318                         phba->work_hs = HS_FFER3;
319
320                         /* hbalock should already be held */
321                         if (phba->work_wait)
322                                 lpfc_worker_wake_up(phba);
323
324                         return NULL;
325                 }
326
327                 if (pring->local_getidx == pring->next_cmdidx)
328                         return NULL;
329         }
330
331         return lpfc_cmd_iocb(phba, pring);
332 }
333
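/*
 * lpfc_sli_next_iotag: allocate the next free iotag for an iocb and record
 * the iocbq pointer in psli->iocbq_lookup[] so the response can later be
 * matched back to its command (see lpfc_sli_iocbq_lookup).  When the
 * lookup array is exhausted it is grown by LPFC_IOCBQ_LOOKUP_INCREMENT
 * entries; hbalock is dropped around the allocation and the length is
 * re-checked afterwards in case another thread grew the array first.
 * Returns the new iotag, or 0 if no tag could be allocated.
 */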
334 uint16_t
335 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
336 {
337         struct lpfc_iocbq **new_arr;
338         struct lpfc_iocbq **old_arr;
339         size_t new_len;
340         struct lpfc_sli *psli = &phba->sli;
341         uint16_t iotag;
342
343         spin_lock_irq(&phba->hbalock);
344         iotag = psli->last_iotag;
345         if(++iotag < psli->iocbq_lookup_len) {
346                 psli->last_iotag = iotag;
347                 psli->iocbq_lookup[iotag] = iocbq;
348                 spin_unlock_irq(&phba->hbalock);
349                 iocbq->iotag = iotag;
350                 return iotag;
351         } else if (psli->iocbq_lookup_len < (0xffff
352                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
353                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
354                 spin_unlock_irq(&phba->hbalock);
355                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
356                                   GFP_KERNEL);
357                 if (new_arr) {
358                         spin_lock_irq(&phba->hbalock);
359                         old_arr = psli->iocbq_lookup;
360                         if (new_len <= psli->iocbq_lookup_len) {
361                                 /* highly improbable case */
362                                 kfree(new_arr);
363                                 iotag = psli->last_iotag;
364                                 if(++iotag < psli->iocbq_lookup_len) {
365                                         psli->last_iotag = iotag;
366                                         psli->iocbq_lookup[iotag] = iocbq;
367                                         spin_unlock_irq(&phba->hbalock);
368                                         iocbq->iotag = iotag;
369                                         return iotag;
370                                 }
371                                 spin_unlock_irq(&phba->hbalock);
372                                 return 0;
373                         }
374                         if (psli->iocbq_lookup)
375                                 memcpy(new_arr, old_arr,
376                                        ((psli->last_iotag  + 1) *
377                                         sizeof (struct lpfc_iocbq *)));
378                         psli->iocbq_lookup = new_arr;
379                         psli->iocbq_lookup_len = new_len;
380                         psli->last_iotag = iotag;
381                         psli->iocbq_lookup[iotag] = iocbq;
382                         spin_unlock_irq(&phba->hbalock);
383                         iocbq->iotag = iotag;
384                         kfree(old_arr);
385                         return iotag;
386                 }
387         } else
388                 spin_unlock_irq(&phba->hbalock);
389
390         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
391                         "%d:0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
392                         phba->brd_no, psli->last_iotag);
393
394         return 0;
395 }
396
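/*
 * lpfc_sli_submit_iocb: copy a command iocb into the next ring slot and
 * hand it to the HBA.  If the iocb has a completion handler it is parked
 * on the txcmplq until the response arrives; otherwise the iocbq is
 * released immediately.  The ring's cmdPutInx in the host group pointers
 * is then updated to tell the HBA about the new entry.
 */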
397 static void
398 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
399                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
400 {
401         /*
402          * Set up an iotag
403          */
404         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
405
406         /*
407          * Issue iocb command to adapter
408          */
409         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
410         wmb();
411         pring->stats.iocb_cmd++;
412
413         /*
414          * If there is no completion routine to call, we can release the
415          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
416          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
417          */
418         if (nextiocb->iocb_cmpl)
419                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
420         else
421                 __lpfc_sli_release_iocbq(phba, nextiocb);
422
423         /*
424          * Let the HBA know what IOCB slot will be the next one the
425          * driver will put a command into.
426          */
427         pring->cmdidx = pring->next_cmdidx;
428         writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
429 }
430
431 static void
432 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
433 {
434         int ringno = pring->ringno;
435
436         pring->flag |= LPFC_CALL_RING_AVAILABLE;
437
438         wmb();
439
440         /*
441          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
442          * The HBA will tell us when an IOCB entry is available.
443          */
444         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
445         readl(phba->CAregaddr); /* flush */
446
447         pring->stats.iocb_cmd_full++;
448 }
449
450 static void
451 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
452 {
453         int ringno = pring->ringno;
454
455         /*
456          * Tell the HBA that there is work to do in this ring.
457          */
458         wmb();
459         writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
460         readl(phba->CAregaddr); /* flush */
461 }
462
463 static void
464 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
465 {
466         IOCB_t *iocb;
467         struct lpfc_iocbq *nextiocb;
468
469         /*
470          * Check to see if:
471          *  (a) there is anything on the txq to send
472          *  (b) link is up
473          *  (c) link attention events can be processed (fcp ring only)
474          *  (d) IOCB processing is not blocked by the outstanding mbox command.
475          */
476         if (pring->txq_cnt &&
477             lpfc_is_link_up(phba) &&
478             (pring->ringno != phba->sli.fcp_ring ||
479              phba->sli.sli_flag & LPFC_PROCESS_LA) &&
480             !(pring->flag & LPFC_STOP_IOCB_MBX)) {
481
482                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
483                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
484                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
485
486                 if (iocb)
487                         lpfc_sli_update_ring(phba, pring);
488                 else
489                         lpfc_sli_update_full_ring(phba, pring);
490         }
491
492         return;
493 }
494
495 /* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
496 static void
497 lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
498 {
499         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
500                 &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
501                 &phba->slim2p->mbx.us.s2.port[ringno];
502         unsigned long iflags;
503
504         /* If the ring is active, flag it */
505         spin_lock_irqsave(&phba->hbalock, iflags);
506         if (phba->sli.ring[ringno].cmdringaddr) {
507                 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
508                         phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
509                         /*
510                          * Force update of the local copy of cmdGetInx
511                          */
512                         phba->sli.ring[ringno].local_getidx
513                                 = le32_to_cpu(pgp->cmdGetInx);
514                         lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
515                 }
516         }
517         spin_unlock_irqrestore(&phba->hbalock, iflags);
518 }
519
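/*
 * lpfc_sli_next_hbq_slot: return the next free Host Buffer Queue entry for
 * HBQ 'hbqno', or NULL if that HBQ is full.  This mirrors the command ring
 * logic above: the cached get index is refreshed from the HBA when the
 * queue looks full, and an out-of-range get index puts the HBA into the
 * error state.
 */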
520 struct lpfc_hbq_entry *
521 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
522 {
523         struct hbq_s *hbqp = &phba->hbqs[hbqno];
524
525         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
526             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
527                 hbqp->next_hbqPutIdx = 0;
528
529         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
530                 uint32_t raw_index = phba->hbq_get[hbqno];
531                 uint32_t getidx = le32_to_cpu(raw_index);
532
533                 hbqp->local_hbqGetIdx = getidx;
534
535                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
536                         lpfc_printf_log(phba, KERN_ERR,
537                                         LOG_SLI | LOG_VPORT,
538                                         "%d:1802 HBQ %d: local_hbqGetIdx "
539                                         "%u is greater than hbqp->entry_count %u\n",
540                                         phba->brd_no, hbqno,
541                                         hbqp->local_hbqGetIdx,
542                                         hbqp->entry_count);
543
544                         phba->link_state = LPFC_HBA_ERROR;
545                         return NULL;
546                 }
547
548                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
549                         return NULL;
550         }
551
552         return (struct lpfc_hbq_entry *) phba->hbqslimp.virt + hbqp->hbqPutIdx;
553 }
554
555 void
556 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
557 {
558         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
559         struct hbq_dmabuf *hbq_buf;
560
561         /* Return all memory used by all HBQs */
562         list_for_each_entry_safe(dmabuf, next_dmabuf,
563                                  &phba->hbq_buffer_list, list) {
564                 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
565                 list_del(&hbq_buf->dbuf.list);
566                 lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
567                 kfree(hbq_buf);
568         }
569 }
570
571 static void
572 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
573                          struct hbq_dmabuf *hbq_buf)
574 {
575         struct lpfc_hbq_entry *hbqe;
576         dma_addr_t physaddr = hbq_buf->dbuf.phys;
577
578         /* Get next HBQ entry slot to use */
579         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
580         if (hbqe) {
581                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
582
583                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
584                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
585                 hbqe->bde.tus.f.bdeSize = FCELSSIZE;
586                 hbqe->bde.tus.f.bdeFlags = 0;
587                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
588                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
589                 /* Sync SLIM */
590                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
591                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
592                 /* flush */
593                 readl(phba->hbq_put + hbqno);
594                 list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
595         }
596 }
597
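/*
 * Profile for the ELS Host Buffer Queue.  From the code in this file:
 * .entry_count bounds how many buffers may be posted, .buffer_count tracks
 * how many have been posted so far, and .init_count/.add_count are the
 * batch sizes used by lpfc_sli_hbqbuf_init_hbqs()/lpfc_sli_hbqbuf_add_hbqs().
 * .ring_mask presumably ties the HBQ to the ELS ring; the remaining fields
 * (.rn, .mask_count, .profile) are consumed elsewhere in the driver.
 */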
598 static struct lpfc_hbq_init lpfc_els_hbq = {
599         .rn = 1,
600         .entry_count = 200,
601         .mask_count = 0,
602         .profile = 0,
603         .ring_mask = 1 << LPFC_ELS_RING,
604         .buffer_count = 0,
605         .init_count = 20,
606         .add_count = 5,
607 };
608
609 static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
610         &lpfc_els_hbq,
611 };
612
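/*
 * lpfc_sli_hbqbuf_fill_hbqs: allocate up to 'count' DMA buffers and post
 * them to HBQ 'hbqno', starting at the current buffer_count and never
 * exceeding the HBQ's entry_count.  Each buffer is tagged with its index
 * and the HBQ number so it can be found again later:
 *
 *	hbq_buffer->tag = (i | (hbqno << 16));
 *
 * Returns 0 on success, 1 if a buffer or its DMA memory could not be
 * allocated.
 */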
613 int
614 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
615 {
616         uint32_t i, start, end;
617         struct hbq_dmabuf *hbq_buffer;
618
619         start = lpfc_hbq_defs[hbqno]->buffer_count;
620         end = count + lpfc_hbq_defs[hbqno]->buffer_count;
621         if (end > lpfc_hbq_defs[hbqno]->entry_count) {
622                 end = lpfc_hbq_defs[hbqno]->entry_count;
623         }
624
625         /* Populate HBQ entries */
626         for (i = start; i < end; i++) {
627                 hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
628                                      GFP_KERNEL);
629                 if (!hbq_buffer)
630                         return 1;
631                 hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
632                                                         &hbq_buffer->dbuf.phys);
633                 if (hbq_buffer->dbuf.virt == NULL)
634                         return 1;
635                 hbq_buffer->tag = (i | (hbqno << 16));
636                 lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
637                 lpfc_hbq_defs[hbqno]->buffer_count++;
638         }
639         return 0;
640 }
641
642 int
643 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
644 {
645         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
646                                          lpfc_hbq_defs[qno]->add_count));
647 }
648
649 int
650 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
651 {
652         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
653                                          lpfc_hbq_defs[qno]->init_count));
654 }
655
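/*
 * lpfc_sli_hbqbuf_find: given the buffer tag reported by the HBA, walk
 * phba->hbq_buffer_list for the hbq_dmabuf whose low 16 tag bits match.
 * The high 16 bits encode the HBQ number (tag >> 16) and are only used
 * here for the error log.  Returns NULL, after logging message 1803, if
 * no posted buffer carries that tag.
 */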
656 struct hbq_dmabuf *
657 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
658 {
659         struct lpfc_dmabuf *d_buf;
660         struct hbq_dmabuf *hbq_buf;
661
662         list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
663                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
664                 if ((hbq_buf->tag & 0xffff) == tag) {
665                         return hbq_buf;
666                 }
667         }
668         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
669                         "%d:1803 Bad hbq tag. Data: x%x x%x\n",
670                         phba->brd_no, tag,
671                         lpfc_hbq_defs[tag >> 16]->buffer_count);
672         return NULL;
673 }
674
675 void
676 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *sp)
677 {
678         uint32_t hbqno;
679
680         if (sp) {
681                 hbqno = sp->tag >> 16;
682                 lpfc_sli_hbq_to_firmware(phba, hbqno, sp);
683         }
684 }
685
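/*
 * lpfc_sli_chk_mbx_command: validate a mailbox command code.  Known
 * commands are returned unchanged; anything else maps to MBX_SHUTDOWN,
 * which lpfc_sli_handle_mb_event() below treats as a fatal adapter error.
 */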
686 static int
687 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
688 {
689         uint8_t ret;
690
691         switch (mbxCommand) {
692         case MBX_LOAD_SM:
693         case MBX_READ_NV:
694         case MBX_WRITE_NV:
695         case MBX_RUN_BIU_DIAG:
696         case MBX_INIT_LINK:
697         case MBX_DOWN_LINK:
698         case MBX_CONFIG_LINK:
699         case MBX_CONFIG_RING:
700         case MBX_RESET_RING:
701         case MBX_READ_CONFIG:
702         case MBX_READ_RCONFIG:
703         case MBX_READ_SPARM:
704         case MBX_READ_STATUS:
705         case MBX_READ_RPI:
706         case MBX_READ_XRI:
707         case MBX_READ_REV:
708         case MBX_READ_LNK_STAT:
709         case MBX_REG_LOGIN:
710         case MBX_UNREG_LOGIN:
711         case MBX_READ_LA:
712         case MBX_CLEAR_LA:
713         case MBX_DUMP_MEMORY:
714         case MBX_DUMP_CONTEXT:
715         case MBX_RUN_DIAGS:
716         case MBX_RESTART:
717         case MBX_UPDATE_CFG:
718         case MBX_DOWN_LOAD:
719         case MBX_DEL_LD_ENTRY:
720         case MBX_RUN_PROGRAM:
721         case MBX_SET_MASK:
722         case MBX_SET_SLIM:
723         case MBX_UNREG_D_ID:
724         case MBX_KILL_BOARD:
725         case MBX_CONFIG_FARP:
726         case MBX_BEACON:
727         case MBX_LOAD_AREA:
728         case MBX_RUN_BIU_DIAG64:
729         case MBX_CONFIG_PORT:
730         case MBX_READ_SPARM64:
731         case MBX_READ_RPI64:
732         case MBX_REG_LOGIN64:
733         case MBX_READ_LA64:
734         case MBX_FLASH_WR_ULA:
735         case MBX_SET_DEBUG:
736         case MBX_LOAD_EXP_ROM:
737         case MBX_REG_VPI:
738         case MBX_UNREG_VPI:
739                 ret = mbxCommand;
740                 break;
741         default:
742                 ret = MBX_SHUTDOWN;
743                 break;
744         }
745         return ret;
746 }
747 static void
748 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
749 {
750         wait_queue_head_t *pdone_q;
751
752         /*
753          * If pdone_q is empty, the driver thread gave up waiting and
754          * continued running.
755          */
756         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
757         pdone_q = (wait_queue_head_t *) pmboxq->context1;
758         if (pdone_q)
759                 wake_up_interruptible(pdone_q);
760         return;
761 }
762
763 void
764 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
765 {
766         struct lpfc_dmabuf *mp;
767         uint16_t rpi;
768         int rc;
769
770         mp = (struct lpfc_dmabuf *) (pmb->context1);
771
772         if (mp) {
773                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
774                 kfree(mp);
775         }
776
777         /*
778          * If a REG_LOGIN succeeded after the node was destroyed or the
779          * node is in re-discovery, the driver needs to clean up the RPI.
780          */
781         if (!(phba->pport->load_flag & FC_UNLOADING) &&
782             pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
783             !pmb->mb.mbxStatus) {
784
785                 rpi = pmb->mb.un.varWords[0];
786                 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
787                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
788                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
789                 if (rc != MBX_NOT_FINISHED)
790                         return;
791         }
792
793         mempool_free(pmb, phba->mbox_mem_pool);
794         return;
795 }
796
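/*
 * lpfc_sli_handle_mb_event: process completed mailbox commands.  The
 * completed entries are spliced off phba->sli.mboxq_cmpl under hbalock and
 * then handled one at a time: an unknown command is treated as a fatal
 * adapter error, an MBXERR_NO_RESOURCES completion is re-issued, and
 * everything else is logged and passed to its mbox_cmpl callback, if any.
 * Always returns 0.
 */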
797 int
798 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
799 {
800         MAILBOX_t *pmbox;
801         LPFC_MBOXQ_t *pmb;
802         int rc;
803         LIST_HEAD(cmplq);
804
805         phba->sli.slistat.mbox_event++;
806
807         /* Get all completed mailbox buffers into the cmplq */
808         spin_lock_irq(&phba->hbalock);
809         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
810         spin_unlock_irq(&phba->hbalock);
811
812         /* Get a Mailbox buffer to setup mailbox commands for callback */
813         do {
814                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
815                 if (pmb == NULL)
816                         break;
817
818                 pmbox = &pmb->mb;
819
820                 /*
821                  * It is a fatal error if an unknown mailbox command completes.
822                  */
823                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
824                     MBX_SHUTDOWN) {
825
826                         /* Unknown mailbox command completion */
827                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
828                                         "%d (%d):0323 Unknown Mailbox command "
829                                         "%x Cmpl\n",
830                                         phba->brd_no,
831                                         pmb->vport ? pmb->vport->vpi : 0,
832                                         pmbox->mbxCommand);
833                         phba->link_state = LPFC_HBA_ERROR;
834                         phba->work_hs = HS_FFER3;
835                         lpfc_handle_eratt(phba);
836                         continue;
837                 }
838
839                 if (pmbox->mbxStatus) {
840                         phba->sli.slistat.mbox_stat_err++;
841                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
842                                 /* Mbox cmd cmpl error - RETRYing */
843                                 lpfc_printf_log(phba, KERN_INFO,
844                                                 LOG_MBOX | LOG_SLI,
845                                                 "%d (%d):0305 Mbox cmd cmpl "
846                                                 "error - RETRYing Data: x%x "
847                                                 "x%x x%x x%x\n",
848                                                 phba->brd_no,
849                                                 pmb->vport ? pmb->vport->vpi :0,
850                                                 pmbox->mbxCommand,
851                                                 pmbox->mbxStatus,
852                                                 pmbox->un.varWords[0],
853                                                 pmb->vport ? pmb->vport->port_state : 0);
854                                 pmbox->mbxStatus = 0;
855                                 pmbox->mbxOwner = OWN_HOST;
856                                 spin_lock_irq(&phba->hbalock);
857                                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
858                                 spin_unlock_irq(&phba->hbalock);
859                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
860                                 if (rc == MBX_SUCCESS)
861                                         continue;
862                         }
863                 }
864
865                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
866                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
867                                 "%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
868                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
869                                 phba->brd_no,
870                                 pmb->vport ? pmb->vport->vpi : 0,
871                                 pmbox->mbxCommand,
872                                 pmb->mbox_cmpl,
873                                 *((uint32_t *) pmbox),
874                                 pmbox->un.varWords[0],
875                                 pmbox->un.varWords[1],
876                                 pmbox->un.varWords[2],
877                                 pmbox->un.varWords[3],
878                                 pmbox->un.varWords[4],
879                                 pmbox->un.varWords[5],
880                                 pmbox->un.varWords[6],
881                                 pmbox->un.varWords[7]);
882
883                 if (pmb->mbox_cmpl)
884                         pmb->mbox_cmpl(phba,pmb);
885         } while (1);
886         return 0;
887 }
888
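/*
 * lpfc_sli_replace_hbqbuff: detach the DMA buffer identified by 'tag' from
 * its HBQ entry so the received data can be handed up the stack, and post
 * a freshly allocated buffer to the HBQ in its place.  If memory for the
 * replacement cannot be obtained (GFP_ATOMIC, since this runs from
 * response processing), the original buffer is returned to the caller
 * without being re-posted.  Returns NULL only when the tag is not found.
 */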
889 static struct lpfc_dmabuf *
890 lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
891 {
892         struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
893
894         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
895         if (hbq_entry == NULL)
896                 return NULL;
897         list_del(&hbq_entry->dbuf.list);
898         new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
899         if (new_hbq_entry == NULL)
900                 return &hbq_entry->dbuf;
901         new_hbq_entry->dbuf = hbq_entry->dbuf;
902         new_hbq_entry->tag = -1;
903         hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
904         if (hbq_entry->dbuf.virt == NULL) {
905                 kfree(new_hbq_entry);
906                 return &hbq_entry->dbuf;
907         }
908         lpfc_sli_free_hbq(phba, hbq_entry);
909         return &new_hbq_entry->dbuf;
910 }
911
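/*
 * lpfc_sli_process_unsol_iocb: handle an unsolicited iocb, typically a
 * received ELS frame or sequence.  The FC R_CTL and Type are implied by
 * the command code for ELS receives, or taken from word 5 of the iocb
 * otherwise, with a firmware workaround that forces ELS values when Rctl
 * comes back as 0 on the ELS ring.  When HBQs are enabled the referenced
 * HBQ buffers are swapped out via lpfc_sli_replace_hbqbuff().  The frame
 * is then dispatched to the ring's registered unsolicited-event handler:
 * either the profile handler in prt[0], or the entry whose rctl/type pair
 * matches.  An unmatched frame is only logged.
 */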
912 static int
913 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
914                             struct lpfc_iocbq *saveq)
915 {
916         IOCB_t           * irsp;
917         WORD5            * w5p;
918         uint32_t           Rctl, Type;
919         uint32_t           match, i;
920
921         match = 0;
922         irsp = &(saveq->iocb);
923         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
924             || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
925             || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
926             || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
927                 Rctl = FC_ELS_REQ;
928                 Type = FC_ELS_DATA;
929         } else {
930                 w5p =
931                     (WORD5 *) & (saveq->iocb.un.
932                                  ulpWord[5]);
933                 Rctl = w5p->hcsw.Rctl;
934                 Type = w5p->hcsw.Type;
935
936                 /* Firmware Workaround */
937                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
938                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
939                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
940                         Rctl = FC_ELS_REQ;
941                         Type = FC_ELS_DATA;
942                         w5p->hcsw.Rctl = Rctl;
943                         w5p->hcsw.Type = Type;
944                 }
945         }
946
947         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
948                 if (irsp->ulpBdeCount != 0)
949                         saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
950                                                 irsp->un.ulpWord[3]);
951                 if (irsp->ulpBdeCount == 2)
952                         saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
953                                                 irsp->un.ulpWord[15]);
954         }
955
956         /* Unsolicited responses */
957         if (pring->prt[0].profile) {
958                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
959                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
960                                                                         saveq);
961                 match = 1;
962         } else {
963                 /* We must search, based on rctl / type
964                    for the right routine */
965                 for (i = 0; i < pring->num_mask;
966                      i++) {
967                         if ((pring->prt[i].rctl ==
968                              Rctl)
969                             && (pring->prt[i].
970                                 type == Type)) {
971                                 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
972                                         (pring->prt[i].lpfc_sli_rcv_unsol_event)
973                                                         (phba, pring, saveq);
974                                 match = 1;
975                                 break;
976                         }
977                 }
978         }
979         if (match == 0) {
980                 /* Unexpected Rctl / Type received */
981                 /* Ring <ringno> handler: unexpected
982                    Rctl <Rctl> Type <Type> received */
983                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
984                                 "%d:0313 Ring %d handler: unexpected Rctl x%x "
985                                 "Type x%x received\n",
986                                 phba->brd_no,
987                                 pring->ringno,
988                                 Rctl,
989                                 Type);
990         }
991         return 1;
992 }
993
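/*
 * lpfc_sli_iocbq_lookup: translate the iotag carried in a response iocb
 * back into the originating command iocbq via psli->iocbq_lookup[], and
 * remove that command from the ring's txcmplq.  An out-of-range iotag is
 * logged (message 0317) and NULL is returned.  Called with hbalock held
 * by the completion paths below.
 */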
994 static struct lpfc_iocbq *
995 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
996                       struct lpfc_sli_ring *pring,
997                       struct lpfc_iocbq *prspiocb)
998 {
999         struct lpfc_iocbq *cmd_iocb = NULL;
1000         uint16_t iotag;
1001
1002         iotag = prspiocb->iocb.ulpIoTag;
1003
1004         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
1005                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
1006                 list_del_init(&cmd_iocb->list);
1007                 pring->txcmplq_cnt--;
1008                 return cmd_iocb;
1009         }
1010
1011         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1012                         "%d:0317 iotag x%x is out of "
1013                         "range: max iotag x%x wd0 x%x\n",
1014                         phba->brd_no, iotag,
1015                         phba->sli.last_iotag,
1016                         *(((uint32_t *) &prspiocb->iocb) + 7));
1017         return NULL;
1018 }
1019
1020 static int
1021 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1022                           struct lpfc_iocbq *saveq)
1023 {
1024         struct lpfc_iocbq *cmdiocbp;
1025         int rc = 1;
1026         unsigned long iflag;
1027
1028         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
1029         spin_lock_irqsave(&phba->hbalock, iflag);
1030         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
1031         spin_unlock_irqrestore(&phba->hbalock, iflag);
1032
1033         if (cmdiocbp) {
1034                 if (cmdiocbp->iocb_cmpl) {
1035                         /*
1036                          * Post all ELS completions to the worker thread.
1037                          * All other are passed to the completion callback.
1038                          */
1039                         if (pring->ringno == LPFC_ELS_RING) {
1040                                 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
1041                                         cmdiocbp->iocb_flag &=
1042                                                 ~LPFC_DRIVER_ABORTED;
1043                                         saveq->iocb.ulpStatus =
1044                                                 IOSTAT_LOCAL_REJECT;
1045                                         saveq->iocb.un.ulpWord[4] =
1046                                                 IOERR_SLI_ABORTED;
1047                                 }
1048                         }
1049                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
1050                 } else
1051                         lpfc_sli_release_iocbq(phba, cmdiocbp);
1052         } else {
1053                 /*
1054                  * Unknown initiating command based on the response iotag.
1055                  * This could be the case on the ELS ring because of
1056                  * lpfc_els_abort().
1057                  */
1058                 if (pring->ringno != LPFC_ELS_RING) {
1059                         /*
1060                          * Ring <ringno> handler: unexpected completion IoTag
1061                          * <IoTag>
1062                          */
1063                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1064                                         "%d (%d):0322 Ring %d handler: "
1065                                         "unexpected completion IoTag x%x "
1066                                         "Data: x%x x%x x%x x%x\n",
1067                                         phba->brd_no,
1068                                         saveq->vport ? saveq->vport->vpi : 0,
1069                                         pring->ringno,
1070                                         saveq->iocb.ulpIoTag,
1071                                         saveq->iocb.ulpStatus,
1072                                         saveq->iocb.un.ulpWord[4],
1073                                         saveq->iocb.ulpCommand,
1074                                         saveq->iocb.ulpContext);
1075                 }
1076         }
1077
1078         return rc;
1079 }
1080
1081 static void
1082 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1083 {
1084         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1085                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1086                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1087         /*
1088          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1089          * rsp ring <portRspMax>
1090          */
1091         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1092                         "%d:0312 Ring %d handler: portRspPut %d "
1093                         "is bigger than rsp ring %d\n",
1094                         phba->brd_no, pring->ringno,
1095                         le32_to_cpu(pgp->rspPutInx),
1096                         pring->numRiocb);
1097
1098         phba->link_state = LPFC_HBA_ERROR;
1099
1100         /*
1101          * All error attention handlers are posted to
1102          * worker thread
1103          */
1104         phba->work_ha |= HA_ERATT;
1105         phba->work_hs = HS_FFER3;
1106
1107         /* hbalock should already be held */
1108         if (phba->work_wait)
1109                 lpfc_worker_wake_up(phba);
1110
1111         return;
1112 }
1113
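/*
 * lpfc_sli_poll_fcp_ring: polled servicing of the FCP ring response
 * entries.  Each response is copied out of SLIM, matched to its command
 * iocb by iotag and completed, and the ring's rspGetInx is written back.
 * ADAPTER_MSG entries are printed and unknown commands are logged.  After
 * the ring is drained, a full response ring is acknowledged through the
 * CA register and, if the driver had flagged the command ring full
 * (LPFC_CALL_RING_AVAILABLE), queued transmit iocbs are resumed.
 */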
1114 void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1115 {
1116         struct lpfc_sli      *psli  = &phba->sli;
1117         struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1118         IOCB_t *irsp = NULL;
1119         IOCB_t *entry = NULL;
1120         struct lpfc_iocbq *cmdiocbq = NULL;
1121         struct lpfc_iocbq rspiocbq;
1122         struct lpfc_pgp *pgp;
1123         uint32_t status;
1124         uint32_t portRspPut, portRspMax;
1125         int type;
1126         uint32_t rsp_cmpl = 0;
1127         uint32_t ha_copy;
1128         unsigned long iflags;
1129
1130         pring->stats.iocb_event++;
1131
1132         pgp = (phba->sli_rev == 3) ?
1133                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1134                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1135
1136
1137         /*
1138          * The next available response entry should never exceed the maximum
1139          * entries.  If it does, treat it as an adapter hardware error.
1140          */
1141         portRspMax = pring->numRiocb;
1142         portRspPut = le32_to_cpu(pgp->rspPutInx);
1143         if (unlikely(portRspPut >= portRspMax)) {
1144                 lpfc_sli_rsp_pointers_error(phba, pring);
1145                 return;
1146         }
1147
1148         rmb();
1149         while (pring->rspidx != portRspPut) {
1150                 entry = lpfc_resp_iocb(phba, pring);
1151                 if (++pring->rspidx >= portRspMax)
1152                         pring->rspidx = 0;
1153
1154                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1155                                       (uint32_t *) &rspiocbq.iocb,
1156                                       phba->iocb_rsp_size);
1157                 irsp = &rspiocbq.iocb;
1158                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1159                 pring->stats.iocb_rsp++;
1160                 rsp_cmpl++;
1161
1162                 if (unlikely(irsp->ulpStatus)) {
1163                         /* Rsp ring <ringno> error: IOCB */
1164                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1165                                         "%d:0326 Rsp Ring %d error: IOCB Data: "
1166                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1167                                         phba->brd_no, pring->ringno,
1168                                         irsp->un.ulpWord[0],
1169                                         irsp->un.ulpWord[1],
1170                                         irsp->un.ulpWord[2],
1171                                         irsp->un.ulpWord[3],
1172                                         irsp->un.ulpWord[4],
1173                                         irsp->un.ulpWord[5],
1174                                         *(((uint32_t *) irsp) + 6),
1175                                         *(((uint32_t *) irsp) + 7));
1176                 }
1177
1178                 switch (type) {
1179                 case LPFC_ABORT_IOCB:
1180                 case LPFC_SOL_IOCB:
1181                         /*
1182                          * Idle exchange closed via ABTS from port.  No iocb
1183                          * resources need to be recovered.
1184                          */
1185                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1186                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1187                                                 "%d:0314 IOCB cmd 0x%x"
1188                                                 " processed. Skipping"
1189                                                 " completion\n", phba->brd_no,
1190                                                 irsp->ulpCommand);
1191                                 break;
1192                         }
1193
1194                         spin_lock_irqsave(&phba->hbalock, iflags);
1195                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1196                                                          &rspiocbq);
1197                         spin_unlock_irqrestore(&phba->hbalock, iflags);
1198                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1199                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1200                                                       &rspiocbq);
1201                         }
1202                         break;
1203                 default:
1204                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1205                                 char adaptermsg[LPFC_MAX_ADPTMSG];
1206                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1207                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1208                                        MAX_MSG_DATA);
1209                                 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1210                                          phba->brd_no, adaptermsg);
1211                         } else {
1212                                 /* Unknown IOCB command */
1213                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1214                                                 "%d:0321 Unknown IOCB command "
1215                                                 "Data: x%x, x%x x%x x%x x%x\n",
1216                                                 phba->brd_no, type,
1217                                                 irsp->ulpCommand,
1218                                                 irsp->ulpStatus,
1219                                                 irsp->ulpIoTag,
1220                                                 irsp->ulpContext);
1221                         }
1222                         break;
1223                 }
1224
1225                 /*
1226                  * The response IOCB has been processed.  Update the ring
1227                  * pointer in SLIM.  If the port response put pointer has not
1228                  * been updated, sync the pgp->rspPutInx and fetch the new port
1229                  * response put pointer.
1230                  */
1231                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1232
1233                 if (pring->rspidx == portRspPut)
1234                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1235         }
1236
1237         ha_copy = readl(phba->HAregaddr);
1238         ha_copy >>= (LPFC_FCP_RING * 4);
1239
1240         if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
1241                 spin_lock_irqsave(&phba->hbalock, iflags);
1242                 pring->stats.iocb_rsp_full++;
1243                 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1244                 writel(status, phba->CAregaddr);
1245                 readl(phba->CAregaddr);
1246                 spin_unlock_irqrestore(&phba->hbalock, iflags);
1247         }
1248         if ((ha_copy & HA_R0CE_RSP) &&
1249             (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1250                 spin_lock_irqsave(&phba->hbalock, iflags);
1251                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1252                 pring->stats.iocb_cmd_empty++;
1253
1254                 /* Force update of the local copy of cmdGetInx */
1255                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1256                 lpfc_sli_resume_iocb(phba, pring);
1257
1258                 if ((pring->lpfc_sli_cmd_available))
1259                         (pring->lpfc_sli_cmd_available) (phba, pring);
1260
1261                 spin_unlock_irqrestore(&phba->hbalock, iflags);
1262         }
1263
1264         return;
1265 }
1266
1267 /*
1268  * This routine presumes LPFC_FCP_RING handling and doesn't bother
1269  * to check it explicitly.
1270  */
1271 static int
1272 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1273                                 struct lpfc_sli_ring *pring, uint32_t mask)
1274 {
1275         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1276                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1277                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1278         IOCB_t *irsp = NULL;
1279         IOCB_t *entry = NULL;
1280         struct lpfc_iocbq *cmdiocbq = NULL;
1281         struct lpfc_iocbq rspiocbq;
1282         uint32_t status;
1283         uint32_t portRspPut, portRspMax;
1284         int rc = 1;
1285         lpfc_iocb_type type;
1286         unsigned long iflag;
1287         uint32_t rsp_cmpl = 0;
1288
1289         spin_lock_irqsave(&phba->hbalock, iflag);
1290         pring->stats.iocb_event++;
1291
1292         /*
1293          * The next available response entry should never exceed the maximum
1294          * entries.  If it does, treat it as an adapter hardware error.
1295          */
1296         portRspMax = pring->numRiocb;
1297         portRspPut = le32_to_cpu(pgp->rspPutInx);
1298         if (unlikely(portRspPut >= portRspMax)) {
1299                 lpfc_sli_rsp_pointers_error(phba, pring);
1300                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1301                 return 1;
1302         }
1303
1304         rmb();
1305         while (pring->rspidx != portRspPut) {
1306                 /*
1307                  * Fetch an entry off the ring and copy it into a local data
1308                  * structure.  The copy involves a byte-swap since the
1309                  * network byte order and pci byte orders are different.
1310                  */
1311                 entry = lpfc_resp_iocb(phba, pring);
1312
1313                 if (++pring->rspidx >= portRspMax)
1314                         pring->rspidx = 0;
1315
1316                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1317                                       (uint32_t *) &rspiocbq.iocb,
1318                                       phba->iocb_rsp_size);
1319                 INIT_LIST_HEAD(&(rspiocbq.list));
1320                 irsp = &rspiocbq.iocb;
1321
1322                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1323                 pring->stats.iocb_rsp++;
1324                 rsp_cmpl++;
1325
1326                 if (unlikely(irsp->ulpStatus)) {
1327                         /*
1328                          * If resource errors reported from HBA, reduce
1329                          * queuedepths of the SCSI device.
1330                          */
1331                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1332                                 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1333                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1334                                 lpfc_adjust_queue_depth(phba);
1335                                 spin_lock_irqsave(&phba->hbalock, iflag);
1336                         }
1337
1338                         /* Rsp ring <ringno> error: IOCB */
1339                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1340                                         "%d:0336 Rsp Ring %d error: IOCB Data: "
1341                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1342                                         phba->brd_no, pring->ringno,
1343                                         irsp->un.ulpWord[0],
1344                                         irsp->un.ulpWord[1],
1345                                         irsp->un.ulpWord[2],
1346                                         irsp->un.ulpWord[3],
1347                                         irsp->un.ulpWord[4],
1348                                         irsp->un.ulpWord[5],
1349                                         *(((uint32_t *) irsp) + 6),
1350                                         *(((uint32_t *) irsp) + 7));
1351                 }
1352
1353                 switch (type) {
1354                 case LPFC_ABORT_IOCB:
1355                 case LPFC_SOL_IOCB:
1356                         /*
1357                          * Idle exchange closed via ABTS from port.  No iocb
1358                          * resources need to be recovered.
1359                          */
1360                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1361                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1362                                                 "%d:0333 IOCB cmd 0x%x"
1363                                                 " processed. Skipping"
1364                                                 " completion\n",
1365                                                 phba->brd_no,
1366                                                 irsp->ulpCommand);
1367                                 break;
1368                         }
1369
1370                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1371                                                          &rspiocbq);
1372                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1373                                 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1374                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1375                                                               &rspiocbq);
1376                                 } else {
1377                                         spin_unlock_irqrestore(&phba->hbalock,
1378                                                                iflag);
1379                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1380                                                               &rspiocbq);
1381                                         spin_lock_irqsave(&phba->hbalock,
1382                                                           iflag);
1383                                 }
1384                         }
1385                         break;
1386                 case LPFC_UNSOL_IOCB:
1387                         spin_unlock_irqrestore(&phba->hbalock, iflag);
1388                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1389                         spin_lock_irqsave(&phba->hbalock, iflag);
1390                         break;
1391                 default:
1392                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1393                                 char adaptermsg[LPFC_MAX_ADPTMSG];
1394                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1395                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1396                                        MAX_MSG_DATA);
1397                                 dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1398                                          phba->brd_no, adaptermsg);
1399                         } else {
1400                                 /* Unknown IOCB command */
1401                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1402                                                 "%d:0334 Unknown IOCB command "
1403                                                 "Data: x%x, x%x x%x x%x x%x\n",
1404                                                 phba->brd_no, type,
1405                                                 irsp->ulpCommand,
1406                                                 irsp->ulpStatus,
1407                                                 irsp->ulpIoTag,
1408                                                 irsp->ulpContext);
1409                         }
1410                         break;
1411                 }
1412
1413                 /*
1414                  * The response IOCB has been processed.  Update the ring
1415                  * pointer in SLIM.  If the port response put pointer has not
1416                  * been updated, re-read pgp->rspPutInx to pick up the new
1417                  * port response put pointer.
1418                  */
1419                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1420
1421                 if (pring->rspidx == portRspPut)
1422                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1423         }
1424
1425         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1426                 pring->stats.iocb_rsp_full++;
1427                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1428                 writel(status, phba->CAregaddr);
1429                 readl(phba->CAregaddr);
1430         }
1431         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1432                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1433                 pring->stats.iocb_cmd_empty++;
1434
1435                 /* Force update of the local copy of cmdGetInx */
1436                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1437                 lpfc_sli_resume_iocb(phba, pring);
1438
1439                 if ((pring->lpfc_sli_cmd_available))
1440                         (pring->lpfc_sli_cmd_available) (phba, pring);
1441
1442         }
1443
1444         spin_unlock_irqrestore(&phba->hbalock, iflag);
1445         return rc;
1446 }
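
/*
 * Minimal illustrative sketch (hypothetical helper, not part of the driver):
 * the Chip Attention register reserves four bits per ring, which is why the
 * handler above builds its doorbell value as
 * (CA_R0ATT | CA_R0RE_RSP) << (ringno * 4).  Expressed on its own:
 */
static inline uint32_t
lpfc_sketch_ring_ca_bits(struct lpfc_sli_ring *pring)
{
        /* Shift the ring-0 attention/response bits into this ring's field. */
        return (CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4);
}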
1447
1448 int
1449 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1450                                 struct lpfc_sli_ring *pring, uint32_t mask)
1451 {
1452         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1453                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1454                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1455         IOCB_t *entry;
1456         IOCB_t *irsp = NULL;
1457         struct lpfc_iocbq *rspiocbp = NULL;
1458         struct lpfc_iocbq *next_iocb;
1459         struct lpfc_iocbq *cmdiocbp;
1460         struct lpfc_iocbq *saveq;
1461         uint8_t iocb_cmd_type;
1462         lpfc_iocb_type type;
1463         uint32_t status, free_saveq;
1464         uint32_t portRspPut, portRspMax;
1465         int rc = 1;
1466         unsigned long iflag;
1467
1468         spin_lock_irqsave(&phba->hbalock, iflag);
1469         pring->stats.iocb_event++;
1470
1471         /*
1472          * The next available response entry should never exceed the maximum
1473          * entries.  If it does, treat it as an adapter hardware error.
1474          */
1475         portRspMax = pring->numRiocb;
1476         portRspPut = le32_to_cpu(pgp->rspPutInx);
1477         if (portRspPut >= portRspMax) {
1478                 /*
1479                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1480                  * rsp ring <portRspMax>
1481                  */
1482                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1483                                 "%d:0303 Ring %d handler: portRspPut %d "
1484                                 "is bigger than rsp ring %d\n",
1485                                 phba->brd_no, pring->ringno, portRspPut,
1486                                 portRspMax);
1487
1488                 phba->link_state = LPFC_HBA_ERROR;
1489                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1490
1491                 phba->work_hs = HS_FFER3;
1492                 lpfc_handle_eratt(phba);
1493
1494                 return 1;
1495         }
1496
1497         rmb();
1498         while (pring->rspidx != portRspPut) {
1499                 /*
1500                  * Build a completion list and call the appropriate handler.
1501                  * The process is to get the next available response iocb,
1502                  * get a free iocb from the list, copy the response data
1503                  * into the free iocb, insert it into the continuation list,
1504                  * and update the next response index in SLIM.  This makes
1505                  * response iocbs in the ring available to DMA again as fast
1506                  * as possible, at the cost of a copy operation.  Since an
1507                  * iocb is only 32 bytes, that cost is small relative to the
1508                  * PCI reads for register values and the SLIM write.  When
1509                  * the ulpLe field is set, the entire command has been
1510                  * received.
1511                  */
1512                 entry = lpfc_resp_iocb(phba, pring);
1513
1514                 rspiocbp = __lpfc_sli_get_iocbq(phba);
1515                 if (rspiocbp == NULL) {
1516                         printk(KERN_ERR "%s: out of buffers! Failing "
1517                                "completion.\n", __func__);
1518                         break;
1519                 }
1520
1521                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
1522                                       phba->iocb_rsp_size);
1523                 irsp = &rspiocbp->iocb;
1524
1525                 if (++pring->rspidx >= portRspMax)
1526                         pring->rspidx = 0;
1527
1528                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1529
1530                 /*
1531                  * list_add_tail() handles the empty-list case as well, so
1532                  * there is no need to special-case the first entry on the
1533                  * continuation queue.
1534                  */
1535                 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
1536
1537                 pring->iocb_continueq_cnt++;
1538                 if (irsp->ulpLe) {
1539                         /*
1540                          * By default, the driver expects to free all resources
1541                          * associated with this iocb completion.
1542                          */
1543                         free_saveq = 1;
1544                         saveq = list_get_first(&pring->iocb_continueq,
1545                                                struct lpfc_iocbq, list);
1546                         irsp = &(saveq->iocb);
1547                         list_del_init(&pring->iocb_continueq);
1548                         pring->iocb_continueq_cnt = 0;
1549
1550                         pring->stats.iocb_rsp++;
1551
1552                         /*
1553                          * If resource errors reported from HBA, reduce
1554                          * queuedepths of the SCSI device.
1555                          */
1556                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1557                              (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1558                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1559                                 lpfc_adjust_queue_depth(phba);
1560                                 spin_lock_irqsave(&phba->hbalock, iflag);
1561                         }
1562
1563                         if (irsp->ulpStatus) {
1564                                 /* Rsp ring <ringno> error: IOCB */
1565                                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1566                                                 "%d:0328 Rsp Ring %d error: "
1567                                                 "IOCB Data: "
1568                                                 "x%x x%x x%x x%x "
1569                                                 "x%x x%x x%x x%x "
1570                                                 "x%x x%x x%x x%x "
1571                                                 "x%x x%x x%x x%x\n",
1572                                                 phba->brd_no,
1573                                                 pring->ringno,
1574                                                 irsp->un.ulpWord[0],
1575                                                 irsp->un.ulpWord[1],
1576                                                 irsp->un.ulpWord[2],
1577                                                 irsp->un.ulpWord[3],
1578                                                 irsp->un.ulpWord[4],
1579                                                 irsp->un.ulpWord[5],
1580                                                 *(((uint32_t *) irsp) + 6),
1581                                                 *(((uint32_t *) irsp) + 7),
1582                                                 *(((uint32_t *) irsp) + 8),
1583                                                 *(((uint32_t *) irsp) + 9),
1584                                                 *(((uint32_t *) irsp) + 10),
1585                                                 *(((uint32_t *) irsp) + 11),
1586                                                 *(((uint32_t *) irsp) + 12),
1587                                                 *(((uint32_t *) irsp) + 13),
1588                                                 *(((uint32_t *) irsp) + 14),
1589                                                 *(((uint32_t *) irsp) + 15));
1590                         }
1591
1592                         /*
1593                          * Fetch the IOCB command type and call the correct
1594                          * completion routine.  Solicited and Unsolicited
1595                          * IOCBs on the ELS ring get freed back to the
1596                          * lpfc_iocb_list by the discovery kernel thread.
1597                          */
1598                         iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1599                         type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1600                         if (type == LPFC_SOL_IOCB) {
1601                                 spin_unlock_irqrestore(&phba->hbalock,
1602                                                        iflag);
1603                                 rc = lpfc_sli_process_sol_iocb(phba, pring,
1604                                                                saveq);
1605                                 spin_lock_irqsave(&phba->hbalock, iflag);
1606                         } else if (type == LPFC_UNSOL_IOCB) {
1607                                 spin_unlock_irqrestore(&phba->hbalock,
1608                                                        iflag);
1609                                 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1610                                                                  saveq);
1611                                 spin_lock_irqsave(&phba->hbalock, iflag);
1612                         } else if (type == LPFC_ABORT_IOCB) {
1613                                 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1614                                     ((cmdiocbp =
1615                                       lpfc_sli_iocbq_lookup(phba, pring,
1616                                                             saveq)))) {
1617                                         /* Call the specified completion
1618                                            routine */
1619                                         if (cmdiocbp->iocb_cmpl) {
1620                                                 spin_unlock_irqrestore(
1621                                                        &phba->hbalock,
1622                                                        iflag);
1623                                                 (cmdiocbp->iocb_cmpl) (phba,
1624                                                              cmdiocbp, saveq);
1625                                                 spin_lock_irqsave(
1626                                                           &phba->hbalock,
1627                                                           iflag);
1628                                         } else
1629                                                 __lpfc_sli_release_iocbq(phba,
1630                                                                       cmdiocbp);
1631                                 }
1632                         } else if (type == LPFC_UNKNOWN_IOCB) {
1633                                 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1634
1635                                         char adaptermsg[LPFC_MAX_ADPTMSG];
1636
1637                                         memset(adaptermsg, 0,
1638                                                LPFC_MAX_ADPTMSG);
1639                                         memcpy(&adaptermsg[0], (uint8_t *) irsp,
1640                                                MAX_MSG_DATA);
1641                                         dev_warn(&((phba->pcidev)->dev),
1642                                                  "lpfc%d: %s",
1643                                                  phba->brd_no, adaptermsg);
1644                                 } else {
1645                                         /* Unknown IOCB command */
1646                                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1647                                                         "%d:0335 Unknown IOCB "
1648                                                         "command Data: x%x "
1649                                                         "x%x x%x x%x\n",
1650                                                         phba->brd_no,
1651                                                         irsp->ulpCommand,
1652                                                         irsp->ulpStatus,
1653                                                         irsp->ulpIoTag,
1654                                                         irsp->ulpContext);
1655                                 }
1656                         }
1657
1658                         if (free_saveq) {
1659                                 list_for_each_entry_safe(rspiocbp, next_iocb,
1660                                                          &saveq->list, list) {
1661                                         list_del(&rspiocbp->list);
1662                                         __lpfc_sli_release_iocbq(phba,
1663                                                                  rspiocbp);
1664                                 }
1665                                 __lpfc_sli_release_iocbq(phba, saveq);
1666                         }
1667                         rspiocbp = NULL;
1668                 }
1669
1670                 /*
1671                  * If the port response put pointer has not been updated, sync
1672                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
1673                  * response put pointer.
1674                  */
1675                 if (pring->rspidx == portRspPut) {
1676                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1677                 }
1678         } /* while (pring->rspidx != portRspPut) */
1679
1680         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1681                 /* At least one response entry has been freed */
1682                 pring->stats.iocb_rsp_full++;
1683                 /* SET RxRE_RSP in Chip Att register */
1684                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1685                 writel(status, phba->CAregaddr);
1686                 readl(phba->CAregaddr); /* flush */
1687         }
1688         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1689                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1690                 pring->stats.iocb_cmd_empty++;
1691
1692                 /* Force update of the local copy of cmdGetInx */
1693                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1694                 lpfc_sli_resume_iocb(phba, pring);
1695
1696                 if ((pring->lpfc_sli_cmd_available))
1697                         (pring->lpfc_sli_cmd_available) (phba, pring);
1698
1699         }
1700
1701         spin_unlock_irqrestore(&phba->hbalock, iflag);
1702         return rc;
1703 }
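
/*
 * Minimal illustrative sketch (hypothetical helper, not part of the driver):
 * both ring handlers above walk the response ring the same way.  The host get
 * index is advanced modulo the ring size, published to the host_gp slot in
 * SLIM, and the cached copy of the port's put index is re-read from the
 * port-get-pointer block once the two indices meet.
 */
static inline uint32_t
lpfc_sketch_advance_rspidx(struct lpfc_sli_ring *pring, uint32_t portRspMax)
{
        /* Advance the response get index, wrapping at the end of the ring. */
        if (++pring->rspidx >= portRspMax)
                pring->rspidx = 0;
        return pring->rspidx;
}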
1704
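/*
 * Flush all I/O from a ring when the HBA is reset or taken down.  For the ELS
 * ring, outstanding fabric I/O is aborted first.  Commands still waiting on
 * the txq are completed locally with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED
 * (or simply released when they have no completion handler), while commands
 * already posted to the adapter (txcmplq) have an abort issued for them.
 */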
1705 void
1706 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1707 {
1708         LIST_HEAD(completions);
1709         struct lpfc_iocbq *iocb, *next_iocb;
1710         IOCB_t *cmd = NULL;
1711
1712         if (pring->ringno == LPFC_ELS_RING) {
1713                 lpfc_fabric_abort_hba(phba);
1714         }
1715
1716         /* Error everything on txq and txcmplq
1717          * First do the txq.
1718          */
1719         spin_lock_irq(&phba->hbalock);
1720         list_splice_init(&pring->txq, &completions);
1721         pring->txq_cnt = 0;
1722
1723         /* Next issue ABTS for everything on the txcmplq */
1724         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1725                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1726
1727         spin_unlock_irq(&phba->hbalock);
1728
1729         while (!list_empty(&completions)) {
1730                 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1731                 cmd = &iocb->iocb;
1732                 list_del_init(&iocb->list);
1733
1734                 if (!iocb->iocb_cmpl)
1735                         lpfc_sli_release_iocbq(phba, iocb);
1736                 else {
1737                         cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1738                         cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1739                         (iocb->iocb_cmpl) (phba, iocb, iocb);
1740                 }
1741         }
1742 }
1743
1744 int
1745 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1746 {
1747         uint32_t status;
1748         int i = 0;
1749         int retval = 0;
1750
1751         /* Read the HBA Host Status Register */
1752         status = readl(phba->HSregaddr);
1753
1754         /*
1755          * Check the status register every 10ms for 5 retries, then every
1756          * 500ms for 5, then every 2.5 sec for 5, then reset the board and
1757          * keep checking every 2.5 sec (roughly 27.5 seconds worst case).
1758          * Break out of the loop if errors occurred during init.
1759          */
1760         while (((status & mask) != mask) &&
1761                !(status & HS_FFERM) &&
1762                i++ < 20) {
1763
1764                 if (i <= 5)
1765                         msleep(10);
1766                 else if (i <= 10)
1767                         msleep(500);
1768                 else
1769                         msleep(2500);
1770
1771                 if (i == 15) {
1772                         /* Do post */
1773                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1774                         lpfc_sli_brdrestart(phba);
1775                 }
1776                 /* Read the HBA Host Status Register */
1777                 status = readl(phba->HSregaddr);
1778         }
1779
1780         /* Check to see if any errors occurred during init */
1781         if ((status & HS_FFERM) || (i >= 20)) {
1782                 phba->link_state = LPFC_HBA_ERROR;
1783                 retval = 1;
1784         }
1785
1786         return retval;
1787 }
1788
1789 #define BARRIER_TEST_PATTERN (0xdeadbeef)
1790
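/*
 * Quiesce HBA DMA before a board reset.  This is only needed on
 * multi-function (PCI header type 0x80) Helios and Thor chips.  With error
 * attention masked, a MBX_KILL_BOARD mailbox owned by the chip is written to
 * SLIM together with BARRIER_TEST_PATTERN; the chip acknowledges by writing
 * back the complement of the pattern.  Any error attention raised along the
 * way is cleared, and the host control register is restored before returning.
 */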
1791 void lpfc_reset_barrier(struct lpfc_hba *phba)
1792 {
1793         uint32_t __iomem *resp_buf;
1794         uint32_t __iomem *mbox_buf;
1795         volatile uint32_t mbox;
1796         uint32_t hc_copy;
1797         int  i;
1798         uint8_t hdrtype;
1799
1800         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1801         if (hdrtype != 0x80 ||
1802             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1803              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
1804                 return;
1805
1806         /*
1807          * Tell the other part of the chip to suspend temporarily all
1808          * its DMA activity.
1809          */
1810         resp_buf = phba->MBslimaddr;
1811
1812         /* Disable the error attention */
1813         hc_copy = readl(phba->HCregaddr);
1814         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1815         readl(phba->HCregaddr); /* flush */
1816         phba->link_flag |= LS_IGNORE_ERATT;
1817
1818         if (readl(phba->HAregaddr) & HA_ERATT) {
1819                 /* Clear Chip error bit */
1820                 writel(HA_ERATT, phba->HAregaddr);
1821                 phba->pport->stopped = 1;
1822         }
1823
1824         mbox = 0;
1825         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1826         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1827
1828         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1829         mbox_buf = phba->MBslimaddr;
1830         writel(mbox, mbox_buf);
1831
1832         for (i = 0;
1833              readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1834                 mdelay(1);
1835
1836         if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1837                 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1838                     phba->pport->stopped)
1839                         goto restore_hc;
1840                 else
1841                         goto clear_errat;
1842         }
1843
1844         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1845         for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
1846                 mdelay(1);
1847
1848 clear_errat:
1849
1850         while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1851                 mdelay(1);
1852
1853         if (readl(phba->HAregaddr) & HA_ERATT) {
1854                 writel(HA_ERATT, phba->HAregaddr);
1855                 phba->pport->stopped = 1;
1856         }
1857
1858 restore_hc:
1859         phba->link_flag &= ~LS_IGNORE_ERATT;
1860         writel(hc_copy, phba->HCregaddr);
1861         readl(phba->HCregaddr); /* flush */
1862 }
1863
1864 int
1865 lpfc_sli_brdkill(struct lpfc_hba *phba)
1866 {
1867         struct lpfc_sli *psli;
1868         LPFC_MBOXQ_t *pmb;
1869         uint32_t status;
1870         uint32_t ha_copy;
1871         int retval;
1872         int i = 0;
1873
1874         psli = &phba->sli;
1875
1876         /* Kill HBA */
1877         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1878                         "%d:0329 Kill HBA Data: x%x x%x\n",
1879                         phba->brd_no, phba->pport->port_state, psli->sli_flag);
1880
1881         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1882         if (!pmb)
1883                 return 1;
1884
1885         /* Disable the error attention */
1886         spin_lock_irq(&phba->hbalock);
1887         status = readl(phba->HCregaddr);
1888         status &= ~HC_ERINT_ENA;
1889         writel(status, phba->HCregaddr);
1890         readl(phba->HCregaddr); /* flush */
1891         phba->link_flag |= LS_IGNORE_ERATT;
1892         spin_unlock_irq(&phba->hbalock);
1893
1894         lpfc_kill_board(phba, pmb);
1895         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1896         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1897
1898         if (retval != MBX_SUCCESS) {
1899                 if (retval != MBX_BUSY)
1900                         mempool_free(pmb, phba->mbox_mem_pool);
1901                 spin_lock_irq(&phba->hbalock);
1902                 phba->link_flag &= ~LS_IGNORE_ERATT;
1903                 spin_unlock_irq(&phba->hbalock);
1904                 return 1;
1905         }
1906
1907         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1908
1909         mempool_free(pmb, phba->mbox_mem_pool);
1910
1911         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
1912          * attention every 100ms for 3 seconds. If we don't get ERATT after
1913          * 3 seconds we still set HBA_ERROR state because the status of the
1914          * board is now undefined.
1915          */
1916         ha_copy = readl(phba->HAregaddr);
1917
1918         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
1919                 mdelay(100);
1920                 ha_copy = readl(phba->HAregaddr);
1921         }
1922
1923         del_timer_sync(&psli->mbox_tmo);
1924         if (ha_copy & HA_ERATT) {
1925                 writel(HA_ERATT, phba->HAregaddr);
1926                 phba->pport->stopped = 1;
1927         }
1928         spin_lock_irq(&phba->hbalock);
1929         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1930         phba->link_flag &= ~LS_IGNORE_ERATT;
1931         spin_unlock_irq(&phba->hbalock);
1932
1933         psli->mbox_active = NULL;
1934         lpfc_hba_down_post(phba);
1935         phba->link_state = LPFC_HBA_ERROR;
1936
1937         return ha_copy & HA_ERATT ? 0 : 1;
1938 }
1939
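/*
 * Reset the HBA.  The discovery state (event tag, my/prev DID) is cleared,
 * PCI parity and SERR reporting are turned off around the reset, HC_INITFF is
 * toggled in the Host Control register, the PCI command register is restored,
 * and the per-ring index state is reinitialized.  The link is left in
 * LPFC_WARM_START.
 */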
1940 int
1941 lpfc_sli_brdreset(struct lpfc_hba *phba)
1942 {
1943         struct lpfc_sli *psli;
1944         struct lpfc_sli_ring *pring;
1945         uint16_t cfg_value;
1946         int i;
1947
1948         psli = &phba->sli;
1949
1950         /* Reset HBA */
1951         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1952                         "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
1953                         phba->pport->port_state, psli->sli_flag);
1954
1955         /* perform board reset */
1956         phba->fc_eventTag = 0;
1957         phba->pport->fc_myDID = 0;
1958         phba->pport->fc_prevDID = 0;
1959
1960         /* Turn off parity checking and serr during the physical reset */
1961         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1962         pci_write_config_word(phba->pcidev, PCI_COMMAND,
1963                               (cfg_value &
1964                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1965
1966         psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
1967         /* Now toggle INITFF bit in the Host Control Register */
1968         writel(HC_INITFF, phba->HCregaddr);
1969         mdelay(1);
1970         readl(phba->HCregaddr); /* flush */
1971         writel(0, phba->HCregaddr);
1972         readl(phba->HCregaddr); /* flush */
1973
1974         /* Restore PCI cmd register */
1975         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
1976
1977         /* Initialize relevant SLI info */
1978         for (i = 0; i < psli->num_rings; i++) {
1979                 pring = &psli->ring[i];
1980                 pring->flag = 0;
1981                 pring->rspidx = 0;
1982                 pring->next_cmdidx  = 0;
1983                 pring->local_getidx = 0;
1984                 pring->cmdidx = 0;
1985                 pring->missbufcnt = 0;
1986         }
1987
1988         phba->link_state = LPFC_WARM_START;
1989         return 0;
1990 }
1991
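/*
 * Restart the HBA.  An MBX_RESTART mailbox word is written directly to SLIM,
 * with word1 telling the firmware whether POST may be skipped (it is skipped
 * once the port has completed initialization), then the board is reset via
 * lpfc_sli_brdreset().  The routine waits 100 ms when POST is skipped and
 * 2 seconds otherwise before failing back outstanding I/O with
 * lpfc_hba_down_post().
 */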
1992 int
1993 lpfc_sli_brdrestart(struct lpfc_hba *phba)
1994 {
1995         MAILBOX_t *mb;
1996         struct lpfc_sli *psli;
1997         uint16_t skip_post;
1998         volatile uint32_t word0;
1999         void __iomem *to_slim;
2000
2001         spin_lock_irq(&phba->hbalock);
2002
2003         psli = &phba->sli;
2004
2005         /* Restart HBA */
2006         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2007                         "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
2008                         phba->pport->port_state, psli->sli_flag);
2009
2010         word0 = 0;
2011         mb = (MAILBOX_t *) &word0;
2012         mb->mbxCommand = MBX_RESTART;
2013         mb->mbxHc = 1;
2014
2015         lpfc_reset_barrier(phba);
2016
2017         to_slim = phba->MBslimaddr;
2018         writel(*(uint32_t *) mb, to_slim);
2019         readl(to_slim); /* flush */
2020
2021         /* Only skip post after fc_ffinit is completed */
2022         if (phba->pport->port_state) {
2023                 skip_post = 1;
2024                 word0 = 1;      /* This is really setting up word1 */
2025         } else {
2026                 skip_post = 0;
2027                 word0 = 0;      /* This is really setting up word1 */
2028         }
2029         to_slim = phba->MBslimaddr + sizeof (uint32_t);
2030         writel(*(uint32_t *) mb, to_slim);
2031         readl(to_slim); /* flush */
2032
2033         lpfc_sli_brdreset(phba);
2034         phba->pport->stopped = 0;
2035         phba->link_state = LPFC_INIT_START;
2036
2037         spin_unlock_irq(&phba->hbalock);
2038
2039         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2040         psli->stats_start = get_seconds();
2041
2042         if (skip_post)
2043                 mdelay(100);
2044         else
2045                 mdelay(2000);
2046
2047         lpfc_hba_down_post(phba);
2048
2049         return 0;
2050 }
2051
2052 static int
2053 lpfc_sli_chipset_init(struct lpfc_hba *phba)
2054 {
2055         uint32_t status, i = 0;
2056
2057         /* Read the HBA Host Status Register */
2058         status = readl(phba->HSregaddr);
2059
2060         /* Check status register to see what current state is */
2061         i = 0;
2062         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
2063
2064                 /* Check every 10ms for 5 retries, then every 500ms for 5,
2065                  * then every 2.5 sec for 5, then reset the board and keep
2066                  * checking every 2.5 sec.
2067                  */
2068                 if (i++ >= 20) {
2069                         /* Adapter failed to init, timeout, status reg
2070                            <status> */
2071                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2072                                         "%d:0436 Adapter failed to init, "
2073                                         "timeout, status reg x%x\n",
2074                                         phba->brd_no, status);
2075                         phba->link_state = LPFC_HBA_ERROR;
2076                         return -ETIMEDOUT;
2077                 }
2078
2079                 /* Check to see if any errors occurred during init */
2080                 if (status & HS_FFERM) {
2081                         /* ERROR: During chipset initialization */
2082                         /* Adapter failed to init, chipset, status reg
2083                            <status> */
2084                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2085                                         "%d:0437 Adapter failed to init, "
2086                                         "chipset, status reg x%x\n",
2087                                         phba->brd_no,
2088                                         status);
2089                         phba->link_state = LPFC_HBA_ERROR;
2090                         return -EIO;
2091                 }
2092
2093                 if (i <= 5) {
2094                         msleep(10);
2095                 } else if (i <= 10) {
2096                         msleep(500);
2097                 } else {
2098                         msleep(2500);
2099                 }
2100
2101                 if (i == 15) {
2102                         /* Do post */
2103                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2104                         lpfc_sli_brdrestart(phba);
2105                 }
2106                 /* Read the HBA Host Status Register */
2107                 status = readl(phba->HSregaddr);
2108         }
2109
2110         /* Check to see if any errors occurred during init */
2111         if (status & HS_FFERM) {
2112                 /* ERROR: During chipset initialization */
2113                 /* Adapter failed to init, chipset, status reg <status> */
2114                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2115                                 "%d:0438 Adapter failed to init, chipset, "
2116                                 "status reg x%x\n",
2117                                 phba->brd_no,
2118                                 status);
2119                 phba->link_state = LPFC_HBA_ERROR;
2120                 return -EIO;
2121         }
2122
2123         /* Clear all interrupt enable conditions */
2124         writel(0, phba->HCregaddr);
2125         readl(phba->HCregaddr); /* flush */
2126
2127         /* setup host attn register */
2128         writel(0xffffffff, phba->HAregaddr);
2129         readl(phba->HAregaddr); /* flush */
2130         return 0;
2131 }
2132
2133 static int
2134 lpfc_sli_hbq_count(void)
2135 {
2136         return ARRAY_SIZE(lpfc_hbq_defs);
2137 }
2138
2139 static int
2140 lpfc_sli_hbq_entry_count(void)
2141 {
2142         int  hbq_count = lpfc_sli_hbq_count();
2143         int  count = 0;
2144         int  i;
2145
2146         for (i = 0; i < hbq_count; ++i)
2147                 count += lpfc_hbq_defs[i]->entry_count;
2148         return count;
2149 }
2150
2151 int
2152 lpfc_sli_hbq_size(void)
2153 {
2154         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2155 }
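
/*
 * Usage sketch (not driver code; the local names below are hypothetical):
 * lpfc_sli_hbq_size() reports the number of bytes needed to hold every HBQ
 * entry described by lpfc_hbq_defs, so a caller could size a single coherent
 * DMA area for all host buffer queues roughly like this:
 *
 *      dma_addr_t hbq_phys;
 *      void *hbq_virt;
 *
 *      hbq_virt = dma_alloc_coherent(&phba->pcidev->dev,
 *                                    lpfc_sli_hbq_size(),
 *                                    &hbq_phys, GFP_KERNEL);
 *      if (!hbq_virt)
 *              return -ENOMEM;
 */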
2156
2157 static int
2158 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2159 {
2160         int  hbq_count = lpfc_sli_hbq_count();
2161         LPFC_MBOXQ_t *pmb;
2162         MAILBOX_t *pmbox;
2163         uint32_t hbqno;
2164         uint32_t hbq_entry_index;
2165
2166         /* Get a Mailbox buffer to set up mailbox
2167          * commands for HBA initialization
2168          */
2169         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2170
2171         if (!pmb)
2172                 return -ENOMEM;
2173
2174         pmbox = &pmb->mb;
2175
2176         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2177         phba->link_state = LPFC_INIT_MBX_CMDS;
2178
2179         hbq_entry_index = 0;
2180         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2181                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
2182                 phba->hbqs[hbqno].hbqPutIdx      = 0;
2183                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
2184                 phba->hbqs[hbqno].entry_count =
2185                         lpfc_hbq_defs[hbqno]->entry_count;
2186                 lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
2187                                 pmb);
2188                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2189
2190                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2191                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2192                            mbxStatus <status>, ring <num> */
2193
2194                         lpfc_printf_log(phba, KERN_ERR,
2195                                         LOG_SLI | LOG_VPORT,
2196                                         "%d:1805 Adapter failed to init. "
2197                                         "Data: x%x x%x x%x\n",
2198                                         phba->brd_no, pmbox->mbxCommand,
2199                                         pmbox->mbxStatus, hbqno);
2200
2201                         phba->link_state = LPFC_HBA_ERROR;
2202                         mempool_free(pmb, phba->mbox_mem_pool);
2203                         return -ENXIO;
2204                 }
2205         }
2206         phba->hbq_count = hbq_count;
2207
2208         mempool_free(pmb, phba->mbox_mem_pool);
2209
2210         /* Initially populate or replenish the HBQs */
2211         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2212                 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2213                         return -ENOMEM;
2214         }
2215         return 0;
2216 }
2217
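/*
 * Bring the port up in the requested SLI mode.  The board is restarted and
 * the chipset initialized (tried up to twice), lpfc_config_port_prep() is
 * run, and a CONFIG_PORT mailbox is issued by polling.  On success, max_vpi
 * is taken from the CONFIG_PORT response only when vports were requested and
 * the firmware grants them (gmv); a request for SLI-3 that comes back without
 * a configured mailbox area (cMA) is treated as a failure.
 */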
2218 static int
2219 lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2220 {
2221         LPFC_MBOXQ_t *pmb;
2222         uint32_t resetcount = 0, rc = 0, done = 0;
2223
2224         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2225         if (!pmb) {
2226                 phba->link_state = LPFC_HBA_ERROR;
2227                 return -ENOMEM;
2228         }
2229
2230         phba->sli_rev = sli_mode;
2231         while (resetcount < 2 && !done) {
2232                 spin_lock_irq(&phba->hbalock);
2233                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2234                 spin_unlock_irq(&phba->hbalock);
2235                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2236                 lpfc_sli_brdrestart(phba);
2237                 msleep(2500);
2238                 rc = lpfc_sli_chipset_init(phba);
2239                 if (rc)
2240                         break;
2241
2242                 spin_lock_irq(&phba->hbalock);
2243                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2244                 spin_unlock_irq(&phba->hbalock);
2245                 resetcount++;
2246
2247                 /* Call pre CONFIG_PORT mailbox command initialization.  A
2248                  * value of 0 means the call was successful.  Any nonzero
2249                  * value is a failure, but if -ERESTART is returned,
2250                  * the driver may reset the HBA and try again.
2251                  */
2252                 rc = lpfc_config_port_prep(phba);
2253                 if (rc == -ERESTART) {
2254                         phba->link_state = LPFC_LINK_UNKNOWN;
2255                         continue;
2256                 } else if (rc) {
2257                         break;
2258                 }
2259
2260                 phba->link_state = LPFC_INIT_MBX_CMDS;
2261                 lpfc_config_port(phba, pmb);
2262                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2263                 if (rc != MBX_SUCCESS) {
2264                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2265                                 "%d:0442 Adapter failed to init, mbxCmd x%x "
2266                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
2267                                 phba->brd_no, pmb->mb.mbxCommand,
2268                                 pmb->mb.mbxStatus, 0);
2269                         spin_lock_irq(&phba->hbalock);
2270                         phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2271                         spin_unlock_irq(&phba->hbalock);
2272                         rc = -ENXIO;
2273                 } else {
2274                         done = 1;
2275                         phba->max_vpi = (phba->max_vpi &&
2276                                          pmb->mb.un.varCfgPort.gmv) != 0
2277                                 ? pmb->mb.un.varCfgPort.max_vpi
2278                                 : 0;
2279                 }
2280         }
2281
2282         if (!done) {
2283                 rc = -EINVAL;
2284                 goto do_prep_failed;
2285         }
2286
2287         if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2288                 (!pmb->mb.un.varCfgPort.cMA)) {
2289                 rc = -ENXIO;
2290                 goto do_prep_failed;
2291         }
2292         return rc;
2293
2294 do_prep_failed:
2295         mempool_free(pmb, phba->mbox_mem_pool);
2296         return rc;
2297 }
2298
2299 int
2300 lpfc_sli_hba_setup(struct lpfc_hba *phba)
2301 {
2302         uint32_t rc;
2303         int  mode = 3;
2304
2305         switch (lpfc_sli_mode) {
2306         case 2:
2307                 if (lpfc_npiv_enable) {
2308                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2309                                 "%d:1824 NPIV enabled: Override lpfc_sli_mode "
2310                                 "parameter (%d) to auto (0).\n",
2311                                 phba->brd_no, lpfc_sli_mode);
2312                         break;
2313                 }
2314                 mode = 2;
2315                 break;
2316         case 0:
2317         case 3:
2318                 break;
2319         default:
2320                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2321                                 "%d:1819 Unrecognized lpfc_sli_mode "
2322                                 "parameter: %d.\n",
2323                                 phba->brd_no, lpfc_sli_mode);
2324
2325                 break;
2326         }
2327
2328         rc = lpfc_do_config_port(phba, mode);
2329         if (rc && lpfc_sli_mode == 3)
2330                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2331                                 "%d:1820 Unable to select SLI-3.  "
2332                                 "Not supported by adapter.\n",
2333                                 phba->brd_no);
2334         if (rc && mode != 2)
2335                 rc = lpfc_do_config_port(phba, 2);
2336         if (rc)
2337                 goto lpfc_sli_hba_setup_error;
2338
2339         if (phba->sli_rev == 3) {
2340                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2341                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2342                 phba->sli3_options |= LPFC_SLI3_ENABLED;
2343                 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2344
2345         } else {
2346                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2347                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2348                 phba->sli3_options = 0;
2349         }
2350
2351         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2352                         "%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
2353                         phba->brd_no, phba->sli_rev, phba->max_vpi);
2354         rc = lpfc_sli_ring_map(phba);
2355
2356         if (rc)
2357                 goto lpfc_sli_hba_setup_error;
2358
2359         /* Init HBQs */
2360
2361         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2362                 rc = lpfc_sli_hbq_setup(phba);
2363                 if (rc)
2364                         goto lpfc_sli_hba_setup_error;
2365         }
2366
2367         phba->sli.sli_flag |= LPFC_PROCESS_LA;
2368
2369         rc = lpfc_config_port_post(phba);
2370         if (rc)
2371                 goto lpfc_sli_hba_setup_error;
2372
2373         return rc;
2374
2375 lpfc_sli_hba_setup_error:
2376         phba->link_state = LPFC_HBA_ERROR;
2377         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2378                         "%d:0445 Firmware initialization failed\n",
2379                         phba->brd_no);
2380         return rc;
2381 }
2382
2383 /*! lpfc_mbox_timeout
2384  *
2385  * \pre
2386  * \post
2387  * \param ptr The driver's struct lpfc_hba pointer, cast to an
2388  *            unsigned long by the timer code.
2389  * \return
2390  *   void
2391  *
2392  * \b Description:
2393  *
2394  * This routine handles mailbox timeout events at timer interrupt context.
2395  */
2396 void
2397 lpfc_mbox_timeout(unsigned long ptr)
2398 {
2399         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
2400         unsigned long iflag;
2401         uint32_t tmo_posted;
2402
2403         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2404         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2405         if (!tmo_posted)
2406                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2407         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2408
2409         if (!tmo_posted) {
2410                 spin_lock_irqsave(&phba->hbalock, iflag);
2411                 if (phba->work_wait)
2412                         lpfc_worker_wake_up(phba);
2413                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2414         }
2415 }
2416
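/*
 * Worker-thread handler for a mailbox timeout.  Logs the timed-out command,
 * clears the WORKER_MBOX_TMO event, marks the link state unknown and drops
 * LPFC_SLI2_ACTIVE so outstanding I/O fails quickly, aborts the FCP ring, and
 * then recovers the adapter with an offline / restart / online cycle.
 */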
2417 void
2418 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2419 {
2420         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2421         MAILBOX_t *mb = &pmbox->mb;
2422         struct lpfc_sli *psli = &phba->sli;
2423         struct lpfc_sli_ring *pring;
2424
2425         if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
2426                 return;
2427         }
2428
2429         /* Mbox cmd <mbxCommand> timeout */
2430         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2431                         "%d:0310 Mailbox command x%x timeout Data: x%x x%x "
2432                         "x%p\n",
2433                         phba->brd_no,
2434                         mb->mbxCommand,
2435                         phba->pport->port_state,
2436                         phba->sli.sli_flag,
2437                         phba->sli.mbox_active);
2438
2439         /* Setting state unknown so lpfc_sli_abort_iocb_ring
2440          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2441          * it to fail all outstanding SCSI IO.
2442          */
2443         spin_lock_irq(&phba->pport->work_port_lock);
2444         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2445         spin_unlock_irq(&phba->pport->work_port_lock);
2446         spin_lock_irq(&phba->hbalock);
2447         phba->link_state = LPFC_LINK_UNKNOWN;
2448         phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2449         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2450         spin_unlock_irq(&phba->hbalock);
2451
2452         pring = &psli->ring[psli->fcp_ring];
2453         lpfc_sli_abort_iocb_ring(phba, pring);
2454
2455         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2456                         "%d:0316 Resetting board due to mailbox timeout\n",
2457                         phba->brd_no);
2458         /*
2459          * lpfc_offline calls lpfc_sli_hba_down which will clean up
2460          * any outstanding mailbox commands.
2461          */
2462         lpfc_offline_prep(phba);
2463         lpfc_offline(phba);
2464         lpfc_sli_brdrestart(phba);
2465         if (lpfc_online(phba) == 0)             /* Initialize the HBA */
2466                 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2467         lpfc_unblock_mgmt_io(phba);
2468         return;
2469 }
2470
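/*
 * Issue a mailbox command to the HBA.  With MBX_POLL the routine polls for
 * the command to complete; with MBX_NOWAIT the command is posted and
 * completes through the interrupt handler.  MBX_STOP_IOCB additionally flags
 * the rings to hold IOCB processing while the mailbox is active.  Returns
 * MBX_BUSY when the command is queued behind an already-active mailbox,
 * MBX_NOT_FINISHED when it cannot be issued, and MBXERR_ERROR when a command
 * that needs a vport is issued without one.
 */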
2471 int
2472 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2473 {
2474         MAILBOX_t *mb;
2475         struct lpfc_sli *psli = &phba->sli;
2476         uint32_t status, evtctr;
2477         uint32_t ha_copy;
2478         int i;
2479         unsigned long drvr_flag = 0;
2480         volatile uint32_t word0, ldata;
2481         void __iomem *to_slim;
2482
2483         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2484                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2485                 if (!pmbox->vport) {
2486                         lpfc_printf_log(phba, KERN_ERR,
2487                                         LOG_MBOX | LOG_VPORT,
2488                                         "%d:1806 Mbox x%x failed. No vport\n",
2489                                         phba->brd_no,
2490                                         pmbox->mb.mbxCommand);
2491                         dump_stack();
2492                         return MBXERR_ERROR;
2493                 }
2494         }
2495
2496
2497         /* If the PCI channel is in offline state, do not post mbox. */
2498         if (unlikely(pci_channel_offline(phba->pcidev)))
2499                 return MBX_NOT_FINISHED;
2500
2501         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2502         psli = &phba->sli;
2503
2504
2505         mb = &pmbox->mb;
2506         status = MBX_SUCCESS;
2507
2508         if (phba->link_state == LPFC_HBA_ERROR) {
2509                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2510
2511                 /* Mbox command <mbxCommand> cannot issue */
2512                 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2513                 return MBX_NOT_FINISHED;
2514         }
2515
2516         if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2517             !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2518                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2519                 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2520                 return MBX_NOT_FINISHED;
2521         }
2522
2523         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2524                 /* Polling for a mbox command when another one is already active
2525                  * is not allowed in SLI. Also, the driver must have established
2526                  * SLI2 mode to queue and process multiple mbox commands.
2527                  */
2528
2529                 if (flag & MBX_POLL) {
2530                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2531
2532                         /* Mbox command <mbxCommand> cannot issue */
2533                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2534                         return MBX_NOT_FINISHED;
2535                 }
2536
2537                 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2538                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2539                         /* Mbox command <mbxCommand> cannot issue */
2540                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2541                         return MBX_NOT_FINISHED;
2542                 }
2543
2544                 /* Handle STOP IOCB processing flag. This is only meaningful
2545                  * if we are not polling for mbox completion.
2546                  */
2547                 if (flag & MBX_STOP_IOCB) {
2548                         flag &= ~MBX_STOP_IOCB;
2549                         /* Now flag each ring */
2550                         for (i = 0; i < psli->num_rings; i++) {
2551                                 /* If the ring is active, flag it */
2552                                 if (psli->ring[i].cmdringaddr) {
2553                                         psli->ring[i].flag |=
2554                                             LPFC_STOP_IOCB_MBX;
2555                                 }
2556                         }
2557                 }
2558
2559                 /* Another mailbox command is still being processed, queue this
2560                  * command to be processed later.
2561                  */
2562                 lpfc_mbox_put(phba, pmbox);
2563
2564                 /* Mbox cmd issue - BUSY */
2565                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2566                                 "%d (%d):0308 Mbox cmd issue - BUSY Data: "
2567                                 "x%x x%x x%x x%x\n",
2568                                 phba->brd_no,
2569                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2570                                 mb->mbxCommand, phba->pport->port_state,
2571                                 psli->sli_flag, flag);
2572
2573                 psli->slistat.mbox_busy++;
2574                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2575
2576                 return MBX_BUSY;
2577         }
2578
2579         /* Handle STOP IOCB processing flag. This is only meaningful
2580          * if we are not polling for mbox completion.
2581          */
2582         if (flag & MBX_STOP_IOCB) {
2583                 flag &= ~MBX_STOP_IOCB;
2584                 if (flag == MBX_NOWAIT) {
2585                         /* Now flag each ring */
2586                         for (i = 0; i < psli->num_rings; i++) {
2587                                 /* If the ring is active, flag it */
2588                                 if (psli->ring[i].cmdringaddr) {
2589                                         psli->ring[i].flag |=
2590                                             LPFC_STOP_IOCB_MBX;
2591                                 }
2592                         }
2593                 }
2594         }
2595
2596         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2597
2598         /* If we are not polling, we MUST be in SLI2 mode */
2599         if (flag != MBX_POLL) {
2600                 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2601                     (mb->mbxCommand != MBX_KILL_BOARD)) {
2602                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2603                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2604                         /* Mbox command <mbxCommand> cannot issue */
2605                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2606                         return MBX_NOT_FINISHED;
2607                 }
2608                 /* timeout active mbox command */
2609                 mod_timer(&psli->mbox_tmo, (jiffies +
2610                                (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2611         }
2612
2613         /* Mailbox cmd <cmd> issue */
2614         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2615                         "%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2616                         "x%x\n",
2617                         phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
2618                         mb->mbxCommand, phba->pport->port_state,
2619                         psli->sli_flag, flag);
2620
2621         psli->slistat.mbox_cmd++;
2622         evtctr = psli->slistat.mbox_event;
2623
2624         /* next set own bit for the adapter and copy over command word */
2625         mb->mbxOwner = OWN_CHIP;
2626
2627         if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2628                 /* First copy command data to host SLIM area */
2629                 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
2630         } else {
2631                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2632                         /* copy command data into host mbox for cmpl */
2633                         lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2634                                               MAILBOX_CMD_SIZE);
2635                 }
2636
2637                 /* First copy mbox command data to HBA SLIM, skip past first
2638                    word */
2639                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2640                 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2641                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
2642
2643                 /* Next copy over first word, with mbxOwner set */
2644                 ldata = *((volatile uint32_t *)mb);
2645                 to_slim = phba->MBslimaddr;
2646                 writel(ldata, to_slim);
2647                 readl(to_slim); /* flush */
2648
2649                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2650                         /* switch over to host mailbox */
2651                         psli->sli_flag |= LPFC_SLI2_ACTIVE;
2652                 }
2653         }
2654
2655         wmb();
2656         /* interrupt board to do it right away */
2657         writel(CA_MBATT, phba->CAregaddr);
2658         readl(phba->CAregaddr); /* flush */
2659
2660         switch (flag) {
2661         case MBX_NOWAIT:
2662                 /* Don't wait for it to finish, just return */
2663                 psli->mbox_active = pmbox;
2664                 break;
2665
2666         case MBX_POLL:
2667                 psli->mbox_active = NULL;
2668                 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2669                         /* First read mbox status word */
2670                         word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
2671                         word0 = le32_to_cpu(word0);
2672                 } else {
2673                         /* First read mbox status word */
2674                         word0 = readl(phba->MBslimaddr);
2675                 }
2676
2677                 /* Read the HBA Host Attention Register */
2678                 ha_copy = readl(phba->HAregaddr);
2679
2680                 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2681                 i *= 1000; /* Convert to ms */
2682
2683                 /* Wait for command to complete */
2684                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2685                        (!(ha_copy & HA_MBATT) &&
2686                         (phba->link_state > LPFC_WARM_START))) {
2687                         if (i-- <= 0) {
2688                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2689                                 spin_unlock_irqrestore(&phba->hbalock,
2690                                                        drvr_flag);
2691                                 return MBX_NOT_FINISHED;
2692                         }
2693
2694                         /* Check if we took a mbox interrupt while we were
2695                            polling */
2696                         if (((word0 & OWN_CHIP) != OWN_CHIP)
2697                             && (evtctr != psli->slistat.mbox_event))
2698                                 break;
2699
2700                         spin_unlock_irqrestore(&phba->hbalock,
2701                                                drvr_flag);
2702
2703                         msleep(1);
2704
2705                         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2706
2707                         if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2708                                 /* Re-read the mbox status word */
2709                                 word0 = *((volatile uint32_t *)
2710                                                 &phba->slim2p->mbx);
2711                                 word0 = le32_to_cpu(word0);
2712                                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2713                                         MAILBOX_t *slimmb;
2714                                         volatile uint32_t slimword0;
2715                                         /* Check real SLIM for any errors */
2716                                         slimword0 = readl(phba->MBslimaddr);
2717                                         slimmb = (MAILBOX_t *) & slimword0;
2718                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2719                                             && slimmb->mbxStatus) {
2720                                                 psli->sli_flag &=
2721                                                     ~LPFC_SLI2_ACTIVE;
2722                                                 word0 = slimword0;
2723                                         }
2724                                 }
2725                         } else {
2726                                 /* Re-read the mbox status word */
2727                                 word0 = readl(phba->MBslimaddr);
2728                         }
2729                         /* Read the HBA Host Attention Register */
2730                         ha_copy = readl(phba->HAregaddr);
2731                 }
2732
2733                 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2734                         /* copy results back to user */
2735                         lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2736                                               MAILBOX_CMD_SIZE);
2737                 } else {
2738                         /* Copy results back from HBA SLIM */
2739                         lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2740                                                         MAILBOX_CMD_SIZE);
2741                         if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2742                                 pmbox->context2) {
2743                                 lpfc_memcpy_from_slim((void *)pmbox->context2,
2744                                       phba->MBslimaddr + DMP_RSP_OFFSET,
2745                                                       mb->un.varDmp.word_cnt);
2746                         }
2747                 }
2748
2749                 writel(HA_MBATT, phba->HAregaddr);
2750                 readl(phba->HAregaddr); /* flush */
2751
2752                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2753                 status = mb->mbxStatus;
2754         }
2755
2756         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2757         return status;
2758 }
2759
2760 /*
2761  * Caller must hold phba->hbalock.
2762  */
2763 static int
2764 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2765                     struct lpfc_iocbq *piocb)
2766 {
2767         /* Insert the caller's iocb in the txq tail for later processing. */
2768         list_add_tail(&piocb->list, &pring->txq);
2769         pring->txq_cnt++;
2770         return 0;
2771 }
2772
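/*
 * Return the next iocb to submit: prefer an iocb already queued on the
 * ring's txq; if the txq is empty, consume the caller's iocb and clear
 * *piocb so the caller knows it has been taken.
 */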
2773 static struct lpfc_iocbq *
2774 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2775                    struct lpfc_iocbq **piocb)
2776 {
2777         struct lpfc_iocbq * nextiocb;
2778
2779         nextiocb = lpfc_sli_ringtx_get(phba, pring);
2780         if (!nextiocb) {
2781                 nextiocb = *piocb;
2782                 *piocb = NULL;
2783         }
2784
2785         return nextiocb;
2786 }
2787
2788 /*
2789  * Lockless version of lpfc_sli_issue_iocb.
2790  */
2791 int
2792 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2793                     struct lpfc_iocbq *piocb, uint32_t flag)
2794 {
2795         struct lpfc_iocbq *nextiocb;
2796         IOCB_t *iocb;
2797
2798         if (piocb->iocb_cmpl && (!piocb->vport) &&
2799            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2800            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2801                 lpfc_printf_log(phba, KERN_ERR,
2802                                 LOG_SLI | LOG_VPORT,
2803                                 "%d:1807 IOCB x%x failed. No vport\n",
2804                                 phba->brd_no,
2805                                 piocb->iocb.ulpCommand);
2806                 dump_stack();
2807                 return IOCB_ERROR;
2808         }
2809
2810
2811         /* If the PCI channel is in offline state, do not post iocbs. */
2812         if (unlikely(pci_channel_offline(phba->pcidev)))
2813                 return IOCB_ERROR;
2814
2815         /*
2816          * We should never get an IOCB if we are in a < LINK_DOWN state
2817          */
2818         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
2819                 return IOCB_ERROR;
2820
2821         /*
2822          * Check to see if we are blocking IOCB processing because of an
2823          * outstanding mbox command.
2824          */
2825         if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2826                 goto iocb_busy;
2827
2828         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
2829                 /*
2830                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2831                  * can be issued if the link is not up.
2832                  */
2833                 switch (piocb->iocb.ulpCommand) {
2834                 case CMD_QUE_RING_BUF_CN:
2835                 case CMD_QUE_RING_BUF64_CN:
2836                         /*
2837                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2838                          * completion, iocb_cmpl MUST be 0.
2839                          */
2840                         if (piocb->iocb_cmpl)
2841                                 piocb->iocb_cmpl = NULL;
2842                         /*FALLTHROUGH*/
2843                 case CMD_CREATE_XRI_CR:
2844                 case CMD_CLOSE_XRI_CN:
2845                 case CMD_CLOSE_XRI_CX:
2846                         break;
2847                 default:
2848                         goto iocb_busy;
2849                 }
2850
2851         /*
2852          * For FCP commands, we must be in a state where we can process link
2853          * attention events.
2854          */
2855         } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2856                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2857                 goto iocb_busy;
2858         }
2859
2860         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2861                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2862                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2863
2864         if (iocb)
2865                 lpfc_sli_update_ring(phba, pring);
2866         else
2867                 lpfc_sli_update_full_ring(phba, pring);
2868
2869         if (!piocb)
2870                 return IOCB_SUCCESS;
2871
2872         goto out_busy;
2873
2874  iocb_busy:
2875         pring->stats.iocb_cmd_delay++;
2876
2877  out_busy:
2878
2879         if (!(flag & SLI_IOCB_RET_IOCB)) {
2880                 __lpfc_sli_ringtx_put(phba, pring, piocb);
2881                 return IOCB_SUCCESS;
2882         }
2883
2884         return IOCB_BUSY;
2885 }
2886
2887
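/*
 * Locked wrapper around __lpfc_sli_issue_iocb: takes phba->hbalock with
 * interrupts disabled around the lockless issue routine.
 */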
2888 int
2889 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2890                     struct lpfc_iocbq *piocb, uint32_t flag)
2891 {
2892         unsigned long iflags;
2893         int rc;
2894
2895         spin_lock_irqsave(&phba->hbalock, iflags);
2896         rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2897         spin_unlock_irqrestore(&phba->hbalock, iflags);
2898
2899         return rc;
2900 }
2901
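/*
 * Rebalance SLI-2 SLIM iocb entries when the extra ring is in use: take
 * the R1/R3 extra entries away from the FCP ring, give them to the extra
 * ring, and set up a single rctl/type mask from the multi_ring config.
 */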
2902 static int
2903 lpfc_extra_ring_setup( struct lpfc_hba *phba)
2904 {
2905         struct lpfc_sli *psli;
2906         struct lpfc_sli_ring *pring;
2907
2908         psli = &phba->sli;
2909
2910         /* Adjust cmd/rsp ring iocb entries more evenly */
2911
2912         /* Take some away from the FCP ring */
2913         pring = &psli->ring[psli->fcp_ring];
2914         pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2915         pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2916         pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2917         pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2918
2919         /* and give them to the extra ring */
2920         pring = &psli->ring[psli->extra_ring];
2921
2922         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2923         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2924         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2925         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2926
2927         /* Setup default profile for this ring */
2928         pring->iotag_max = 4096;
2929         pring->num_mask = 1;
2930         pring->prt[0].profile = 0;      /* Mask 0 */
2931         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2932         pring->prt[0].type = phba->cfg_multi_ring_type;
2933         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2934         return 0;
2935 }
2936
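/*
 * Initialize the SLI layer: set the ring count and per-ring iocb entry
 * counts and sizes (SLI-2 vs SLI-3), install the unsolicited event
 * handlers for the ELS/CT ring, and verify everything fits in SLI-2 SLIM.
 */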
2937 int
2938 lpfc_sli_setup(struct lpfc_hba *phba)
2939 {
2940         int i, totiocbsize = 0;
2941         struct lpfc_sli *psli = &phba->sli;
2942         struct lpfc_sli_ring *pring;
2943
2944         psli->num_rings = MAX_CONFIGURED_RINGS;
2945         psli->sli_flag = 0;
2946         psli->fcp_ring = LPFC_FCP_RING;
2947         psli->next_ring = LPFC_FCP_NEXT_RING;
2948         psli->extra_ring = LPFC_EXTRA_RING;
2949
2950         psli->iocbq_lookup = NULL;
2951         psli->iocbq_lookup_len = 0;
2952         psli->last_iotag = 0;
2953
2954         for (i = 0; i < psli->num_rings; i++) {
2955                 pring = &psli->ring[i];
2956                 switch (i) {
2957                 case LPFC_FCP_RING:     /* ring 0 - FCP */
2958                         /* numCiocb and numRiocb are used in config_port */
2959                         pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
2960                         pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
2961                         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2962                         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2963                         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2964                         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2965                         pring->sizeCiocb = (phba->sli_rev == 3) ?
2966                                                         SLI3_IOCB_CMD_SIZE :
2967                                                         SLI2_IOCB_CMD_SIZE;
2968                         pring->sizeRiocb = (phba->sli_rev == 3) ?
2969                                                         SLI3_IOCB_RSP_SIZE :
2970                                                         SLI2_IOCB_RSP_SIZE;
2971                         pring->iotag_ctr = 0;
2972                         pring->iotag_max =
2973                             (phba->cfg_hba_queue_depth * 2);
2974                         pring->fast_iotag = pring->iotag_max;
2975                         pring->num_mask = 0;
2976                         break;
2977                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
2978                         /* numCiocb and numRiocb are used in config_port */
2979                         pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2980                         pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
2981                         pring->sizeCiocb = (phba->sli_rev == 3) ?
2982                                                         SLI3_IOCB_CMD_SIZE :
2983                                                         SLI2_IOCB_CMD_SIZE;
2984                         pring->sizeRiocb = (phba->sli_rev == 3) ?
2985                                                         SLI3_IOCB_RSP_SIZE :
2986                                                         SLI2_IOCB_RSP_SIZE;
2987                         pring->iotag_max = phba->cfg_hba_queue_depth;
2988                         pring->num_mask = 0;
2989                         break;
2990                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
2991                         /* numCiocb and numRiocb are used in config_port */
2992                         pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2993                         pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
2994                         pring->sizeCiocb = (phba->sli_rev == 3) ?
2995                                                         SLI3_IOCB_CMD_SIZE :
2996                                                         SLI2_IOCB_CMD_SIZE;
2997                         pring->sizeRiocb = (phba->sli_rev == 3) ?
2998                                                         SLI3_IOCB_RSP_SIZE :
2999                                                         SLI2_IOCB_RSP_SIZE;
3000                         pring->fast_iotag = 0;
3001                         pring->iotag_ctr = 0;
3002                         pring->iotag_max = 4096;
3003                         pring->num_mask = 4;
3004                         pring->prt[0].profile = 0;      /* Mask 0 */
3005                         pring->prt[0].rctl = FC_ELS_REQ;
3006                         pring->prt[0].type = FC_ELS_DATA;
3007                         pring->prt[0].lpfc_sli_rcv_unsol_event =
3008                             lpfc_els_unsol_event;
3009                         pring->prt[1].profile = 0;      /* Mask 1 */
3010                         pring->prt[1].rctl = FC_ELS_RSP;
3011                         pring->prt[1].type = FC_ELS_DATA;
3012                         pring->prt[1].lpfc_sli_rcv_unsol_event =
3013                             lpfc_els_unsol_event;
3014                         pring->prt[2].profile = 0;      /* Mask 2 */
3015                         /* NameServer Inquiry */
3016                         pring->prt[2].rctl = FC_UNSOL_CTL;
3017                         /* NameServer */
3018                         pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
3019                         pring->prt[2].lpfc_sli_rcv_unsol_event =
3020                             lpfc_ct_unsol_event;
3021                         pring->prt[3].profile = 0;      /* Mask 3 */
3022                         /* NameServer response */
3023                         pring->prt[3].rctl = FC_SOL_CTL;
3024                         /* NameServer */
3025                         pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
3026                         pring->prt[3].lpfc_sli_rcv_unsol_event =
3027                             lpfc_ct_unsol_event;
3028                         break;
3029                 }
3030                 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3031                                 (pring->numRiocb * pring->sizeRiocb);
3032         }
3033         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
3034                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
3035                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3036                                 "%d:0462 Too many cmd / rsp ring entries in "
3037                                 "SLI2 SLIM Data: x%x x%lx\n",
3038                                 phba->brd_no, totiocbsize,
3039                                 (unsigned long) MAX_SLIM_IOCB_SIZE);
3040         }
3041         if (phba->cfg_multi_ring_support == 2)
3042                 lpfc_extra_ring_setup(phba);
3043
3044         return 0;
3045 }
3046
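/*
 * Initialize the mailbox queues and, for every ring, the ring indices and
 * the txq, txcmplq, iocb_continueq and postbufq list heads.  Takes
 * phba->hbalock around the initialization.
 */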
3047 int
3048 lpfc_sli_queue_setup(struct lpfc_hba *phba)
3049 {
3050         struct lpfc_sli *psli;
3051         struct lpfc_sli_ring *pring;
3052         int i;
3053
3054         psli = &phba->sli;
3055         spin_lock_irq(&phba->hbalock);
3056         INIT_LIST_HEAD(&psli->mboxq);
3057         INIT_LIST_HEAD(&psli->mboxq_cmpl);
3058         /* Initialize list headers for txq and txcmplq as doubly linked lists */
3059         for (i = 0; i < psli->num_rings; i++) {
3060                 pring = &psli->ring[i];
3061                 pring->ringno = i;
3062                 pring->next_cmdidx  = 0;
3063                 pring->local_getidx = 0;
3064                 pring->cmdidx = 0;
3065                 INIT_LIST_HEAD(&pring->txq);
3066                 INIT_LIST_HEAD(&pring->txcmplq);
3067                 INIT_LIST_HEAD(&pring->iocb_continueq);
3068                 INIT_LIST_HEAD(&pring->postbufq);
3069         }
3070         spin_unlock_irq(&phba->hbalock);
3071         return 1;
3072 }
3073
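/*
 * Bring down SLI processing for a single vport: fail all of the vport's
 * iocbs still waiting on each txq with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN
 * and issue aborts for the vport's iocbs outstanding on each txcmplq.
 */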
3074 int
3075 lpfc_sli_host_down(struct lpfc_vport *vport)
3076 {
3077         struct lpfc_hba *phba = vport->phba;
3078         struct lpfc_sli *psli = &phba->sli;
3079         struct lpfc_sli_ring *pring;
3080         struct lpfc_iocbq *iocb, *next_iocb;
3081         IOCB_t *icmd = NULL;
3082         int i;
3083         unsigned long flags = 0;
3084         uint16_t prev_pring_flag;
3085
3086         lpfc_cleanup_discovery_resources(vport);
3087
3088         spin_lock_irqsave(&phba->hbalock, flags);
3089
3090         for (i = 0; i < psli->num_rings; i++) {
3091                 pring = &psli->ring[i];
3092                 prev_pring_flag = pring->flag;
3093                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3094
3095                 /*
3096                  * Error everything on the txq since these iocbs have not been
3097                  * given to the FW yet.
3098                  */
3099
3100                 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3101                         if (iocb->vport != vport)
3102                                 continue;
3103                         list_del_init(&iocb->list);
3104                         pring->txq_cnt--;
3105                         if (iocb->iocb_cmpl) {
3106                                 icmd = &iocb->iocb;
3107                                 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3108                                 icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3109                                 spin_unlock_irqrestore(&phba->hbalock, flags);
3110                                 (iocb->iocb_cmpl) (phba, iocb, iocb);
3111                                 spin_lock_irqsave(&phba->hbalock, flags);
3112                         } else
3113                                 lpfc_sli_release_iocbq(phba, iocb);
3114                 }
3115
3116                 /* Next issue ABTS for everything on the txcmplq */
3117                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3118                                                                         list) {
3119                         if (iocb->vport != vport)
3120                                 continue;
3121                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3122                 }
3123
3124                 pring->flag = prev_pring_flag;
3125         }
3126
3127         spin_unlock_irqrestore(&phba->hbalock, flags);
3128
3129         return 1;
3130 }
3131
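/*
 * Bring down SLI processing for the whole HBA: flush every ring's txq,
 * completing the iocbs with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, then cancel
 * the mailbox timer and return all active, pending and completed mailbox
 * commands with MBX_NOT_FINISHED status.
 */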
3132 int
3133 lpfc_sli_hba_down(struct lpfc_hba *phba)
3134 {
3135         LIST_HEAD(completions);
3136         struct lpfc_sli *psli = &phba->sli;
3137         struct lpfc_sli_ring *pring;
3138         LPFC_MBOXQ_t *pmb;
3139         struct lpfc_iocbq *iocb;
3140         IOCB_t *cmd = NULL;
3141         int i;
3142         unsigned long flags = 0;
3143
3144         lpfc_hba_down_prep(phba);
3145
3146         lpfc_fabric_abort_hba(phba);
3147
3148         spin_lock_irqsave(&phba->hbalock, flags);
3149         for (i = 0; i < psli->num_rings; i++) {
3150                 pring = &psli->ring[i];
3151                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3152
3153                 /*
3154                  * Error everything on the txq since these iocbs have not been
3155                  * given to the FW yet.
3156                  */
3157                 list_splice_init(&pring->txq, &completions);
3158                 pring->txq_cnt = 0;
3159
3160         }
3161         spin_unlock_irqrestore(&phba->hbalock, flags);
3162
3163         while (!list_empty(&completions)) {
3164                 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3165                 cmd = &iocb->iocb;
3166
3167                 if (!iocb->iocb_cmpl)
3168                         lpfc_sli_release_iocbq(phba, iocb);
3169                 else {
3170                         cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3171                         cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3172                         (iocb->iocb_cmpl) (phba, iocb, iocb);
3173                 }
3174         }
3175
3176         /* Return any active mbox cmds */
3177         del_timer_sync(&psli->mbox_tmo);
3178         spin_lock_irqsave(&phba->hbalock, flags);
3179
3180         spin_lock(&phba->pport->work_port_lock);
3181         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3182         spin_unlock(&phba->pport->work_port_lock);
3183
3184         if (psli->mbox_active) {
3185                 list_add_tail(&psli->mbox_active->list, &completions);
3186                 psli->mbox_active = NULL;
3187                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3188         }
3189
3190         /* Return any pending or completed mbox cmds */
3191         list_splice_init(&phba->sli.mboxq, &completions);
3192         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3193         INIT_LIST_HEAD(&psli->mboxq);
3194         INIT_LIST_HEAD(&psli->mboxq_cmpl);
3195
3196         spin_unlock_irqrestore(&phba->hbalock, flags);
3197
3198         while (!list_empty(&completions)) {
3199                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
3200                 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3201                 if (pmb->mbox_cmpl) {
3202                         pmb->mbox_cmpl(phba,pmb);
3203                 }
3204         }
3205         return 1;
3206 }
3207
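/*
 * Copy cnt bytes between SLIM (PCI) memory and host memory one 32-bit word
 * at a time, converting each word from little-endian to CPU byte order.
 */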
3208 void
3209 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3210 {
3211         uint32_t *src = srcp;
3212         uint32_t *dest = destp;
3213         uint32_t ldata;
3214         int i;
3215
3216         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
3217                 ldata = *src;
3218                 ldata = le32_to_cpu(ldata);
3219                 *dest = ldata;
3220                 src++;
3221                 dest++;
3222         }
3223 }
3224
3225 int
3226 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3227                          struct lpfc_dmabuf *mp)
3228 {
3229         /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
3230            later */
3231         spin_lock_irq(&phba->hbalock);
3232         list_add_tail(&mp->list, &pring->postbufq);
3233         pring->postbufq_cnt++;
3234         spin_unlock_irq(&phba->hbalock);
3235         return 0;
3236 }
3237
3238
3239 struct lpfc_dmabuf *
3240 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3241                          dma_addr_t phys)
3242 {
3243         struct lpfc_dmabuf *mp, *next_mp;
3244         struct list_head *slp = &pring->postbufq;
3245
3246         /* Search postbufq, from the beginning, looking for a match on phys */
3247         spin_lock_irq(&phba->hbalock);
3248         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3249                 if (mp->phys == phys) {
3250                         list_del_init(&mp->list);
3251                         pring->postbufq_cnt--;
3252                         spin_unlock_irq(&phba->hbalock);
3253                         return mp;
3254                 }
3255         }
3256
3257         spin_unlock_irq(&phba->hbalock);
3258         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3259                         "%d:0410 Cannot find virtual addr for mapped buf on "
3260                         "ring %d Data x%llx x%p x%p x%x\n",
3261                         phba->brd_no, pring->ringno, (unsigned long long)phys,
3262                         slp->next, slp->prev, pring->postbufq_cnt);
3263         return NULL;
3264 }
3265
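/*
 * Completion handler for an abort (ABTS/CLOSE) iocb issued on the ELS ring.
 * If the abort itself failed, look up the iocb that was being aborted and,
 * when it is still marked LPFC_DRIVER_ABORTED on the ELS txcmplq, complete
 * it with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.  The abort iocb is then
 * released.
 */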
3266 static void
3267 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3268                         struct lpfc_iocbq *rspiocb)
3269 {
3270         IOCB_t *irsp = &rspiocb->iocb;
3271         uint16_t abort_iotag, abort_context;
3272         struct lpfc_iocbq *abort_iocb;
3273         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3274
3275         abort_iocb = NULL;
3276
3277         if (irsp->ulpStatus) {
3278                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
3279                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
3280
3281                 spin_lock_irq(&phba->hbalock);
3282                 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
3283                         abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
3284
3285                 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
3286                                 "%d:0327 Cannot abort els iocb %p "
3287                                 "with tag %x context %x, abort status %x, "
3288                                 "abort code %x\n",
3289                                 phba->brd_no, abort_iocb, abort_iotag,
3290                                 abort_context, irsp->ulpStatus,
3291                                 irsp->un.ulpWord[4]);
3292
3293                 /*
3294                  * Make sure we have the right iocbq before taking it
3295                  * off the txcmplq and trying to call its completion routine.
3296                  */
3297                 if (!abort_iocb ||
3298                     abort_iocb->iocb.ulpContext != abort_context ||
3299                     (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
3300                         spin_unlock_irq(&phba->hbalock);
3301                 else {
3302                         list_del_init(&abort_iocb->list);
3303                         pring->txcmplq_cnt--;
3304                         spin_unlock_irq(&phba->hbalock);
3305
3306                         abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3307                         abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3308                         abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
3309                         (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
3310                 }
3311         }
3312
3313         lpfc_sli_release_iocbq(phba, cmdiocb);
3314         return;
3315 }
3316
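/*
 * Completion handler used when an ELS command should be ignored (e.g. the
 * port is unloading): log the completion and free the command iocb.
 */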
3317 static void
3318 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3319                      struct lpfc_iocbq *rspiocb)
3320 {
3321         IOCB_t *irsp = &rspiocb->iocb;
3322
3323         /* ELS cmd tag <ulpIoTag> completes */
3324         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3325                         "%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
3326                         "x%x x%x x%x\n",
3327                         phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
3328                         irsp->un.ulpWord[4], irsp->ulpTimeout);
3329
3330         lpfc_els_free_iocb(phba, cmdiocb);
3331         return;
3332 }
3333
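/*
 * Build and issue an abort for the given outstanding iocb.  Abort requests
 * and iocbs already being aborted are skipped.  If the port is unloading,
 * the iocb's completion handler is simply replaced with
 * lpfc_ignore_els_cmpl; otherwise an ABORT_XRI_CN (link up) or
 * CLOSE_XRI_CN (link not up) request is issued with
 * lpfc_sli_abort_els_cmpl as its completion handler.
 */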
3334 int
3335 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3336                            struct lpfc_iocbq *cmdiocb)
3337 {
3338         struct lpfc_vport *vport = cmdiocb->vport;
3339         struct lpfc_iocbq *abtsiocbp;
3340         IOCB_t *icmd = NULL;
3341         IOCB_t *iabt = NULL;
3342         int retval = IOCB_ERROR;
3343
3344         /*
3345          * There are certain command types we don't want to abort.  And we
3346          * don't want to abort commands that are already in the process of
3347          * being aborted.
3348          */
3349         icmd = &cmdiocb->iocb;
3350         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
3351             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3352             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
3353                 return 0;
3354
3355         /* If we're unloading, don't abort the iocb, but change the callback so
3356          * that nothing happens when it finishes.
3357          */
3358         if (vport->load_flag & FC_UNLOADING) {
3359                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3360                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3361                 else
3362                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
3363                 goto abort_iotag_exit;
3364         }
3365
3366         /* issue ABTS for this IOCB based on iotag */
3367         abtsiocbp = __lpfc_sli_get_iocbq(phba);
3368         if (abtsiocbp == NULL)
3369                 return 0;
3370
3371         /* This signals the response to set the correct status
3372          * before calling the completion handler.
3373          */
3374         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
3375
3376         iabt = &abtsiocbp->iocb;
3377         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
3378         iabt->un.acxri.abortContextTag = icmd->ulpContext;
3379         iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
3380         iabt->ulpLe = 1;
3381         iabt->ulpClass = icmd->ulpClass;
3382
3383         if (phba->link_state >= LPFC_LINK_UP)
3384                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
3385         else
3386                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
3387
3388         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
3389
3390         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3391                         "%d (%d):0339 Abort xri x%x, original iotag x%x, "
3392                         "abort cmd iotag x%x\n",
3393                         phba->brd_no, vport->vpi,
3394                         iabt->un.acxri.abortContextTag,
3395                         iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
3396         retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
3397
3398 abort_iotag_exit:
3399         /*
3400          * Caller to this routine should check for IOCB_ERROR
3401          * and handle it properly.  This routine no longer removes the
3402          * iocb from the txcmplq or calls its completion in case of IOCB_ERROR.
3403          */
3404         return retval;
3405 }
3406
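/*
 * Check whether an FCP iocb matches the given target/LUN/context criteria.
 * Returns 0 on a match and 1 otherwise (non-FCP iocbs and iocbs without an
 * attached scsi_cmnd never match).
 */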
3407 static int
3408 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
3409                            uint64_t lun_id, uint32_t ctx,
3410                            lpfc_ctx_cmd ctx_cmd)
3411 {
3412         struct lpfc_scsi_buf *lpfc_cmd;
3413         struct scsi_cmnd *cmnd;
3414         int rc = 1;
3415
3416         if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
3417                 return rc;
3418
3419         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
3420         cmnd = lpfc_cmd->pCmd;
3421
3422         if (cmnd == NULL)
3423                 return rc;
3424
3425         switch (ctx_cmd) {
3426         case LPFC_CTX_LUN:
3427                 if ((cmnd->device->id == tgt_id) &&
3428                     (cmnd->device->lun == lun_id))
3429                         rc = 0;
3430                 break;
3431         case LPFC_CTX_TGT:
3432                 if (cmnd->device->id == tgt_id)
3433                         rc = 0;
3434                 break;
3435         case LPFC_CTX_CTX:
3436                 if (iocbq->iocb.ulpContext == ctx)
3437                         rc = 0;
3438                 break;
3439         case LPFC_CTX_HOST:
3440                 rc = 0;
3441                 break;
3442         default:
3443                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
3444                         __FUNCTION__, ctx_cmd);
3445                 break;
3446         }
3447
3448         return rc;
3449 }
3450
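/*
 * Count the outstanding FCP iocbs that match the given target/LUN/host
 * context by walking the iotag lookup array.
 */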
3451 int
3452 lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3453                   uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
3454 {
3455         struct lpfc_iocbq *iocbq;
3456         int sum, i;
3457
3458         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
3459                 iocbq = phba->sli.iocbq_lookup[i];
3460
3461                 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
3462                                                 0, ctx_cmd) == 0)
3463                         sum++;
3464         }
3465
3466         return sum;
3467 }
3468
3469 void
3470 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3471                         struct lpfc_iocbq *rspiocb)
3472 {
3473         lpfc_sli_release_iocbq(phba, cmdiocb);
3474         return;
3475 }
3476
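/*
 * Walk the iotag lookup array and issue an ABTS (or CLOSE if the link is
 * down) for every outstanding FCP iocb that matches the given
 * target/LUN/host context.  Returns the number of iocbs that could not be
 * aborted.
 */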
3477 int
3478 lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3479                     uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
3480                     lpfc_ctx_cmd abort_cmd)
3481 {
3482         struct lpfc_iocbq *iocbq;
3483         struct lpfc_iocbq *abtsiocb;
3484         IOCB_t *cmd = NULL;
3485         int errcnt = 0, ret_val = 0;
3486         int i;
3487
3488         for (i = 1; i <= phba->sli.last_iotag; i++) {
3489                 iocbq = phba->sli.iocbq_lookup[i];
3490
3491                 if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0,
3492                                                abort_cmd) != 0)
3493                         continue;
3494
3495                 /* issue ABTS for this IOCB based on iotag */
3496                 abtsiocb = lpfc_sli_get_iocbq(phba);
3497                 if (abtsiocb == NULL) {
3498                         errcnt++;
3499                         continue;
3500                 }
3501
3502                 cmd = &iocbq->iocb;
3503                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
3504                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
3505                 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
3506                 abtsiocb->iocb.ulpLe = 1;
3507                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
3508                 abtsiocb->vport = phba->pport;
3509
3510                 if (lpfc_is_link_up(phba))
3511                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
3512                 else
3513                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
3514
3515                 /* Setup callback routine and issue the command. */
3516                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3517                 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
3518                 if (ret_val == IOCB_ERROR) {
3519                         lpfc_sli_release_iocbq(phba, abtsiocb);
3520                         errcnt++;
3521                         continue;
3522                 }
3523         }
3524
3525         return errcnt;
3526 }
3527
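/*
 * Completion handler used by lpfc_sli_issue_iocb_wait: copy the response
 * into the caller's buffer (context2), set LPFC_IO_WAKE and wake up the
 * waiter sleeping on the iocb's wait queue.
 */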
3528 static void
3529 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3530                         struct lpfc_iocbq *cmdiocbq,
3531                         struct lpfc_iocbq *rspiocbq)
3532 {
3533         wait_queue_head_t *pdone_q;
3534         unsigned long iflags;
3535
3536         spin_lock_irqsave(&phba->hbalock, iflags);
3537         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3538         if (cmdiocbq->context2 && rspiocbq)
3539                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3540                        &rspiocbq->iocb, sizeof(IOCB_t));
3541
3542         pdone_q = cmdiocbq->context_un.wait_queue;
3543         spin_unlock_irqrestore(&phba->hbalock, iflags);
3544         if (pdone_q)
3545                 wake_up(pdone_q);
3546         return;
3547 }
3548
3549 /*
3550  * Issue the caller's iocb and wait for its completion, but no longer than the
3551  * caller's timeout.  Note that the LPFC_IO_WAKE bit in iocb_flag is cleared
3552  * before the lpfc_sli_issue_iocb call since the wake routine sets it and, by
3553  * definition, this is a wait function.
3554  */
3555
3556 int
3557 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3558                          struct lpfc_sli_ring *pring,
3559                          struct lpfc_iocbq *piocb,
3560                          struct lpfc_iocbq *prspiocbq,
3561                          uint32_t timeout)
3562 {
3563         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3564         long timeleft, timeout_req = 0;
3565         int retval = IOCB_SUCCESS;
3566         uint32_t creg_val;
3567
3568         /*
3569          * If the caller has provided a response iocbq buffer, then context2
3570          * must be NULL or it is an error.
3571          */
3572         if (prspiocbq) {
3573                 if (piocb->context2)
3574                         return IOCB_ERROR;
3575                 piocb->context2 = prspiocbq;
3576         }
3577
3578         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3579         piocb->context_un.wait_queue = &done_q;
3580         piocb->iocb_flag &= ~LPFC_IO_WAKE;
3581
3582         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3583                 creg_val = readl(phba->HCregaddr);
3584                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3585                 writel(creg_val, phba->HCregaddr);
3586                 readl(phba->HCregaddr); /* flush */
3587         }
3588
3589         retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3590         if (retval == IOCB_SUCCESS) {
3591                 timeout_req = timeout * HZ;
3592                 timeleft = wait_event_timeout(done_q,
3593                                 piocb->iocb_flag & LPFC_IO_WAKE,
3594                                 timeout_req);
3595
3596                 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3597                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3598                                         "%d:0331 IOCB wake signaled\n",
3599                                         phba->brd_no);
3600                 } else if (timeleft == 0) {
3601                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3602                                         "%d:0338 IOCB wait timeout error - no "
3603                                         "wake response Data x%x\n",
3604                                         phba->brd_no, timeout);
3605                         retval = IOCB_TIMEDOUT;
3606                 } else {
3607                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3608                                         "%d:0330 IOCB wake NOT set, "
3609                                         "Data x%x x%lx\n", phba->brd_no,
3610                                         timeout, (timeleft / jiffies));
3611                         retval = IOCB_TIMEDOUT;
3612                 }
3613         } else {
3614                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3615                                 "%d:0332 IOCB wait issue failed, Data x%x\n",
3616                                 phba->brd_no, retval);
3617                 retval = IOCB_ERROR;
3618         }
3619
3620         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3621                 creg_val = readl(phba->HCregaddr);
3622                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3623                 writel(creg_val, phba->HCregaddr);
3624                 readl(phba->HCregaddr); /* flush */
3625         }
3626
3627         if (prspiocbq)
3628                 piocb->context2 = NULL;
3629
3630         piocb->context_un.wait_queue = NULL;
3631         piocb->iocb_cmpl = NULL;
3632         return retval;
3633 }
3634
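/*
 * Issue a mailbox command with MBX_NOWAIT and sleep (interruptible, up to
 * timeout seconds) until the completion handler sets LPFC_MBX_WAKE.
 * Returns MBX_SUCCESS on completion, MBX_TIMEOUT if the wait expired, or
 * the issue status if the command could not be queued.
 */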
3635 int
3636 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3637                          uint32_t timeout)
3638 {
3639         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3640         int retval;
3641
3642         /* The caller must leave context1 empty. */
3643         if (pmboxq->context1 != 0)
3644                 return MBX_NOT_FINISHED;
3645
3646         /* setup wake call as IOCB callback */
3647         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3648         /* setup context field to pass wait_queue pointer to wake function  */
3649         pmboxq->context1 = &done_q;
3650
3651         /* now issue the command */
3652         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3653
3654         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3655                 wait_event_interruptible_timeout(done_q,
3656                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3657                                 timeout * HZ);
3658
3659                 pmboxq->context1 = NULL;
3660                 /*
3661                  * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
3662                  * otherwise do not free the resources.
3663                  */
3664                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3665                         retval = MBX_SUCCESS;
3666                 else
3667                         retval = MBX_TIMEOUT;
3668         }
3669
3670         return retval;
3671 }
3672
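/*
 * Poll for the active mailbox command to finish, feeding any mailbox
 * attention events to lpfc_sli_handle_mb_event, for up to roughly
 * LPFC_MBOX_TMO seconds.  Returns 1 if a mailbox command is still
 * outstanding, otherwise 0.
 */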
3673 int
3674 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3675 {
3676         struct lpfc_vport *vport = phba->pport;
3677         int i = 0;
3678         uint32_t ha_copy;
3679
3680         while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
3681                 if (i++ > LPFC_MBOX_TMO * 1000)
3682                         return 1;
3683
3684                 /*
3685                  * Call lpfc_sli_handle_mb_event only if a mailbox cmd
3686                  * did finish. This way we won't get the misleading
3687                  * "Stray Mailbox Interrupt" message.
3688                  */
3689                 spin_lock_irq(&phba->hbalock);
3690                 ha_copy = phba->work_ha;
3691                 phba->work_ha &= ~HA_MBATT;
3692                 spin_unlock_irq(&phba->hbalock);
3693
3694                 if (ha_copy & HA_MBATT)
3695                         if (lpfc_sli_handle_mb_event(phba) == 0)
3696                                 i = 0;
3697
3698                 msleep(1);
3699         }
3700
3701         return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3702 }
3703
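/*
 * HBA interrupt handler.  Reads and clears the host attention register,
 * handles mailbox completions and error/link attention bookkeeping, defers
 * the remaining slow path work to the worker thread, and processes fast
 * path FCP (and extra) ring events directly in interrupt context.
 */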
3704 irqreturn_t
3705 lpfc_intr_handler(int irq, void *dev_id)
3706 {
3707         struct lpfc_hba  *phba;
3708         uint32_t ha_copy;
3709         uint32_t work_ha_copy;
3710         unsigned long status;
3711         int i;
3712         uint32_t control;
3713
3714         MAILBOX_t *mbox, *pmbox;
3715         LPFC_MBOXQ_t *pmb;
3716         int rc;
3717
3718         /*
3719          * Get the driver's phba structure from the dev_id and
3720          * assume the HBA is not interrupting.
3721          */
3722         phba = (struct lpfc_hba *) dev_id;
3723
3724         if (unlikely(!phba))
3725                 return IRQ_NONE;
3726
3727         /* If the pci channel is offline, ignore all the interrupts. */
3728         if (unlikely(pci_channel_offline(phba->pcidev)))
3729                 return IRQ_NONE;
3730
3731         phba->sli.slistat.sli_intr++;
3732
3733         /*
3734          * Call the HBA to see if it is interrupting.  If not, don't claim
3735          * the interrupt.
3736          */
3737
3738         /* Ignore all interrupts during initialization. */
3739         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
3740                 return IRQ_NONE;
3741
3742         /*
3743          * Read host attention register to determine interrupt source
3744          * Clear Attention Sources, except Error Attention (to
3745          * preserve status) and Link Attention
3746          */
3747         spin_lock(&phba->hbalock);
3748         ha_copy = readl(phba->HAregaddr);
3749         /* If somebody is waiting to handle an eratt don't process it
3750          * here.  The brdkill function will do this.
3751          */
3752         if (phba->link_flag & LS_IGNORE_ERATT)
3753                 ha_copy &= ~HA_ERATT;
3754         writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3755         readl(phba->HAregaddr); /* flush */
3756         spin_unlock(&phba->hbalock);
3757
3758         if (unlikely(!ha_copy))
3759                 return IRQ_NONE;
3760
3761         work_ha_copy = ha_copy & phba->work_ha_mask;
3762
3763         if (unlikely(work_ha_copy)) {
3764                 if (work_ha_copy & HA_LATT) {
3765                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3766                                 /*
3767                                  * Turn off Link Attention interrupts
3768                                  * until CLEAR_LA done
3769                                  */
3770                                 spin_lock(&phba->hbalock);
3771                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3772                                 control = readl(phba->HCregaddr);
3773                                 control &= ~HC_LAINT_ENA;
3774                                 writel(control, phba->HCregaddr);
3775                                 readl(phba->HCregaddr); /* flush */
3776                                 spin_unlock(&phba->hbalock);
3777                         }
3778                         else
3779                                 work_ha_copy &= ~HA_LATT;
3780                 }
3781
3782                 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3783                         for (i = 0; i < phba->sli.num_rings; i++) {
3784                                 if (work_ha_copy & (HA_RXATT << (4*i))) {
3785                                         /*
3786                                          * Turn off Slow Rings interrupts
3787                                          */
3788                                         spin_lock(&phba->hbalock);
3789                                         control = readl(phba->HCregaddr);
3790                                         control &= ~(HC_R0INT_ENA << i);
3791                                         writel(control, phba->HCregaddr);
3792                                         readl(phba->HCregaddr); /* flush */
3793                                         spin_unlock(&phba->hbalock);
3794                                 }
3795                         }
3796                 }
3797
3798                 if (work_ha_copy & HA_ERATT) {
3799                         phba->link_state = LPFC_HBA_ERROR;
3800                         /*
3801                          * There was a link/board error.  Read the
3802                          * status register to retrieve the error event
3803                          * and process it.
3804                          */
3805                         phba->sli.slistat.err_attn_event++;
3806                         /* Save status info */
3807                         phba->work_hs = readl(phba->HSregaddr);
3808                         phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
3809                         phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
3810
3811                         /* Clear Chip error bit */
3812                         writel(HA_ERATT, phba->HAregaddr);
3813                         readl(phba->HAregaddr); /* flush */
3814                         phba->pport->stopped = 1;
3815                 }
3816
3817                 if ((work_ha_copy & HA_MBATT) &&
3818                     (phba->sli.mbox_active)) {
3819                         pmb = phba->sli.mbox_active;
3820                         pmbox = &pmb->mb;
3821                         mbox = &phba->slim2p->mbx;
3822
3823                         /* First check out the status word */
3824                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3825                         if (pmbox->mbxOwner != OWN_HOST) {
3826                                 /*
3827                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
3828                                  * mbxStatus <status>
3829                                  */
3830                                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3831                                                 LOG_SLI,
3832                                                 "%d (%d):0304 Stray Mailbox "
3833                                                 "Interrupt mbxCommand x%x "
3834                                                 "mbxStatus x%x\n",
3835                                                 phba->brd_no,
3836                                                 (pmb->vport
3837                                                  ? pmb->vport->vpi
3838                                                  : 0),
3839                                                 pmbox->mbxCommand,
3840                                                 pmbox->mbxStatus);
3841                         }
3842                         del_timer_sync(&phba->sli.mbox_tmo);
3843
3844                         spin_lock(&phba->pport->work_port_lock);
3845                         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3846                         spin_unlock(&phba->pport->work_port_lock);
3847                         phba->sli.mbox_active = NULL;
3848                         if (pmb->mbox_cmpl) {
3849                                 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3850                                                       MAILBOX_CMD_SIZE);
3851                         }
3852                         lpfc_mbox_cmpl_put(phba, pmb);
3853                 }
3854                 if ((work_ha_copy & HA_MBATT) &&
3855                     (phba->sli.mbox_active == NULL)) {
3856 send_next_mbox:
3857                         spin_lock(&phba->hbalock);
3858                         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3859                         pmb = lpfc_mbox_get(phba);
3860                         spin_unlock(&phba->hbalock);
3861
3862                         /* Process next mailbox command if there is one */
3863                         if (pmb != NULL) {
3864                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3865                                 if (rc == MBX_NOT_FINISHED) {
3866                                         pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3867                                         lpfc_mbox_cmpl_put(phba, pmb);
3868                                         goto send_next_mbox;
3869                                 }
3870                         } else {
3871                                 /* Turn on IOCB processing */
3872                                 for (i = 0; i < phba->sli.num_rings; i++)
3873                                         lpfc_sli_turn_on_ring(phba, i);
3874                         }
3875
3876                 }
3877
3878                 spin_lock(&phba->hbalock);
3879                 phba->work_ha |= work_ha_copy;
3880                 if (phba->work_wait)
3881                         lpfc_worker_wake_up(phba);
3882                 spin_unlock(&phba->hbalock);
3883         }
3884
3885         ha_copy &= ~(phba->work_ha_mask);
3886
3887         /*
3888          * Process all events on FCP ring.  Take the optimized path for
3889          * FCP IO.  Any other IO is slow path and is handled by
3890          * the worker thread.
3891          */
3892         status = (ha_copy & (HA_RXMASK  << (4*LPFC_FCP_RING)));
3893         status >>= (4*LPFC_FCP_RING);
3894         if (status & HA_RXATT)
3895                 lpfc_sli_handle_fast_ring_event(phba,
3896                                                 &phba->sli.ring[LPFC_FCP_RING],
3897                                                 status);
3898
3899         if (phba->cfg_multi_ring_support == 2) {
3900                 /*
3901                  * Process all events on extra ring.  Take the optimized path
3902                  * for extra ring IO.  Any other IO is slow path and is handled
3903                  * by the worker thread.
3904                  */
3905                 status = (ha_copy & (HA_RXMASK  << (4*LPFC_EXTRA_RING)));
3906                 status >>= (4*LPFC_EXTRA_RING);
3907                 if (status & HA_RXATT) {
3908                         lpfc_sli_handle_fast_ring_event(phba,
3909                                         &phba->sli.ring[LPFC_EXTRA_RING],
3910                                         status);
3911                 }
3912         }
3913         return IRQ_HANDLED;
3914
3915 } /* lpfc_intr_handler */