/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
/*
 * Define macro to log: Mailbox command x%x cannot issue Data
 * This allows multiple uses of lpfc_msgBlk0311
 * w/o perturbing log msg utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
	lpfc_printf_log(phba, \
		KERN_INFO, \
		LOG_MBOX | LOG_SLI, \
		"%d (%d):0311 Mailbox command x%x cannot " \
		"issue Data: x%x x%x x%x\n", \
		phba->brd_no, \
		pmbox->vport ? pmbox->vport->vpi : 0, \
		pmbox->mb.mbxCommand, \
		phba->pport->port_state, \
		psli->sli_flag, \
		flag)
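/*
 * Editorial note: the macro is invoked like an ordinary statement wherever
 * a mailbox command has to be rejected, mirroring its use in
 * lpfc_sli_issue_mbox() later in this file:
 *
 *	LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
 */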
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* SLI-2/SLI-3 provide different sized iocbs.  Given a pointer
 * to the start of the ring, and the slot number of the
 * desired iocb entry, calc a pointer to that entry.
 */
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}
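/*
 * Editorial sketch (not driver code): both helpers above are the same
 * base + slot * entry_size computation; SLI-2 and SLI-3 differ only in the
 * entry sizes stashed in phba->iocb_cmd_size / phba->iocb_rsp_size.  All
 * names below are hypothetical.
 */
static inline void *
ring_entry_at(void *ring_base, uint32_t slot, uint32_t entry_size)
{
	/* plain pointer arithmetic into a contiguous ring of fixed slots */
	return (char *)ring_base + (size_t)slot * entry_size;
}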
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	return iocbq;
}

struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return type;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"%d:0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					phba->brd_no, rc,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
static void
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	pring->txcmplq_cnt++;
	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}
}
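/*
 * Editorial note: the timeout armed above is twice the fabric R_A_TOV;
 * fc_ratov is in seconds, so it is scaled by HZ to get jiffies.  Hedged
 * sketch with a hypothetical name:
 */
static inline unsigned long
lpfc_els_tmo_deadline(struct lpfc_hba *phba)
{
	/* 2 * R_A_TOV seconds from now, expressed in jiffies */
	return jiffies + HZ * (phba->fc_ratov << 1);
}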
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct list_head *dlp;
	struct lpfc_iocbq *cmd_iocb;

	dlp = &pring->txq;
	list_remove_head((&pring->txq), cmd_iocb,
			 struct lpfc_iocbq, list);
	if (cmd_iocb) {
		/* If the first ptr is not equal to the list header,
		 * dequeue the IOCBQ_t and return it.
		 */
		pring->txq_cnt--;
	}
	return cmd_iocb;
}
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					phba->brd_no, pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			/* hbalock should already be held */
			if (phba->work_wait)
				lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
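/*
 * Editorial sketch (not driver code): the index dance above treats the
 * command ring as a classic circular buffer.  The ring is "full" when
 * advancing the driver's put index would land on the adapter's get index;
 * one slot is sacrificed so that full and empty states are
 * distinguishable.  Hypothetical names throughout.
 */
static inline int
ring_is_full(uint32_t next_put_idx, uint32_t adapter_get_idx)
{
	/* equal indexes mean only the reserved empty slot remains */
	return next_put_idx == adapter_get_idx;
}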
int
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case: another thread
				 * grew the table while we allocated */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0318 Failed to allocate IOTAG. last IOTAG is %d\n",
			phba->brd_no, psli->last_iotag);

	return 0;
}
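/*
 * Editorial sketch (not driver code): lpfc_sli_next_iotag() grows the
 * iotag lookup table with the usual allocate-outside-the-lock pattern:
 * drop the lock, kzalloc a bigger array, retake the lock, re-check that
 * nobody grew the table first, then memcpy and publish.  Condensed shape,
 * all names hypothetical:
 */
#if 0	/* illustration only, not compiled */
	unlock();
	new_arr = kzalloc(new_len * sizeof(*new_arr), GFP_KERNEL);
	lock();
	if (table_already_grown())	/* lost the race: use the winner's */
		kfree(new_arr);
	else {
		memcpy(new_arr, old_arr, used * sizeof(*new_arr));
		publish(new_arr, new_len);
		kfree(old_arr);
	}
	unlock();
#endif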
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now.  For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
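/*
 * Editorial note: every writel() to the Chip Attention register in these
 * helpers is followed by a readl() of the same register.  The readl()
 * forces the posted PCI write out to the adapter before the driver
 * continues -- the standard MMIO write-then-read-back flush idiom:
 *
 *	writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
 *	readl(phba->CAregaddr);   (flushes the posted write)
 */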
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	wmb();
	writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */
}
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA) &&
	    !(pring->flag & LPFC_STOP_IOCB_MBX)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}
}
/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
static void
lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[ringno] :
		&phba->slim2p->mbx.us.s2.port[ringno];
	unsigned long iflags;

	/* If the ring is active, flag it */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.ring[ringno].cmdringaddr) {
		if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
			phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
			/*
			 * Force update of the local copy of cmdGetInx
			 */
			phba->sli.ring[ringno].local_getidx
				= le32_to_cpu(pgp->cmdGetInx);
			lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"%d:1802 HBQ %d: local_hbqGetIdx "
					"%u is greater than entry_count %u\n",
					phba->brd_no, hbqno,
					hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqslimp.virt + hbqp->hbqPutIdx;
}
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;

	/* Return all memory used by all HBQs */
	list_for_each_entry_safe(dmabuf, next_dmabuf,
				 &phba->hbq_buffer_list, list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
		kfree(hbq_buf);
	}
}
static void
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = FCELSSIZE;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
	}
}
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = 1 << LPFC_ELS_RING,
	.buffer_count = 0,
	.init_count = 20,
	.add_count = 5,
};

static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, start, end;
	struct hbq_dmabuf *hbq_buffer;

	start = lpfc_hbq_defs[hbqno]->buffer_count;
	end = count + lpfc_hbq_defs[hbqno]->buffer_count;
	if (end > lpfc_hbq_defs[hbqno]->entry_count) {
		end = lpfc_hbq_defs[hbqno]->entry_count;
	}

	/* Populate HBQ entries */
	for (i = start; i < end; i++) {
		hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
				     GFP_KERNEL);
		if (!hbq_buffer)
			return 1;
		hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
						       &hbq_buffer->dbuf.phys);
		if (hbq_buffer->dbuf.virt == NULL) {
			kfree(hbq_buffer);	/* don't leak the wrapper */
			return 1;
		}
		hbq_buffer->tag = (i | (hbqno << 16));
		lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
		lpfc_hbq_defs[hbqno]->buffer_count++;
	}
	return 0;
}

int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count));
}

int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count));
}
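/*
 * Editorial sketch (not driver code): HBQ buffer tags assigned in
 * lpfc_sli_hbqbuf_fill_hbqs() pack the queue number into the upper 16
 * bits and the buffer index into the lower 16 bits, which is why
 * lpfc_sli_hbqbuf_find() below matches on (tag & 0xffff) and
 * lpfc_sli_free_hbq() recovers the queue with (tag >> 16).  Hypothetical
 * helper names:
 */
static inline uint32_t hbq_tag_make(uint32_t hbqno, uint32_t idx)
{
	return idx | (hbqno << 16);
}

static inline uint32_t hbq_tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;
}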
struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;

	list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if ((hbq_buf->tag & 0xffff) == tag) {
			return hbq_buf;
		}
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"%d:1803 Bad hbq tag. Data: x%x x%x\n",
			phba->brd_no, tag,
			lpfc_hbq_defs[tag >> 16]->buffer_count);
	return NULL;
}

void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *sp)
{
	uint32_t hbqno;

	hbqno = sp->tag >> 16;
	lpfc_sli_hbq_to_firmware(phba, hbqno, sp);
}
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_READ_LA:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_SLIM:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_LA64:
	case MBX_FLASH_WR_ULA:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
		ret = mbxCommand;
		break;
	default:
		ret = MBXERR_ERROR;
		break;
	}
	return ret;
}
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
}
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp;
	uint16_t rpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the
	 * node is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->mb.mbxStatus) {

		rpi = pmb->mb.un.varWords[0];
		lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->mb;

		/*
		 * It is a fatal error if an unknown mbox command completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBXERR_ERROR) {
			/* Unknown mailbox command completion */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"%d (%d):0323 Unknown Mailbox command "
					"%x Cmpl\n",
					phba->brd_no,
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand);
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"%d (%d):0305 Mbox cmd cmpl "
						"error - RETRYing Data: x%x "
						"x%x x%x x%x\n",
						phba->brd_no,
						pmb->vport ? pmb->vport->vpi : 0,
						pmbox->mbxCommand,
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						pmb->vport ?
						pmb->vport->port_state : 0),
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				spin_lock_irq(&phba->hbalock);
				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irq(&phba->hbalock);
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_SUCCESS)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				phba->brd_no,
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}
static struct lpfc_dmabuf *
lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry, *new_hbq_entry;

	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (hbq_entry == NULL)
		return NULL;
	list_del(&hbq_entry->dbuf.list);
	new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
	if (new_hbq_entry == NULL)
		return &hbq_entry->dbuf;
	new_hbq_entry->dbuf = hbq_entry->dbuf;
	new_hbq_entry->tag = -1;
	hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
	if (hbq_entry->dbuf.virt == NULL) {
		kfree(new_hbq_entry);
		return &hbq_entry->dbuf;
	}
	lpfc_sli_free_hbq(phba, hbq_entry);
	return &new_hbq_entry->dbuf;
}
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	uint32_t match, i;

	match = 0;
	irsp = &(saveq->iocb);
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
	    || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
	    || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
	    || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
		Rctl = FC_ELS_REQ;
		Type = FC_ELS_DATA;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_ELS_REQ;
			Type = FC_ELS_DATA;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0)
			saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
						irsp->un.ulpWord[3]);
		if (irsp->ulpBdeCount == 2)
			saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
						irsp->un.ulpWord[15]);
	}

	/* Unsolicited responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		match = 1;
	} else {
		/* We must search, based on rctl / type
		   for the right routine */
		for (i = 0; i < pring->num_mask; i++) {
			if ((pring->prt[i].rctl == Rctl)
			    && (pring->prt[i].type == Type)) {
				if (pring->prt[i].lpfc_sli_rcv_unsol_event)
					(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
				match = 1;
			}
		}
	}
	if (match == 0) {
		/* Unexpected Rctl / Type received */
		/* Ring <ringno> handler: unexpected
		   Rctl <Rctl> Type <Type> received */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"%d:0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				phba->brd_no,
				pring->ringno,
				Rctl,
				Type);
	}
	return 1;
}
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		pring->txcmplq_cnt--;
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			phba->brd_no, iotag,
			phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * Post all ELS completions to the worker thread.
			 * All others are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d (%d):0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					/* cmdiocbp is NULL here, so take the
					 * vpi from the response iocb instead */
					saveq->vport ? saveq->vport->vpi : 0,
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			phba->brd_no, pring->ringno,
			le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	/* hbalock should already be held */
	if (phba->work_wait)
		lpfc_worker_wake_up(phba);
}
void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
	struct lpfc_sli      *psli  = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	struct lpfc_pgp *pgp;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	lpfc_iocb_type type;
	uint32_t rsp_cmpl = 0;
	uint32_t ha_copy;
	unsigned long iflags;

	pring->stats.iocb_event++;

	pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		return;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		entry = lpfc_resp_iocb(phba, pring);
		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbq.iocb;
		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d:0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no, pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"%d:0314 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion", phba->brd_no,
						irsp->ulpCommand);
				break;
			}

			spin_lock_irqsave(&phba->hbalock, iflags);
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"%d:0321 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						phba->brd_no,
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	ha_copy = readl(phba->HAregaddr);
	ha_copy >>= (LPFC_FCP_RING * 4);

	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	if ((ha_copy & HA_R0CE_RSP) &&
	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
/*
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_adjust_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d:0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no, pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"%d:0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion",
						phba->brd_no,
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
				} else {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				}
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"%d:0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						phba->brd_no,
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	struct lpfc_iocbq *next_iocb;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	uint32_t status, free_saveq;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger
		 * than rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"%d:0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				phba->brd_no, pring->ringno, portRspPut,
				portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return 1;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __FUNCTION__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (list_empty(&(pring->iocb_continueq))) {
			list_add(&rspiocbp->list, &(pring->iocb_continueq));
		} else {
			list_add_tail(&rspiocbp->list,
				      &(pring->iocb_continueq));
		}

		pring->iocb_continueq_cnt++;
		if (irsp->ulpLe) {
			/*
			 * By default, the driver expects to free all resources
			 * associated with this iocb completion.
			 */
			free_saveq = 1;
			saveq = list_get_first(&pring->iocb_continueq,
					       struct lpfc_iocbq, list);
			irsp = &(saveq->iocb);
			list_del_init(&pring->iocb_continueq);
			pring->iocb_continueq_cnt = 0;
			pring->stats.iocb_rsp++;

			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_adjust_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			if (irsp->ulpStatus) {
				/* Rsp ring <ringno> error: IOCB */
				lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
						"%d:0328 Rsp Ring %d error: "
						"IOCB Data: "
						"x%x x%x x%x x%x "
						"x%x x%x x%x x%x "
						"x%x x%x x%x x%x "
						"x%x x%x x%x x%x\n",
						phba->brd_no,
						pring->ringno,
						irsp->un.ulpWord[0],
						irsp->un.ulpWord[1],
						irsp->un.ulpWord[2],
						irsp->un.ulpWord[3],
						irsp->un.ulpWord[4],
						irsp->un.ulpWord[5],
						*(((uint32_t *) irsp) + 6),
						*(((uint32_t *) irsp) + 7),
						*(((uint32_t *) irsp) + 8),
						*(((uint32_t *) irsp) + 9),
						*(((uint32_t *) irsp) + 10),
						*(((uint32_t *) irsp) + 11),
						*(((uint32_t *) irsp) + 12),
						*(((uint32_t *) irsp) + 13),
						*(((uint32_t *) irsp) + 14),
						*(((uint32_t *) irsp) + 15));
			}

			/*
			 * Fetch the IOCB command type and call the correct
			 * completion routine.  Solicited and Unsolicited
			 * IOCBs on the ELS ring get freed back to the
			 * lpfc_iocb_list by the discovery kernel thread.
			 */
			iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
			type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
			if (type == LPFC_SOL_IOCB) {
				spin_unlock_irqrestore(&phba->hbalock,
						       iflag);
				rc = lpfc_sli_process_sol_iocb(phba, pring,
							       saveq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			} else if (type == LPFC_UNSOL_IOCB) {
				spin_unlock_irqrestore(&phba->hbalock,
						       iflag);
				rc = lpfc_sli_process_unsol_iocb(phba, pring,
								 saveq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			} else if (type == LPFC_ABORT_IOCB) {
				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
				    ((cmdiocbp =
				      lpfc_sli_iocbq_lookup(phba, pring,
							    saveq)))) {
					/* Call the specified completion
					   routine */
					if (cmdiocbp->iocb_cmpl) {
						spin_unlock_irqrestore(
							&phba->hbalock,
							iflag);
						(cmdiocbp->iocb_cmpl) (phba,
							cmdiocbp, saveq);
						spin_lock_irqsave(
							&phba->hbalock,
							iflag);
					} else
						__lpfc_sli_release_iocbq(phba,
							cmdiocbp);
				}
			} else if (type == LPFC_UNKNOWN_IOCB) {
				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

					char adaptermsg[LPFC_MAX_ADPTMSG];

					memset(adaptermsg, 0,
					       LPFC_MAX_ADPTMSG);
					memcpy(&adaptermsg[0], (uint8_t *) irsp,
					       MAX_MSG_DATA);
					dev_warn(&((phba->pcidev)->dev),
						 "lpfc%d: %s",
						 phba->brd_no, adaptermsg);
				} else {
					/* Unknown IOCB command */
					lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
							"%d:0335 Unknown IOCB "
							"command Data: x%x "
							"x%x x%x x%x\n",
							phba->brd_no,
							irsp->ulpCommand,
							irsp->ulpStatus,
							irsp->ulpIoTag,
							irsp->ulpContext);
				}
			}

			if (free_saveq) {
				list_for_each_entry_safe(rspiocbp, next_iocb,
							 &saveq->list, list) {
					list_del(&rspiocbp->list);
					__lpfc_sli_release_iocbq(phba,
								 rspiocbp);
				}
				__lpfc_sli_release_iocbq(phba, saveq);
			}
			rspiocbp = NULL;
		}

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *cmd = NULL;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);

	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		cmd = &iocb->iocb;
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}
}
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
#define BARRIER_TEST_PATTERN (0xdeadbeef)

void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy;
	int i;
	uint8_t hdrtype;

	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	hc_copy = readl(phba->HCregaddr);
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (readl(phba->HAregaddr) & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	for (i = 0;
	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
		mdelay(1);

	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
		mdelay(1);

clear_errat:

	while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
		mdelay(1);

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
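/*
 * Editorial sketch (not driver code): the barrier above writes
 * BARRIER_TEST_PATTERN into a scratch SLIM word and busy-waits for the
 * adapter to store its bitwise complement, bounding the wait with a retry
 * count.  Generic shape of that poll loop, hypothetical name:
 */
static inline int
poll_for_complement(void __iomem *reg, uint32_t pattern, int max_ms)
{
	int i;

	for (i = 0; readl(reg) != ~pattern && i < max_ms; i++)
		mdelay(1);	/* 1 ms per retry, as above */
	return readl(reg) == ~pattern;
}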
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"%d:0329 Kill HBA Data: x%x x%x\n",
			phba->brd_no, phba->pport->port_state, psli->sli_flag);

	if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL)) == NULL)
		return 1;

	/* Disable the error attention */
	spin_lock_irq(&phba->hbalock);
	status = readl(phba->HCregaddr);
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
	 * attention every 100ms for 3 seconds. If we don't get ERATT after
	 * 3 seconds we still set HBA_ERROR state because the status of the
	 * board is now undefined.
	 */
	ha_copy = readl(phba->HAregaddr);

	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		ha_copy = readl(phba->HAregaddr);
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	psli->mbox_active = NULL;
	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	return ha_copy & HA_ERATT ? 0 : 1;
}
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	uint16_t skip_post;
	volatile uint32_t word0;
	void __iomem *to_slim;

	spin_lock_irq(&phba->hbalock);

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
			phba->pport->port_state, psli->sli_flag);

	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->pport->port_state) {
		skip_post = 1;
		word0 = 1;	/* This is really setting up word1 */
	} else {
		skip_post = 0;
		word0 = 0;	/* This is really setting up word1 */
	}
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;

	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	if (skip_post)
		mdelay(100);
	else
		mdelay(2000);

	lpfc_hba_down_post(phba);

	return 0;
}
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/* Check status register to see what current state is */
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 100ms for 5 retries, then every 500ms for 5,
		 * then every 2.5 sec for 5, then reset board and every 2.5
		 * sec for 4.
		 */
		if (i++ >= 20) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"%d:0436 Adapter failed to init, "
					"timeout, status reg x%x\n",
					phba->brd_no, status);
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"%d:0437 Adapter failed to init, "
					"chipset, status reg x%x\n",
					phba->brd_no,
					status);
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 5) {
			msleep(10);
		} else if (i <= 10) {
			msleep(500);
		} else {
			msleep(2500);
		}

		if (i == 15) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0438 Adapter failed to init, chipset, "
				"status reg x%x\n",
				phba->brd_no,
				status);
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
static int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}

static int
lpfc_sli_hbq_entry_count(void)
{
	int hbq_count = lpfc_sli_hbq_count();
	int count = 0;
	int i;

	for (i = 0; i < hbq_count; ++i)
		count += lpfc_hbq_defs[i]->entry_count;
	return count;
}

int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx = 0;
		phba->hbqs[hbqno].local_hbqGetIdx = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
				pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"%d:1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					phba->brd_no, pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
			return -ENOMEM;
	}
	return 0;
}
static int
lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		msleep(2500);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization.  A
		 * value of 0 means the call was successful.  Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc) {
			break;
		}

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"%d:0442 Adapter failed to init, mbxCmd x%x "
					"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
					phba->brd_no, pmb->mb.mbxCommand,
					pmb->mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			done = 1;
			phba->max_vpi = (phba->max_vpi &&
					 pmb->mb.un.varCfgPort.gmv) != 0
				? pmb->mb.un.varCfgPort.max_vpi
				: 0;
		}
	}

	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}

	if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
	    (!pmb->mb.un.varCfgPort.cMA)) {
		rc = -ENXIO;
		goto do_prep_failed;
	}

do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int mode = 3;

	switch (lpfc_sli_mode) {
	case 2:
		if (lpfc_npiv_enable) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
					"%d:1824 NPIV enabled: Override lpfc_sli_mode "
					"parameter (%d) to auto (0).\n",
					phba->brd_no, lpfc_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"%d:1819 Unrecognized lpfc_sli_mode "
				"parameter: %d.\n",
				phba->brd_no, lpfc_sli_mode);
		break;
	}

	rc = lpfc_do_config_port(phba, mode);
	if (rc && lpfc_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"%d:1820 Unable to select SLI-3. "
				"Not supported by adapter.\n",
				phba->brd_no);
	if (rc && mode != 2)
		rc = lpfc_do_config_port(phba, 2);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
		phba->sli3_options |= LPFC_SLI3_ENABLED;
		phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->brd_no, phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Init HBQs */

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}

	phba->sli.sli_flag |= LPFC_PROCESS_LA;

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"%d:0445 Firmware initialization failed\n",
			phba->brd_no);
	return rc;
}
/*! lpfc_mbox_timeout
 *
 * \pre
 * \post
 * \param hba Pointer to per struct lpfc_hba structure
 * \param l1  Pointer to the driver's mailbox queue.
 * \return
 *	None.
 *
 * \b Description:
 *
 * This routine handles mailbox timeout events at timer interrupt context.
 */
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted) {
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->work_wait)
			lpfc_worker_wake_up(phba);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	}
}
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = &pmbox->mb;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"%d:0310 Mailbox command x%x timeout Data: x%x x%x "
			"x%p\n",
			phba->brd_no,
			mb->mbxCommand,
			phba->pport->port_state,
			psli->sli_flag,
			phba->sli.mbox_active);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	phba->pport->fc_flag |= FC_ESTABLISH_LINK;
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"%d:0316 Resetting board due to mailbox timeout\n",
			phba->brd_no);
	/*
	 * lpfc_offline calls lpfc_sli_hba_down which will clean up
	 * on outstanding mailbox commands.
	 */
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	if (lpfc_online(phba) == 0)		/* Initialize the HBA */
		mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
	lpfc_unblock_mgmt_io(phba);
}
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy;
	int i;
	unsigned long drvr_flag = 0;
	volatile uint32_t word0, ldata;
	void __iomem *to_slim;

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"%d:1806 Mbox x%x failed. No vport\n",
					phba->brd_no,
					pmbox->mb.mbxCommand);
			dump_stack();
			return MBXERR_ERROR;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return MBX_NOT_FINISHED;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);

	mb = &pmbox->mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
		return MBX_NOT_FINISHED;
	}

	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
	    !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
		return MBX_NOT_FINISHED;
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
			return MBX_NOT_FINISHED;
		}

		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
			return MBX_NOT_FINISHED;
		}

		/* Handle STOP IOCB processing flag. This is only meaningful
		 * if we are not polling for mbox completion.
		 */
		if (flag & MBX_STOP_IOCB) {
			flag &= ~MBX_STOP_IOCB;
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
						LPFC_STOP_IOCB_MBX;
				}
			}
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"%d (%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				phba->brd_no,
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mb->mbxCommand, phba->pport->port_state,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		return MBX_BUSY;
	}

	/* Handle STOP IOCB processing flag. This is only meaningful
	 * if we are not polling for mbox completion.
	 */
	if (flag & MBX_STOP_IOCB) {
		flag &= ~MBX_STOP_IOCB;
		if (flag == MBX_NOWAIT) {
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
						LPFC_STOP_IOCB_MBX;
				}
			}
		}
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
		    (mb->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
			return MBX_NOT_FINISHED;
		}
		/* timeout active mbox command */
		mod_timer(&psli->mbox_tmo, (jiffies +
			  (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
			mb->mbxCommand, phba->pport->port_state,
			psli->sli_flag, flag);

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mb->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
		/* First copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
	} else {
		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
					      MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((volatile uint32_t *)mb);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI2_ACTIVE;
		}
	}

	wmb();
	/* interrupt board to do it right away */
	writel(CA_MBATT, phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	switch (flag) {
	case MBX_NOWAIT:
		/* Don't wait for it to finish, just return */
		psli->mbox_active = pmbox;
		break;

	case MBX_POLL:
		psli->mbox_active = NULL;
		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* First read mbox status word */
			word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			word0 = readl(phba->MBslimaddr);
		}

		/* Read the HBA Host Attention Register */
		ha_copy = readl(phba->HAregaddr);

		i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
		i *= 1000; /* Convert to ms */

		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (i-- <= 0) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				return MBX_NOT_FINISHED;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);

			msleep(1);

			spin_lock_irqsave(&phba->hbalock, drvr_flag);

			if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
				/* First copy command data */
				word0 = *((volatile uint32_t *)
					  &phba->slim2p->mbx);
				word0 = le32_to_cpu(word0);
				if (mb->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					volatile uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) &slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
							~LPFC_SLI2_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			ha_copy = readl(phba->HAregaddr);
		}

		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
					      MAILBOX_CMD_SIZE);
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
			    pmbox->context2) {
				lpfc_memcpy_from_slim((void *)pmbox->context2,
					phba->MBslimaddr + DMP_RSP_OFFSET,
					mb->un.varDmp.word_cnt);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mb->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;
}
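/*
 * Usage sketch (illustrative; lpfc_read_rev() is assumed here as one of the
 * mailbox builders from lpfc_mbox.c, and error handling is abbreviated):
 * issuing a command in polled mode and inspecting the adapter status.
 *
 *	LPFC_MBOXQ_t *mbq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mbq) {
 *		lpfc_read_rev(phba, mbq);
 *		if (lpfc_sli_issue_mbox(phba, mbq, MBX_POLL) != MBX_SUCCESS)
 *			;	// mbq->mb.mbxStatus holds the HBA status
 *		mempool_free(mbq, phba->mbox_mem_pool);
 *	}
 */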
/*
 * Caller needs to hold lock.
 */
static void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
}
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
/*
 * Lockless version of lpfc_sli_issue_iocb.
 */
static int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"%d:1807 IOCB x%x failed. No vport\n",
				phba->brd_no,
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding mbox command.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}
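/*
 * Design note: callers already holding phba->hbalock (for example the abort
 * path in lpfc_sli_issue_abort_iotag() below) must call
 * __lpfc_sli_issue_iocb() directly; going through this wrapper would
 * self-deadlock on hbalock. A sketch of the locked pattern:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = __lpfc_sli_issue_iocb(phba, pring, piocb, 0);  // lock held
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */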
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->ring[psli->fcp_ring];
	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->ring[psli->extra_ring];

	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->extra_ring = LPFC_EXTRA_RING;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max = (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->num_mask = 4;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_ELS_REQ;
			pring->prt[0].type = FC_ELS_DATA;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_ELS_RSP;
			pring->prt[1].type = FC_ELS_DATA;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
				(pring->numRiocb * pring->sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0462 Too many cmd / rsp ring entries in "
				"SLI2 SLIM Data: x%x x%lx\n",
				phba->brd_no, totiocbsize,
				(unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
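/*
 * Worked example of the totiocbsize check above (entry counts are
 * illustrative; the real values come from the SLI2_IOCB_*_ENTRIES macros):
 * a ring with 128 cmd entries and 128 rsp entries of 32 bytes each needs
 * (128 * 32) + (128 * 32) = 8192 bytes of SLIM. The sum over all rings is
 * what must stay below MAX_SLIM_IOCB_SIZE, and SLI-3's larger IOCBs make
 * the same entry counts cost proportionally more.
 */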
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->next_cmdidx  = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->postbufq);
	}
	spin_unlock_irq(&phba->hbalock);
	return 1;
}
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		prev_pring_flag = pring->flag;
		pring->flag |= LPFC_DEFERRED_RING_EVENT;

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			if (iocb->vport != vport)
				continue;
			list_del_init(&iocb->list);
			pring->txq_cnt--;
			if (iocb->iocb_cmpl) {
				icmd = &iocb->iocb;
				icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
				icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
				spin_unlock_irqrestore(&phba->hbalock, flags);
				(iocb->iocb_cmpl) (phba, iocb, iocb);
				spin_lock_irqsave(&phba->hbalock, flags);
			} else
				lpfc_sli_release_iocbq(phba, iocb);
		}

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
					 list) {
			if (iocb->vport != vport)
				continue;
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}

		pring->flag = prev_pring_flag;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);

	return 1;
}
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_iocbq *iocb;
	IOCB_t *cmd = NULL;
	int i;
	unsigned long flags = 0;

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag |= LPFC_DEFERRED_RING_EVENT;

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
		cmd = &iocb->iocb;

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);
	spin_lock_irqsave(&phba->hbalock, flags);

	spin_lock(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock(&phba->pport->work_port_lock);

	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}

	/* Return any pending or completed mbox cmds */
	list_splice_init(&phba->sli.mboxq, &completions);
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);

	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			pmb->mbox_cmpl(phba, pmb);
		}
	}
	return 1;
}
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
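/*
 * Example of the swap semantics above: the mailbox area is little-endian,
 * so on a big-endian host a SLIM word holding 0x11223344 reads back as
 * 0x44332211 and le32_to_cpu() restores 0x11223344 before it is stored
 * through *dest. On a little-endian host le32_to_cpu() is a no-op and this
 * reduces to a plain word copy.
 */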
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"%d:0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			phba->brd_no, pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}
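/*
 * Usage sketch (illustrative): the put/get pair brackets the life of an
 * unsolicited DMA buffer. getPaddr() is assumed here as the driver's helper
 * for combining the two 32-bit address words the adapter reports:
 *
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);	// before posting
 *	...
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring,
 *				      getPaddr(addr_hi, addr_lo));
 *	if (!mp)
 *		;	// stale or never-posted address, already logged
 */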
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	abort_iocb = NULL;

	if (irsp->ulpStatus) {
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
			abort_iocb = phba->sli.iocbq_lookup[abort_iotag];

		lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
				"%d:0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				phba->brd_no, abort_iocb, abort_iotag,
				abort_context, irsp->ulpStatus,
				irsp->un.ulpWord[4]);

		/*
		 * make sure we have the right iocbq before taking it
		 * off the txcmplq and try to call completion routine.
		 */
		if (!abort_iocb ||
		    abort_iocb->iocb.ulpContext != abort_context ||
		    (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
			spin_unlock_irq(&phba->hbalock);
		else {
			list_del_init(&abort_iocb->list);
			pring->txcmplq_cnt--;
			spin_unlock_irq(&phba->hbalock);

			abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
			abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
			(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
		}
	}

	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
			"x%x x%x x%x\n",
			phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
			irsp->un.ulpWord[4], irsp->ulpTimeout);
	lpfc_els_free_iocb(phba, cmdiocb);
	return;
}
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;
	int retval = IOCB_ERROR;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* If we're unloading, don't abort the iocb, but change the callback so
	 * that nothing happens when it finishes.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler.
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	iabt = &abtsiocbp->iocb;
	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.acxri.abortContextTag = icmd->ulpContext;
	iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;

	if (phba->link_state >= LPFC_LINK_UP)
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	else
		iabt->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"%d (%d):0339 Abort xri x%x, original iotag x%x, "
			"abort cmd iotag x%x\n",
			phba->brd_no, vport->vpi,
			iabt->un.acxri.abortContextTag,
			iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
	retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);

abort_iotag_exit:
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}
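/*
 * Usage sketch (mirrors lpfc_sli_host_down() above): walk a ring's txcmplq
 * under hbalock and issue an abort per outstanding iocb, relying on the
 * abort completion handler to finish the victims:
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 */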
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
			   uint64_t lun_id, uint32_t ctx,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	struct scsi_cmnd *cmnd;
	int rc = 1;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
	cmnd = lpfc_cmd->pCmd;

	if (cmnd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((cmnd->device->id == tgt_id) &&
		    (cmnd->device->lun == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if (cmnd->device->id == tgt_id)
			rc = 0;
		break;
	case LPFC_CTX_CTX:
		if (iocbq->iocb.ulpContext == ctx)
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __FUNCTION__, ctx_cmd);
		break;
	}

	return rc;
}
int
lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		  uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_iocbq *iocbq;
	int sum, i;

	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
						0, ctx_cmd) == 0)
			sum++;
	}

	return sum;
}
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
int
lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
		    lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0,
					       abort_cmd) != 0)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = phba->pport;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
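/*
 * Usage sketch (illustrative): SCSI error handlers pair lpfc_sli_sum_iocb()
 * with this routine, e.g. a hypothetical LUN-level recovery path:
 *
 *	cnt = lpfc_sli_sum_iocb(phba, pring, tgt_id, lun_id, LPFC_CTX_LUN);
 *	if (cnt)
 *		lpfc_sli_abort_iocb(phba, pring, tgt_id, lun_id, 0,
 *				    LPFC_CTX_LUN);
 */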
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	pdone_q = cmdiocbq->context_un.wait_queue;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (pdone_q)
		wake_up(pdone_q);
	return;
}
/*
 * Issue the caller's iocb and wait for its completion, but no longer than
 * the caller's timeout. Note that iocb_flag is cleared before the
 * lpfc_sli_issue_iocb() call, since the wake routine sets a unique value
 * and by definition this is a wait function.
 */
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;

	/*
	 * If the caller has provided a response iocbq buffer, context2 must
	 * be NULL; anything else is an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~LPFC_IO_WAKE;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
	if (retval == IOCB_SUCCESS) {
		timeout_req = timeout * HZ;
		timeleft = wait_event_timeout(done_q,
					      piocb->iocb_flag & LPFC_IO_WAKE,
					      timeout_req);

		if (piocb->iocb_flag & LPFC_IO_WAKE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"%d:0331 IOCB wake signaled\n",
					phba->brd_no);
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0338 IOCB wait timeout error - no "
					"wake response Data x%x\n",
					phba->brd_no, timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0330 IOCB wake NOT set, "
					"Data x%x x%lx\n", phba->brd_no,
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"%d:0332 IOCB wait issue failed, Data x%x\n",
				phba->brd_no, retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
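/*
 * Usage sketch (illustrative): a synchronous request with a caller-provided
 * response iocb. On IOCB_TIMEDOUT the command iocb must not be freed, since
 * the late hardware completion will still reference it:
 *
 *	rspiocbq = lpfc_sli_get_iocbq(phba);
 *	rc = lpfc_sli_issue_iocb_wait(phba, pring, piocb, rspiocbq, 30);
 *	if (rc == IOCB_SUCCESS)
 *		;	// rspiocbq->iocb.ulpStatus holds the result
 */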
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int retval;
	unsigned long flag;

	/* The caller must leave context1 empty. */
	if (pmboxq->context1 != 0)
		return MBX_NOT_FINISHED;

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				timeout * HZ);

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context1 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
			retval = MBX_SUCCESS;
		else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}

	return retval;
}
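/*
 * Usage sketch (illustrative): the MBX_TIMEOUT return transfers mailbox
 * ownership to the deferred completion, so the caller frees it only on the
 * other outcomes:
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 */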
int
lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
{
	struct lpfc_vport *vport = phba->pport;
	int i = 0;
	uint32_t ha_copy;

	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
		if (i++ > LPFC_MBOX_TMO * 1000)
			return 1;

		/*
		 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
		 * did finish. This way we won't get the misleading
		 * "Stray Mailbox Interrupt" message.
		 */
		spin_lock_irq(&phba->hbalock);
		ha_copy = phba->work_ha;
		phba->work_ha &= ~HA_MBATT;
		spin_unlock_irq(&phba->hbalock);

		if (ha_copy & HA_MBATT)
			if (lpfc_sli_handle_mb_event(phba) == 0)
				i = 0;

		msleep(1);
	}

	return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
}
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	int i;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* If the pci channel is offline, ignore all the interrupts. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IRQ_NONE;

	phba->sli.slistat.sli_intr++;

	/*
	 * Call the HBA to see if it is interrupting. If not, don't claim
	 * the interrupt.
	 */

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IRQ_NONE;

	/*
	 * Read host attention register to determine interrupt source
	 * Clear Attention Sources, except Error Attention (to
	 * preserve status) and Link Attention
	 */
	spin_lock(&phba->hbalock);
	ha_copy = readl(phba->HAregaddr);
	/* If somebody is waiting to handle an eratt don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		ha_copy &= ~HA_ERATT;
	writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	if (unlikely(!ha_copy))
		return IRQ_NONE;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (unlikely(work_ha_copy)) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock(&phba->hbalock);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				control = readl(phba->HCregaddr);
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock(&phba->hbalock);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
			for (i = 0; i < phba->sli.num_rings; i++) {
				if (work_ha_copy & (HA_RXATT << (4*i))) {
					/*
					 * Turn off Slow Rings interrupts
					 */
					spin_lock(&phba->hbalock);
					control = readl(phba->HCregaddr);
					control &= ~(HC_R0INT_ENA << i);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
					spin_unlock(&phba->hbalock);
				}
			}
		}

		if (work_ha_copy & HA_ERATT) {
			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * There was a link/board error. Read the
			 * status register to retrieve the error event
			 * and process it.
			 */
			phba->sli.slistat.err_attn_event++;
			/* Save status info */
			phba->work_hs = readl(phba->HSregaddr);
			phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
			phba->work_status[1] = readl(phba->MBslimaddr + 0xac);

			/* Clear Chip error bit */
			writel(HA_ERATT, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
			phba->pport->stopped = 1;
		}

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->mb;
			mbox = &phba->slim2p->mbx;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
						LOG_SLI,
						"%d (%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						phba->brd_no,
						pmb->vport ?
							pmb->vport->vpi : 0,
						pmbox->mbxCommand,
						pmbox->mbxStatus);
			}

			del_timer_sync(&phba->sli.mbox_tmo);

			spin_lock(&phba->pport->work_port_lock);
			phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
			spin_unlock(&phba->pport->work_port_lock);
			phba->sli.mbox_active = NULL;
			if (pmb->mbox_cmpl) {
				lpfc_sli_pcimem_bcopy(mbox, pmbox,
						      MAILBOX_CMD_SIZE);
			}
			lpfc_mbox_cmpl_put(phba, pmb);
		}
		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_next_mbox:
			spin_lock(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			pmb = lpfc_mbox_get(phba);
			spin_unlock(&phba->hbalock);

			/* Process next mailbox command if there is one */
			if (pmb != NULL) {
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					pmb->mb.mbxStatus = MBX_NOT_FINISHED;
					lpfc_mbox_cmpl_put(phba, pmb);
					goto send_next_mbox;
				}
			}

			/* Turn on IOCB processing */
			for (i = 0; i < phba->sli.num_rings; i++)
				lpfc_sli_turn_on_ring(phba, i);
		}

		spin_lock(&phba->hbalock);
		phba->work_ha |= work_ha_copy;
		if (phba->work_wait)
			lpfc_worker_wake_up(phba);
		spin_unlock(&phba->hbalock);
	}

	ha_copy &= ~(phba->work_ha_mask);

	/*
	 * Process all events on FCP ring. Take the optimized path for
	 * FCP IO. Any other IO is slow path and is handled by
	 * the worker thread.
	 */
	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXATT)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO. Any other IO is slow path and is handled
		 * by the worker thread.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXATT) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}

	return IRQ_HANDLED;

} /* lpfc_intr_handler */
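/*
 * Registration sketch (assumption: done at PCI probe time with the standard
 * 2.6 shared-IRQ API; the actual probe code lives in lpfc_init.c). dev_id
 * must be the phba pointer, since the handler above casts it back:
 *
 *	if (request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
 *			LPFC_DRIVER_NAME, phba))
 *		;	// hypothetical fallback: fail the probe
 */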