/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of SCSI IDs for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
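
/*
 * Terminate any outstanding FCP I/O to the remote port and unblock
 * the scsi target. Invoked by the FC transport when a remote port
 * is being torn down.
 */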
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->vport->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
				    &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	/*
	 * A device is normally blocked for rediscovery and unblocked when
	 * devloss timeout happens. In case a vport is removed or the driver
	 * unloaded before devloss timeout happens, we need to unblock here.
	 */
	scsi_target_unblock(&rport->dev);
	return;
}
102 * This function will be called when dev_loss_tmo fire.
105 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
107 struct lpfc_rport_data *rdata;
108 struct lpfc_nodelist * ndlp;
109 struct lpfc_vport *vport;
110 struct lpfc_hba *phba;
111 struct lpfc_work_evt *evtp;
115 rdata = rport->dd_data;
123 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
124 "rport devlosscb: sid:x%x did:x%x flg:x%x",
125 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
127 /* Don't defer this if we are in the process of deleting the vport
128 * or unloading the driver. The unload will cleanup the node
129 * appropriately we just need to cleanup the ndlp rport info here.
131 if (vport->load_flag & FC_UNLOADING) {
132 put_node = rdata->pnode != NULL;
133 put_rport = ndlp->rport != NULL;
139 put_device(&rport->dev);
143 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
146 evtp = &ndlp->dev_loss_evt;
148 if (!list_empty(&evtp->evt_listp))
151 spin_lock_irq(&phba->hbalock);
152 evtp->evt_arg1 = ndlp;
153 evtp->evt = LPFC_EVT_DEV_LOSS;
154 list_add_tail(&evtp->evt_listp, &phba->work_list);
156 wake_up(phba->work_wait);
158 spin_unlock_irq(&phba->hbalock);
/*
 * This function is called from the worker thread when dev_loss_tmo
 * expires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport   *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
	if (vport->load_flag & FC_UNLOADING)
		warn_on = 0;

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) {
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
	}
}
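
/* Wake up the worker thread to process pending work */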
void
lpfc_worker_wake_up(struct lpfc_hba *phba)
{
	wake_up(phba->work_wait);
	return;
}
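
/*
 * Drain the HBA work_list, dispatching each queued event to its
 * handler. The hbalock is dropped while an individual event runs.
 */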
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				        ? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
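
/*
 * Process host attention bits, per-vport timer events and deferred
 * slow (ELS) ring events, then complete any queued work events.
 */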
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			work_port_events = vport->work_port_events;
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
			spin_lock_irq(&vport->work_port_lock);
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK)
	    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
		} else {
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
		}
		/*
		 * Turn on Ring interrupts
		 */
		spin_lock_irq(&phba->hbalock);
		control = readl(phba->HCregaddr);
		if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Enable ring: cntl:x%x hacopy:x%x",
				control, ha_copy, 0);

			control |= (HC_R0INT_ENA << LPFC_ELS_RING);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
		} else {
			lpfc_debugfs_slow_ring_trc(phba,
				"WRK Ring ok:     cntl:x%x hacopy:x%x",
				control, ha_copy, 0);
		}
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_work_list_done(phba);
}
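
/*
 * Check whether the worker thread has anything to do. Also counts
 * consecutive busy iterations in phba->work_found so lpfc_do_work
 * can yield the CPU.
 */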
static int
check_work_wait_done(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	int rc = 0;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport->work_port_events) {
			rc = 1;
			break;
		}
	}
	if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
	    kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
		rc = 1;
		phba->work_found++;
	} else
		phba->work_found = 0;
	spin_unlock_irq(&phba->hbalock);
	return rc;
}
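
/*
 * Main loop of the lpfc worker kthread: sleep until work is posted,
 * then call lpfc_work_done() until asked to stop.
 */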
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;
	phba->work_found = 0;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

		/* If there is a lot of slow ring work, like during link up
		 * check_work_wait_done() may cause this thread to not give
		 * up the CPU for very long periods of time. This may cause
		 * soft lockups or other problems. To avoid these situations
		 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
		 * consecutive iterations.
		 */
		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
			phba->work_found = 0;
			schedule();
		}
	}
	phba->work_wait = NULL;
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return 1;
}
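
/*
 * Walk the vport node list on link failure, unregistering RPIs where
 * required and feeding each node the DEVICE_RM or DEVICE_RECOVERY
 * event.
 */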
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;

		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if (!remove && ndlp->nlp_type & NLP_FABRIC)
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
static void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}
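
/* Post a link-down event to the transport and fail outstanding discovery */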
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);
}
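
/*
 * Bring the link down for all vports: issue LINK DOWN events, clean
 * up firmware default RPIs and reset pt2pt state.
 */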
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN) {
		return 0;
	}
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		phba->pport->fc_flag &= ~FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
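
/* On link up, reset Fabric nodes to NPR and fail I/O to stale RPIs */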
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;

		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
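
/*
 * Per-vport link up processing: reset discovery flags and notify the
 * FC transport of the link-up event.
 */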
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
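
/* Bring the link up on the physical port and all active vports */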
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	vport->num_disc_nodes = 0;
	/* go thru NPR nodes and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
	}

	vport->port_state = LPFC_VPORT_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
	spin_unlock_irq(shost->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
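
/*
 * Completion handler for the CONFIG_LINK mailbox command. On success
 * it either waits for FAN (public loop) or kicks off FLOGI.
 */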
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI) {
		lpfc_initial_flogi(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
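
/*
 * Completion handler for READ_SPARAM: copy the service parameters
 * into the vport and adopt any user-configured soft WWNs.
 */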
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
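
/*
 * Handle a link-up attention: record link speed and topology, pick
 * up the loop map, then issue READ_SPARAM and CONFIG_LINK.
 */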
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			    (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox, 0);
		sparam_mbox->vport = vport;
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			if (cfglink_mbox)
				mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	}

	if (cfglink_mbox) {
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
		mempool_free(cfglink_mbox, phba->mbox_mem_pool);
	}
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
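
/* Handle a link-down attention and re-enable link attention interrupts */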
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
}
/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (la->pb)
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0]);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_nlp_put(ndlp);

	return;
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:	/* Unsupported feature */
	case 0x9603:	/* Invalid vpi */
	case 0x9602:	/* Link event since CLEAR_LA */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	}
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if (vport->load_flag & FC_UNLOADING)
		scsi_host_put(shost);
}
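
/* Issue an UNREG_VPI mailbox command for the vport */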
void
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
	}
}
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->mb;

	switch (mb->mbxStatus) {
	case 0x0011:	/* Unsupported feature */
	case 0x9601:	/* Invalid issue */
	case 0x9602:	/* Invalid receive */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport **vports;
	int i;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;
	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_nlp_put(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	lpfc_nlp_put(ndlp);	/* Drop the reference from the mbox */

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0;
			     i <= phba->max_vpi && vports[i] != NULL;
			     i++) {
				if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
					continue;
				if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
					lpfc_initial_fdisc(vports[i]);
				else if (phba->sli3_options &
					 LPFC_SLI3_NPIV_ENABLED) {
					lpfc_vport_set_state(vports[i],
						FC_VPORT_NO_FABRIC_SUPP);
					lpfc_printf_vlog(vport, KERN_ERR,
							 LOG_ELS,
							 "0259 No NPIV "
							 "Fabric support\n");
				}
			}
		lpfc_destroy_vport_work_array(phba, vports);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	if (mb->mbxStatus) {
out:
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
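
/*
 * Register the node with the FC transport as a remote port and
 * record its SCSI target id, if one was assigned.
 */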
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
		lpfc_nlp_put(ndlp);
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
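
/*
 * Perform the transport side effects of a node state change:
 * register or unregister the remote port and, if the target id is
 * unusable, park the node on the unmapped list.
 */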
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
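
/* Transition a node to a new discovery state, keeping list counters in sync */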
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
			       NLP_STE_UNUSED_NODE);
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp resides on the UNUSED list
	 * until ALL other outstanding threads have completed. Thus, if a
	 * ndlp is on the UNUSED list already, we should never do another
	 * lpfc_drop_node() on it.
	 */
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be greater than the
		 * ELS/CT timeout. The FC spec states we need 3 * ratov
		 * for CT requests.
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
static int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl)(phba, iocb, iocb);
		}
	}

	return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		return 1;
	}
	return 0;
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
		}
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc == MBX_NOT_FINISHED) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis)\n");
			mempool_free(mbox, phba->mbox_mem_pool);
		}
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	lpfc_dequeue_node(vport, ndlp);

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			lpfc_nlp_put(ndlp);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
		list_del_init(&ndlp->els_retry_evt.evt_listp);
	if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
		list_del_init(&ndlp->dev_loss_evt.evt_listp);

	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	}

	if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
		    != NULL) {
			rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, 0);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
				}
			}
		}
	}

	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage.  So if we
	 * do, make sure we don't leave any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	if (ndlp->nlp_DID == 0) {
		return 0;
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for an area/domain == 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
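
/*
 * Find or allocate a node for the given DID and mark it for
 * discovery, honoring RSCN payload filtering when in RSCN mode.
 */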
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}
	if (vport->fc_flag & FC_RSCN_MODE) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			if (ndlp->nlp_flag & NLP_DELAY_TMO)
				lpfc_cancel_retry_delay_tmo(vport, ndlp);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
}
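
/*
 * Loop-map layout note (for the branch above): alpa_map[0] holds the
 * number of valid entries that follow, so a map of { 3, 0xEF, 0xE8,
 * 0x01 } describes a three-port loop.  A sketch of the walk, with a
 * hypothetical "visit" callback standing in for lpfc_setup_disc_node():
 *
 *	uint8_t map[] = { 3, 0xEF, 0xE8, 0x01 };
 *	int j;
 *
 *	for (j = 1; j <= map[0]; j++)
 *		visit(map[j]);	// visits 0xEF, 0xE8, 0x01
 */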
static void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * If it's not a physical port, or if we have already sent
	 * clear_la, then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
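
/*
 * Mailbox lifecycle used above and in the issuers below: allocate from
 * phba->mbox_mem_pool, fill in the command plus the mbox_cmpl completion
 * and vport fields, then issue with MBX_NOWAIT.  Only on MBX_NOT_FINISHED
 * does the issuer free the mailbox itself; otherwise ownership passes to
 * the completion handler.  A minimal sketch, with a hypothetical fill
 * function fill_cmd() and completion handler my_cmpl():
 *
 *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (mbox) {
 *		fill_cmd(phba, mbox);
 *		mbox->mbox_cmpl = my_cmpl;
 *		mbox->vport = vport;
 *		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
 *		    MBX_NOT_FINISHED)
 *			mempool_free(mbox, phba->mbox_mem_pool);
 *	}
 */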
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}

/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_RSCN_MODE)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
}
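
/*
 * Discovery flow summary for lpfc_disc_start() (descriptive only):
 * ADISCs to known NPR nodes go first; only when none are outstanding
 * does the port move on, via REG_VPI (SLI3/NPIV) or CLEAR_LA (SLI2
 * physical port), to PLOGI-based discovery, and finally to handling
 * any RSCNs that queued up in the meantime.
 */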
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}
}
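
/*
 * Design note for lpfc_free_tx(): IOCBs still sitting on the txq can
 * simply be pulled off and completed locally with LOCAL_REJECT /
 * SLI_ABORTED, but IOCBs on the txcmplq are already owned by the
 * firmware, so they must be aborted via lpfc_sli_issue_abort_iotag()
 * and will complete through the normal ring path.
 */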
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}

void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * RETURNS:  none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba *phba = vport->phba;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
		spin_lock_irqsave(&vport->work_port_lock, flags);
		vport->work_port_events |= WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		if (phba->work_wait)
			lpfc_worker_wake_up(phba);
		spin_unlock_irqrestore(&phba->hbalock, flags);
	}
	return;
}
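
/*
 * Timer wiring sketch (illustrative; lpfc_disc_timeout() is assumed to
 * be attached to the vport's discovery timer, here called fc_disctmo,
 * elsewhere in the driver, using the classic pre-timer_setup() kernel
 * pattern also visible in lpfc_nlp_init() below):
 *
 *	init_timer(&vport->fc_disctmo);
 *	vport->fc_disctmo.function = lpfc_disc_timeout;
 *	vport->fc_disctmo.data = (unsigned long) vport;
 *
 *	// later, arm it for "tmo" seconds:
 *	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
 */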
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
		/* port_state is identically LPFC_LOCAL_CFG_LINK while
		 * waiting for FAN.
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
		/* port_state is identically LPFC_FLOGI while waiting for
		 * FLOGI cmpl.
		 */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;
	case LPFC_FABRIC_CFG_LINK:
		/* hba_state is identically LPFC_FABRIC_CFG_LINK while
		 * waiting for NameServer login.
		 */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp)
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
		/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
			lpfc_issue_reg_vpi(phba, vport);
		else {	/* NPIV Not enabled */
			lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;
	case LPFC_DISC_AUTH:
		/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
			lpfc_issue_reg_vpi(phba, vport);
		else {	/* NPIV Not enabled */
			lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0229 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}
	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}

/*
 * This routine handles processing the FDMI REG_LOGIN mailbox command
 * upon completion. It is setup in the LPFC_MBOXQ as the completion
 * routine when the command is handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command
	 * to 0xfffffa (FDMI well known port), or delay issuing the FDMI
	 * command if fdmi-on=2 (supporting RPA/hostname).
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* The mailbox took a reference to the node; drop it now */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
}

static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}

/*
 * Search node lists for a remote port matching filter criteria.
 * This routine takes host_lock itself, so the caller must not already
 * hold it; use __lpfc_find_node() when the lock is already held.
 */
struct lpfc_nodelist *
lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, filter, param);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
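
/*
 * Filter usage sketch (illustrative only): any predicate with the
 * node_filter signature can drive lpfc_find_node().  The hypothetical
 * filter below matches on a SCSI target id (nlp_sid):
 *
 *	static int filter_by_sid(struct lpfc_nodelist *ndlp, void *param)
 *	{
 *		return ndlp->nlp_sid == *(uint16_t *) param;
 *	}
 *
 *	uint16_t sid = 3;
 *	struct lpfc_nodelist *ndlp =
 *		lpfc_find_node(vport, filter_by_sid, &sid);
 */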
/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi
 * is found, it returns the nodelist element pointer; otherwise it
 * returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN
 * is found, it returns the nodelist element pointer; otherwise it
 * returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->nlp_sid = NLP_NO_SID;
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	kref_init(&ndlp->kref);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init: did:x%x",
		ndlp->nlp_DID, 0, 0);
}

/* This routine releases all resources associated with a specific NPort's
 * ndlp and frees the nodelist back to its mempool.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release: did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_nlp_remove(ndlp->vport, ndlp);
	mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}

/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery
 * thread is still using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get: did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		kref_get(&ndlp->kref);
	}
	return ndlp;
}

/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	if (ndlp)
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node put: did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
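
/*
 * Reference-counting sketch (illustrative only): every code path that
 * stashes an ndlp pointer takes a reference first and drops it when
 * done, so that lpfc_nlp_release() fires exactly once, when the last
 * user lets go.  "do_work" is a hypothetical helper.
 *
 *	struct lpfc_nodelist *ndlp = lpfc_findnode_did(vport, did);
 *
 *	if (ndlp && lpfc_nlp_get(ndlp)) {
 *		do_work(ndlp);
 *		lpfc_nlp_put(ndlp);	// may free ndlp if last reference
 *	}
 */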
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the ndlp
 * is not being used by anyone and has been freed. A return value of
 * 0 indicates it is being used by another discovery thread and the
 * refcount is left unchanged.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));

	if (atomic_read(&ndlp->kref.refcount) == 1) {
		lpfc_nlp_put(ndlp);
		return 1;
	}
	return 0;
}