1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
33 #include "lpfc_disc.h"
35 #include "lpfc_scsi.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_vport.h"
40 #include "lpfc_debugfs.h"
42 /* AlpaArray for assignment of scsid for scan-down and bind_method */
43 static uint8_t lpfcAlpaArray[] = {
44 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
45 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
46 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
47 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
48 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
49 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
50 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
51 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
52 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
53 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
54 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
55 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
56 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
59 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
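/*
 * Called by the FC transport to terminate I/O on an rport: abort any
 * outstanding FCP iocbs for the node and unblock the SCSI target.
 */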
62 lpfc_terminate_rport_io(struct fc_rport *rport)
64 struct lpfc_rport_data *rdata;
65 struct lpfc_nodelist * ndlp;
66 struct lpfc_hba *phba;
68 rdata = rport->dd_data;
72 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
73 printk(KERN_ERR "Cannot find remote node"
74 " to terminate I/O Data x%x\n",
79 phba = ndlp->vport->phba;
81 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
82 "rport terminate: sid:x%x did:x%x flg:x%x",
83 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
85 if (ndlp->nlp_sid != NLP_NO_SID) {
86 lpfc_sli_abort_iocb(ndlp->vport,
87 &phba->sli.ring[phba->sli.fcp_ring],
88 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
92 * A device is normally blocked for rediscovery and unblocked when
93 * devloss timeout happens. In case a vport is removed or the driver
94 * is unloaded before the devloss timeout fires, we need to unblock here.
96 scsi_target_unblock(&rport->dev);
101 * This function will be called when dev_loss_tmo fires.
104 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
106 struct lpfc_rport_data *rdata;
107 struct lpfc_nodelist * ndlp;
108 struct lpfc_vport *vport;
109 struct lpfc_hba *phba;
110 struct lpfc_work_evt *evtp;
112 rdata = rport->dd_data;
116 if (rport->scsi_target_id != -1) {
117 printk(KERN_ERR "Cannot find remote node"
118 " for rport in dev_loss_tmo_callbk x%x\n",
127 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
128 "rport devlosscb: sid:x%x did:x%x flg:x%x",
129 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
131 evtp = &ndlp->dev_loss_evt;
133 if (!list_empty(&evtp->evt_listp))
136 spin_lock_irq(&phba->hbalock);
137 evtp->evt_arg1 = ndlp;
138 evtp->evt = LPFC_EVT_DEV_LOSS;
139 list_add_tail(&evtp->evt_listp, &phba->work_list);
141 wake_up(phba->work_wait);
143 spin_unlock_irq(&phba->hbalock);
149 * This function is called from the worker thread when dev_loss_tmo fires.
153 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
155 struct lpfc_rport_data *rdata;
156 struct fc_rport *rport;
157 struct lpfc_vport *vport;
158 struct lpfc_hba *phba;
167 rdata = rport->dd_data;
168 name = (uint8_t *) &ndlp->nlp_portname;
172 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
173 "rport devlosstmo:did:x%x type:x%x id:x%x",
174 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
176 if (!(vport->load_flag & FC_UNLOADING) &&
177 ndlp->nlp_state == NLP_STE_MAPPED_NODE)
180 if (ndlp->nlp_type & NLP_FABRIC) {
184 /* We will clean up these Nodes in linkup */
185 put_node = rdata->pnode != NULL;
186 put_rport = ndlp->rport != NULL;
192 put_device(&rport->dev);
196 if (ndlp->nlp_sid != NLP_NO_SID) {
198 /* flush the target */
199 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
200 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
202 if (vport->load_flag & FC_UNLOADING)
206 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
207 "0203 Devloss timeout on "
208 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
209 "NPort x%x Data: x%x x%x x%x\n",
210 *name, *(name+1), *(name+2), *(name+3),
211 *(name+4), *(name+5), *(name+6), *(name+7),
212 ndlp->nlp_DID, ndlp->nlp_flag,
213 ndlp->nlp_state, ndlp->nlp_rpi);
215 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
216 "0204 Devloss timeout on "
217 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
218 "NPort x%x Data: x%x x%x x%x\n",
219 *name, *(name+1), *(name+2), *(name+3),
220 *(name+4), *(name+5), *(name+6), *(name+7),
221 ndlp->nlp_DID, ndlp->nlp_flag,
222 ndlp->nlp_state, ndlp->nlp_rpi);
225 if (!(vport->load_flag & FC_UNLOADING) &&
226 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
227 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
228 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
229 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
234 put_node = rdata->pnode != NULL;
235 put_rport = ndlp->rport != NULL;
241 put_device(&rport->dev);
247 lpfc_worker_wake_up(struct lpfc_hba *phba)
249 wake_up(phba->work_wait);
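/*
 * Drain phba->work_list, dispatching each queued lpfc_work_evt to its
 * handler (ELS retry, dev-loss, online/offline, warm start, kill board).
 */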
254 lpfc_work_list_done(struct lpfc_hba *phba)
256 struct lpfc_work_evt *evtp = NULL;
257 struct lpfc_nodelist *ndlp;
260 spin_lock_irq(&phba->hbalock);
261 while (!list_empty(&phba->work_list)) {
262 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
264 spin_unlock_irq(&phba->hbalock);
267 case LPFC_EVT_ELS_RETRY:
268 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
269 lpfc_els_retry_delay_handler(ndlp);
270 free_evt = 0; /* evt is part of ndlp */
272 case LPFC_EVT_DEV_LOSS:
273 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
275 lpfc_dev_loss_tmo_handler(ndlp);
279 case LPFC_EVT_ONLINE:
280 if (phba->link_state < LPFC_LINK_DOWN)
281 *(int *) (evtp->evt_arg1) = lpfc_online(phba);
283 *(int *) (evtp->evt_arg1) = 0;
284 complete((struct completion *)(evtp->evt_arg2));
286 case LPFC_EVT_OFFLINE_PREP:
287 if (phba->link_state >= LPFC_LINK_DOWN)
288 lpfc_offline_prep(phba);
289 *(int *)(evtp->evt_arg1) = 0;
290 complete((struct completion *)(evtp->evt_arg2));
292 case LPFC_EVT_OFFLINE:
294 lpfc_sli_brdrestart(phba);
295 *(int *)(evtp->evt_arg1) =
296 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
297 lpfc_unblock_mgmt_io(phba);
298 complete((struct completion *)(evtp->evt_arg2));
300 case LPFC_EVT_WARM_START:
302 lpfc_reset_barrier(phba);
303 lpfc_sli_brdreset(phba);
304 lpfc_hba_down_post(phba);
305 *(int *)(evtp->evt_arg1) =
306 lpfc_sli_brdready(phba, HS_MBRDY);
307 lpfc_unblock_mgmt_io(phba);
308 complete((struct completion *)(evtp->evt_arg2));
312 *(int *)(evtp->evt_arg1)
313 = (phba->pport->stopped)
314 ? 0 : lpfc_sli_brdkill(phba);
315 lpfc_unblock_mgmt_io(phba);
316 complete((struct completion *)(evtp->evt_arg2));
321 spin_lock_irq(&phba->hbalock);
323 spin_unlock_irq(&phba->hbalock);
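/*
 * Main body of the worker thread: handle latched host attention bits
 * (error, mailbox, link), per-vport timer events, and deferred
 * slow-path (ELS) ring events, then complete queued work events.
 */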
328 lpfc_work_done(struct lpfc_hba *phba)
330 struct lpfc_sli_ring *pring;
331 uint32_t ha_copy, status, control, work_port_events;
332 struct lpfc_vport **vports;
333 struct lpfc_vport *vport;
336 spin_lock_irq(&phba->hbalock);
337 ha_copy = phba->work_ha;
339 spin_unlock_irq(&phba->hbalock);
341 if (ha_copy & HA_ERATT)
342 lpfc_handle_eratt(phba);
344 if (ha_copy & HA_MBATT)
345 lpfc_sli_handle_mb_event(phba);
347 if (ha_copy & HA_LATT)
348 lpfc_handle_latt(phba);
349 vports = lpfc_create_vport_work_array(phba);
351 for(i = 0; i < LPFC_MAX_VPORTS; i++) {
353 * We could have no vports in array if unloading, so if
354 * this happens then just use the pport
356 if (vports[i] == NULL && i == 0)
362 work_port_events = vport->work_port_events;
363 if (work_port_events & WORKER_DISC_TMO)
364 lpfc_disc_timeout_handler(vport);
365 if (work_port_events & WORKER_ELS_TMO)
366 lpfc_els_timeout_handler(vport);
367 if (work_port_events & WORKER_HB_TMO)
368 lpfc_hb_timeout_handler(phba);
369 if (work_port_events & WORKER_MBOX_TMO)
370 lpfc_mbox_timeout_handler(phba);
371 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
372 lpfc_unblock_fabric_iocbs(phba);
373 if (work_port_events & WORKER_FDMI_TMO)
374 lpfc_fdmi_timeout_handler(vport);
375 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
376 lpfc_ramp_down_queue_handler(phba);
377 if (work_port_events & WORKER_RAMP_UP_QUEUE)
378 lpfc_ramp_up_queue_handler(phba);
379 spin_lock_irq(&vport->work_port_lock);
380 vport->work_port_events &= ~work_port_events;
381 spin_unlock_irq(&vport->work_port_lock);
383 lpfc_destroy_vport_work_array(vports);
385 pring = &phba->sli.ring[LPFC_ELS_RING];
386 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
387 status >>= (4*LPFC_ELS_RING);
388 if ((status & HA_RXMASK)
389 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
390 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
391 pring->flag |= LPFC_DEFERRED_RING_EVENT;
393 lpfc_sli_handle_slow_ring_event(phba, pring,
396 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
399 * Turn on Ring interrupts
401 spin_lock_irq(&phba->hbalock);
402 control = readl(phba->HCregaddr);
403 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
404 lpfc_debugfs_slow_ring_trc(phba,
405 "WRK Enable ring: cntl:x%x hacopy:x%x",
406 control, ha_copy, 0);
408 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
409 writel(control, phba->HCregaddr);
410 readl(phba->HCregaddr); /* flush */
413 lpfc_debugfs_slow_ring_trc(phba,
414 "WRK Ring ok: cntl:x%x hacopy:x%x",
415 control, ha_copy, 0);
417 spin_unlock_irq(&phba->hbalock);
419 lpfc_work_list_done(phba);
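/*
 * Wait condition for the worker thread: returns true when any vport has
 * pending port events, host attention is set, the work list is non-empty,
 * a deferred ELS ring event is pending, or the thread should stop.
 */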
423 check_work_wait_done(struct lpfc_hba *phba)
425 struct lpfc_vport *vport;
426 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
429 spin_lock_irq(&phba->hbalock);
430 list_for_each_entry(vport, &phba->port_list, listentry) {
431 if (vport->work_port_events) {
436 if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
437 kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
441 phba->work_found = 0;
442 spin_unlock_irq(&phba->hbalock);
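/*
 * Worker thread entry point. Sleeps on work_waitq until
 * check_work_wait_done() indicates there is work, then calls
 * lpfc_work_done(), periodically giving up the CPU.
 */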
448 lpfc_do_work(void *p)
450 struct lpfc_hba *phba = p;
452 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
454 set_user_nice(current, -20);
455 phba->work_wait = &work_waitq;
456 phba->work_found = 0;
460 rc = wait_event_interruptible(work_waitq,
461 check_work_wait_done(phba));
465 if (kthread_should_stop())
468 lpfc_work_done(phba);
470 /* If there is a lot of slow ring work, like during link up,
471 * check_work_wait_done() may cause this thread to not give
472 * up the CPU for very long periods of time. This may cause
473 * soft lockups or other problems. To avoid these situations
474 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
475 * consecutive iterations.
477 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
478 phba->work_found = 0;
482 phba->work_wait = NULL;
487 * This is only called to handle FC worker events. Since this is a rare
488 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
489 * embedding it in the IOCB.
492 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
495 struct lpfc_work_evt *evtp;
499 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
500 * be queued to the worker thread for processing
502 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
506 evtp->evt_arg1 = arg1;
507 evtp->evt_arg2 = arg2;
510 spin_lock_irqsave(&phba->hbalock, flags);
511 list_add_tail(&evtp->evt_listp, &phba->work_list);
513 lpfc_worker_wake_up(phba);
514 spin_unlock_irqrestore(&phba->hbalock, flags);
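/*
 * Walk the vport node list on link failure: unregister RPIs where
 * required and run each node through the discovery state machine with
 * either DEVICE_RM (remove) or DEVICE_RECOVERY.
 */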
520 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
522 struct lpfc_hba *phba = vport->phba;
523 struct lpfc_nodelist *ndlp, *next_ndlp;
526 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
527 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
530 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
531 ((vport->port_type == LPFC_NPIV_PORT) &&
532 (ndlp->nlp_DID == NameServer_DID)))
533 lpfc_unreg_rpi(vport, ndlp);
535 /* Leave Fabric nodes alone on link down */
536 if (!remove && ndlp->nlp_type & NLP_FABRIC)
538 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
541 : NLP_EVT_DEVICE_RECOVERY);
543 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
544 lpfc_mbx_unreg_vpi(vport);
545 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
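/*
 * Per-vport link failure handling: flush RSCN and ELS activity, clean up
 * RPIs, drop unused nodes and cancel the discovery timer.
 */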
550 lpfc_port_link_failure(struct lpfc_vport *vport)
552 struct lpfc_nodelist *ndlp, *next_ndlp;
554 /* Cleanup any outstanding RSCN activity */
555 lpfc_els_flush_rscn(vport);
557 /* Cleanup any outstanding ELS commands */
558 lpfc_els_flush_cmd(vport);
560 lpfc_cleanup_rpis(vport, 0);
562 /* free any ndlp's on unused list */
563 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
564 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
565 lpfc_drop_node(vport, ndlp);
567 /* Turn off discovery timer if it's running */
568 lpfc_can_disctmo(vport);
572 lpfc_linkdown_port(struct lpfc_vport *vport)
574 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
576 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
578 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
579 "Link Down: state:x%x rtry:x%x flg:x%x",
580 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
582 lpfc_port_link_failure(vport);
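/*
 * HBA-wide link down handling: mark the link down, issue a LINK DOWN to
 * every vport, unregister firmware default RPIs and, in pt2pt mode,
 * reset myDID and re-issue CONFIG_LINK.
 */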
587 lpfc_linkdown(struct lpfc_hba *phba)
589 struct lpfc_vport *vport = phba->pport;
590 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
591 struct lpfc_vport **vports;
595 if (phba->link_state == LPFC_LINK_DOWN) {
598 spin_lock_irq(&phba->hbalock);
599 if (phba->link_state > LPFC_LINK_DOWN) {
600 phba->link_state = LPFC_LINK_DOWN;
601 phba->pport->fc_flag &= ~FC_LBIT;
603 spin_unlock_irq(&phba->hbalock);
604 vports = lpfc_create_vport_work_array(phba);
606 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
607 /* Issue a LINK DOWN event to all nodes */
608 lpfc_linkdown_port(vports[i]);
610 lpfc_destroy_vport_work_array(vports);
611 /* Clean up any firmware default rpi's */
612 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
614 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
616 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
617 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
618 == MBX_NOT_FINISHED) {
619 mempool_free(mb, phba->mbox_mem_pool);
623 /* Setup myDID for link up if we are in pt2pt mode */
624 if (phba->pport->fc_flag & FC_PT2PT) {
625 phba->pport->fc_myDID = 0;
626 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
628 lpfc_config_link(phba, mb);
629 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
631 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
632 == MBX_NOT_FINISHED) {
633 mempool_free(mb, phba->mbox_mem_pool);
636 spin_lock_irq(shost->host_lock);
637 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
638 spin_unlock_irq(shost->host_lock);
645 lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
647 struct lpfc_nodelist *ndlp;
649 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
650 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
653 if (ndlp->nlp_type & NLP_FABRIC) {
654 /* On Linkup it's safe to clean up the ndlp
655 * from Fabric connections.
657 if (ndlp->nlp_DID != Fabric_DID)
658 lpfc_unreg_rpi(vport, ndlp);
659 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
660 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
661 /* Fail outstanding IO now since device is marked for PLOGI. */
664 lpfc_unreg_rpi(vport, ndlp);
670 lpfc_linkup_port(struct lpfc_vport *vport)
672 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
673 struct lpfc_nodelist *ndlp, *next_ndlp;
674 struct lpfc_hba *phba = vport->phba;
676 if ((vport->load_flag & FC_UNLOADING) != 0)
679 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
680 "Link Up: top:x%x speed:x%x flg:x%x",
681 phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
683 /* If NPIV is not enabled, only bring the physical port up */
684 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
685 (vport != phba->pport))
688 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
690 spin_lock_irq(shost->host_lock);
691 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
692 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
693 vport->fc_flag |= FC_NDISC_ACTIVE;
694 vport->fc_ns_retry = 0;
695 spin_unlock_irq(shost->host_lock);
697 if (vport->fc_flag & FC_LBIT)
698 lpfc_linkup_cleanup_nodes(vport);
700 /* free any ndlp's in unused state */
701 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
703 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
704 lpfc_drop_node(vport, ndlp);
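/*
 * HBA-wide link up handling: unblock fabric iocbs, run link-up processing
 * on every vport and, when NPIV is enabled, issue CLEAR_LA on the
 * physical port.
 */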
708 lpfc_linkup(struct lpfc_hba *phba)
710 struct lpfc_vport **vports;
713 phba->link_state = LPFC_LINK_UP;
715 /* Unblock fabric iocbs if they are blocked */
716 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
717 del_timer_sync(&phba->fabric_block_timer);
719 vports = lpfc_create_vport_work_array(phba);
721 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
722 lpfc_linkup_port(vports[i]);
723 lpfc_destroy_vport_work_array(vports);
724 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
725 lpfc_issue_clear_la(phba, phba->pport);
731 * This routine handles processing a CLEAR_LA mailbox
732 * command upon completion. It is setup in the LPFC_MBOXQ
733 * as the completion routine when the command is
734 * handed off to the SLI layer.
737 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
739 struct lpfc_vport *vport = pmb->vport;
740 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
741 struct lpfc_sli *psli = &phba->sli;
742 MAILBOX_t *mb = &pmb->mb;
745 /* Since we don't do discovery right now, turn these off here */
746 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
747 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
748 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
750 /* Check for error */
751 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
752 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
753 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
754 "0320 CLEAR_LA mbxStatus error x%x hba "
756 mb->mbxStatus, vport->port_state);
757 phba->link_state = LPFC_HBA_ERROR;
761 if (vport->port_type == LPFC_PHYSICAL_PORT)
762 phba->link_state = LPFC_HBA_READY;
764 spin_lock_irq(&phba->hbalock);
765 psli->sli_flag |= LPFC_PROCESS_LA;
766 control = readl(phba->HCregaddr);
767 control |= HC_LAINT_ENA;
768 writel(control, phba->HCregaddr);
769 readl(phba->HCregaddr); /* flush */
770 spin_unlock_irq(&phba->hbalock);
773 vport->num_disc_nodes = 0;
774 /* go through NPR nodes and issue ELS PLOGIs */
775 if (vport->fc_npr_cnt)
776 lpfc_els_disc_plogi(vport);
778 if (!vport->num_disc_nodes) {
779 spin_lock_irq(shost->host_lock);
780 vport->fc_flag &= ~FC_NDISC_ACTIVE;
781 spin_unlock_irq(shost->host_lock);
784 vport->port_state = LPFC_VPORT_READY;
787 /* Device Discovery completes */
788 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
789 "0225 Device Discovery completes\n");
790 mempool_free(pmb, phba->mbox_mem_pool);
792 spin_lock_irq(shost->host_lock);
793 vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
794 spin_unlock_irq(shost->host_lock);
796 del_timer_sync(&phba->fc_estabtmo);
798 lpfc_can_disctmo(vport);
800 /* turn on Link Attention interrupts */
802 spin_lock_irq(&phba->hbalock);
803 psli->sli_flag |= LPFC_PROCESS_LA;
804 control = readl(phba->HCregaddr);
805 control |= HC_LAINT_ENA;
806 writel(control, phba->HCregaddr);
807 readl(phba->HCregaddr); /* flush */
808 spin_unlock_irq(&phba->hbalock);
815 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
817 struct lpfc_vport *vport = pmb->vport;
819 if (pmb->mb.mbxStatus)
822 mempool_free(pmb, phba->mbox_mem_pool);
824 if (phba->fc_topology == TOPOLOGY_LOOP &&
825 vport->fc_flag & FC_PUBLIC_LOOP &&
826 !(vport->fc_flag & FC_LBIT)) {
827 /* Need to wait for FAN - use discovery timer
828 * for timeout. port_state is identically
829 * LPFC_LOCAL_CFG_LINK while waiting for FAN
831 lpfc_set_disctmo(vport);
835 /* Start discovery by sending a FLOGI. port_state is identically
836 * LPFC_FLOGI while waiting for FLOGI cmpl
838 if (vport->port_state != LPFC_FLOGI) {
839 lpfc_initial_flogi(vport);
844 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
845 "0306 CONFIG_LINK mbxStatus error x%x "
847 pmb->mb.mbxStatus, vport->port_state);
848 mempool_free(pmb, phba->mbox_mem_pool);
852 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
853 "0200 CONFIG_LINK bad hba state x%x\n",
856 lpfc_issue_clear_la(phba, vport);
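/*
 * Completion handler for the READ_SPARAM mailbox command: copy the
 * service parameters into the vport, applying any soft WWNN/WWPN
 * overrides, or clean up and issue CLEAR_LA on error.
 */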
861 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
863 MAILBOX_t *mb = &pmb->mb;
864 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
865 struct lpfc_vport *vport = pmb->vport;
868 /* Check for error */
870 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
871 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
872 "0319 READ_SPARAM mbxStatus error x%x "
874 mb->mbxStatus, vport->port_state);
879 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
880 sizeof (struct serv_parm));
881 if (phba->cfg_soft_wwnn)
882 u64_to_wwn(phba->cfg_soft_wwnn,
883 vport->fc_sparam.nodeName.u.wwn);
884 if (phba->cfg_soft_wwpn)
885 u64_to_wwn(phba->cfg_soft_wwpn,
886 vport->fc_sparam.portName.u.wwn);
887 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
888 sizeof(vport->fc_nodename));
889 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
890 sizeof(vport->fc_portname));
891 if (vport->port_type == LPFC_PHYSICAL_PORT) {
892 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
893 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
896 lpfc_mbuf_free(phba, mp->virt, mp->phys);
898 mempool_free(pmb, phba->mbox_mem_pool);
902 pmb->context1 = NULL;
903 lpfc_mbuf_free(phba, mp->virt, mp->phys);
905 lpfc_issue_clear_la(phba, vport);
906 mempool_free(pmb, phba->mbox_mem_pool);
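/*
 * Process a link-up attention: record link speed and topology, set up
 * loop/fabric addressing, then issue READ_SPARAM and CONFIG_LINK mailbox
 * commands to continue bring-up.
 */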
911 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
913 struct lpfc_vport *vport = phba->pport;
914 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
916 struct lpfc_dmabuf *mp;
919 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
920 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
922 spin_lock_irq(&phba->hbalock);
923 switch (la->UlnkSpeed) {
925 phba->fc_linkspeed = LA_1GHZ_LINK;
928 phba->fc_linkspeed = LA_2GHZ_LINK;
931 phba->fc_linkspeed = LA_4GHZ_LINK;
934 phba->fc_linkspeed = LA_8GHZ_LINK;
937 phba->fc_linkspeed = LA_UNKNW_LINK;
941 phba->fc_topology = la->topology;
942 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
944 if (phba->fc_topology == TOPOLOGY_LOOP) {
945 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
947 /* Get Loop Map information */
949 vport->fc_flag |= FC_LBIT;
951 vport->fc_myDID = la->granted_AL_PA;
952 i = la->un.lilpBde64.tus.f.bdeSize;
955 phba->alpa_map[0] = 0;
957 if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
968 numalpa = phba->alpa_map[0];
970 while (j < numalpa) {
971 memset(un.pamap, 0, 16);
972 for (k = 1; j < numalpa; k++) {
974 phba->alpa_map[j + 1];
979 /* Link Up Event ALPA map */
980 lpfc_printf_log(phba,
983 "1304 Link Up Event "
984 "ALPA map Data: x%x "
986 un.pa.wd1, un.pa.wd2,
987 un.pa.wd3, un.pa.wd4);
992 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
993 if (phba->max_vpi && phba->cfg_enable_npiv &&
994 (phba->sli_rev == 3))
995 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
997 vport->fc_myDID = phba->fc_pref_DID;
998 vport->fc_flag |= FC_LBIT;
1000 spin_unlock_irq(&phba->hbalock);
1004 lpfc_read_sparam(phba, sparam_mbox, 0);
1005 sparam_mbox->vport = vport;
1006 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1007 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
1008 if (rc == MBX_NOT_FINISHED) {
1009 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
1010 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1012 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1014 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1020 vport->port_state = LPFC_LOCAL_CFG_LINK;
1021 lpfc_config_link(phba, cfglink_mbox);
1022 cfglink_mbox->vport = vport;
1023 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1024 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1025 if (rc != MBX_NOT_FINISHED)
1027 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1030 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1031 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1032 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
1033 vport->port_state, sparam_mbox, cfglink_mbox);
1034 lpfc_issue_clear_la(phba, vport);
1039 lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1042 struct lpfc_sli *psli = &phba->sli;
1044 lpfc_linkdown(phba);
1046 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1047 spin_lock_irq(&phba->hbalock);
1048 psli->sli_flag |= LPFC_PROCESS_LA;
1049 control = readl(phba->HCregaddr);
1050 control |= HC_LAINT_ENA;
1051 writel(control, phba->HCregaddr);
1052 readl(phba->HCregaddr); /* flush */
1053 spin_unlock_irq(&phba->hbalock);
1057 * This routine handles processing a READ_LA mailbox
1058 * command upon completion. It is setup in the LPFC_MBOXQ
1059 * as the completion routine when the command is
1060 * handed off to the SLI layer.
1063 lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1065 struct lpfc_vport *vport = pmb->vport;
1066 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1068 MAILBOX_t *mb = &pmb->mb;
1069 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1071 /* Check for error */
1072 if (mb->mbxStatus) {
1073 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1074 "1307 READ_LA mbox error x%x state x%x\n",
1075 mb->mbxStatus, vport->port_state);
1076 lpfc_mbx_issue_link_down(phba);
1077 phba->link_state = LPFC_HBA_ERROR;
1078 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1081 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
1083 memcpy(&phba->alpa_map[0], mp->virt, 128);
1085 spin_lock_irq(shost->host_lock);
1087 vport->fc_flag |= FC_BYPASSED_MODE;
1089 vport->fc_flag &= ~FC_BYPASSED_MODE;
1090 spin_unlock_irq(shost->host_lock);
1092 if (((phba->fc_eventTag + 1) < la->eventTag) ||
1093 (phba->fc_eventTag == la->eventTag)) {
1094 phba->fc_stat.LinkMultiEvent++;
1095 if (la->attType == AT_LINK_UP)
1096 if (phba->fc_eventTag != 0)
1097 lpfc_linkdown(phba);
1100 phba->fc_eventTag = la->eventTag;
1102 if (la->attType == AT_LINK_UP) {
1103 phba->fc_stat.LinkUp++;
1104 if (phba->link_flag & LS_LOOPBACK_MODE) {
1105 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1106 "1306 Link Up Event in loop back mode "
1107 "x%x received Data: x%x x%x x%x x%x\n",
1108 la->eventTag, phba->fc_eventTag,
1109 la->granted_AL_PA, la->UlnkSpeed,
1112 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1113 "1303 Link Up Event x%x received "
1114 "Data: x%x x%x x%x x%x\n",
1115 la->eventTag, phba->fc_eventTag,
1116 la->granted_AL_PA, la->UlnkSpeed,
1119 lpfc_mbx_process_link_up(phba, la);
1121 phba->fc_stat.LinkDown++;
1122 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1123 "1305 Link Down Event x%x received "
1124 "Data: x%x x%x x%x\n",
1125 la->eventTag, phba->fc_eventTag,
1126 phba->pport->port_state, vport->fc_flag);
1127 lpfc_mbx_issue_link_down(phba);
1130 lpfc_mbx_cmpl_read_la_free_mbuf:
1131 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1133 mempool_free(pmb, phba->mbox_mem_pool);
1138 * This routine handles processing a REG_LOGIN mailbox
1139 * command upon completion. It is setup in the LPFC_MBOXQ
1140 * as the completion routine when the command is
1141 * handed off to the SLI layer.
1144 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1146 struct lpfc_vport *vport = pmb->vport;
1147 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1148 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1150 pmb->context1 = NULL;
1152 /* Good status, call state machine */
1153 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
1154 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1156 mempool_free(pmb, phba->mbox_mem_pool);
1163 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1165 MAILBOX_t *mb = &pmb->mb;
1166 struct lpfc_vport *vport = pmb->vport;
1167 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1169 switch (mb->mbxStatus) {
1173 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1174 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
1178 vport->unreg_vpi_cmpl = VPORT_OK;
1179 mempool_free(pmb, phba->mbox_mem_pool);
1181 * This shost reference might have been taken at the beginning of
1182 * lpfc_vport_delete()
1184 if (vport->load_flag & FC_UNLOADING)
1185 scsi_host_put(shost);
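/*
 * Issue an UNREG_VPI mailbox command for this vport, completing through
 * lpfc_mbx_cmpl_unreg_vpi.
 */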
1189 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1191 struct lpfc_hba *phba = vport->phba;
1195 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1199 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1200 mbox->vport = vport;
1201 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1202 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1203 if (rc == MBX_NOT_FINISHED) {
1204 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1205 "1800 Could not issue unreg_vpi\n");
1206 mempool_free(mbox, phba->mbox_mem_pool);
1207 vport->unreg_vpi_cmpl = VPORT_ERROR;
1212 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1214 struct lpfc_vport *vport = pmb->vport;
1215 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1216 MAILBOX_t *mb = &pmb->mb;
1218 switch (mb->mbxStatus) {
1222 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1223 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
1225 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1226 spin_lock_irq(shost->host_lock);
1227 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1228 spin_unlock_irq(shost->host_lock);
1229 vport->fc_myDID = 0;
1233 vport->num_disc_nodes = 0;
1234 /* go through NPR list and issue ELS PLOGIs */
1235 if (vport->fc_npr_cnt)
1236 lpfc_els_disc_plogi(vport);
1238 if (!vport->num_disc_nodes) {
1239 spin_lock_irq(shost->host_lock);
1240 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1241 spin_unlock_irq(shost->host_lock);
1242 lpfc_can_disctmo(vport);
1244 vport->port_state = LPFC_VPORT_READY;
1247 mempool_free(pmb, phba->mbox_mem_pool);
1252 * This routine handles processing a Fabric REG_LOGIN mailbox
1253 * command upon completion. It is setup in the LPFC_MBOXQ
1254 * as the completion routine when the command is
1255 * handed off to the SLI layer.
1258 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1260 struct lpfc_vport *vport = pmb->vport;
1261 MAILBOX_t *mb = &pmb->mb;
1262 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1263 struct lpfc_nodelist *ndlp;
1264 struct lpfc_vport **vports;
1267 ndlp = (struct lpfc_nodelist *) pmb->context2;
1268 pmb->context1 = NULL;
1269 pmb->context2 = NULL;
1270 if (mb->mbxStatus) {
1271 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1273 mempool_free(pmb, phba->mbox_mem_pool);
1276 if (phba->fc_topology == TOPOLOGY_LOOP) {
1277 /* FLOGI failed, use loop map to make discovery list */
1278 lpfc_disc_list_loopmap(vport);
1280 /* Start discovery */
1281 lpfc_disc_start(vport);
1285 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1286 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1287 "0258 Register Fabric login error: 0x%x\n",
1292 ndlp->nlp_rpi = mb->un.varWords[0];
1293 ndlp->nlp_type |= NLP_FABRIC;
1294 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1296 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
1298 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1299 vports = lpfc_create_vport_work_array(phba);
1302 i < LPFC_MAX_VPORTS && vports[i] != NULL;
1304 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1306 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1307 lpfc_initial_fdisc(vports[i]);
1308 else if (phba->sli3_options &
1309 LPFC_SLI3_NPIV_ENABLED) {
1310 lpfc_vport_set_state(vports[i],
1311 FC_VPORT_NO_FABRIC_SUPP);
1312 lpfc_printf_vlog(vport, KERN_ERR,
1315 "Fabric support\n");
1318 lpfc_destroy_vport_work_array(vports);
1319 lpfc_do_scr_ns_plogi(phba, vport);
1322 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1324 mempool_free(pmb, phba->mbox_mem_pool);
1329 * This routine handles processing a NameServer REG_LOGIN mailbox
1330 * command upon completion. It is setup in the LPFC_MBOXQ
1331 * as the completion routine when the command is
1332 * handed off to the SLI layer.
1335 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1337 MAILBOX_t *mb = &pmb->mb;
1338 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1339 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1340 struct lpfc_vport *vport = pmb->vport;
1342 if (mb->mbxStatus) {
1345 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1347 mempool_free(pmb, phba->mbox_mem_pool);
1348 lpfc_drop_node(vport, ndlp);
1350 if (phba->fc_topology == TOPOLOGY_LOOP) {
1352 * RegLogin failed, use loop map to make discovery list.
1355 lpfc_disc_list_loopmap(vport);
1357 /* Start discovery */
1358 lpfc_disc_start(vport);
1361 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1362 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1363 "0260 Register NameServer error: 0x%x\n",
1368 pmb->context1 = NULL;
1370 ndlp->nlp_rpi = mb->un.varWords[0];
1371 ndlp->nlp_type |= NLP_FABRIC;
1372 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1374 if (vport->port_state < LPFC_VPORT_READY) {
1375 /* Link up discovery requires Fabric registration. */
1376 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1377 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1378 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1379 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1380 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1382 /* Issue SCR just before NameServer GID_FT Query */
1383 lpfc_issue_els_scr(vport, SCR_DID, 0);
1386 vport->fc_ns_retry = 0;
1387 /* Good status, issue CT Request to NameServer */
1388 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
1389 /* Cannot issue NameServer Query, so finish up discovery */
1394 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1396 mempool_free(pmb, phba->mbox_mem_pool);
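/*
 * Register (or re-register) the node with the FC transport as a remote
 * port, set its roles, and record the assigned SCSI target id.
 */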
1402 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1404 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1405 struct fc_rport *rport;
1406 struct lpfc_rport_data *rdata;
1407 struct fc_rport_identifiers rport_ids;
1408 struct lpfc_hba *phba = vport->phba;
1410 /* Remote port has reappeared. Re-register w/ FC transport */
1411 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1412 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1413 rport_ids.port_id = ndlp->nlp_DID;
1414 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1417 * We leave our node pointer in rport->dd_data when we unregister a
1418 * FCP target port. But fc_remote_port_add zeros the space to which
1419 * rport->dd_data points. So, if we're reusing a previously
1420 * registered port, drop the reference that we took the last time we
1421 * registered the port.
1423 if (ndlp->rport && ndlp->rport->dd_data &&
1424 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
1428 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
1429 "rport add: did:x%x flg:x%x type x%x",
1430 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1432 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
1433 if (!rport || !get_device(&rport->dev)) {
1434 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1435 "Warning: fc_remote_port_add failed\n");
1439 /* initialize static port data */
1440 rport->maxframe_size = ndlp->nlp_maxframe;
1441 rport->supported_classes = ndlp->nlp_class_sup;
1442 rdata = rport->dd_data;
1443 rdata->pnode = lpfc_nlp_get(ndlp);
1445 if (ndlp->nlp_type & NLP_FCP_TARGET)
1446 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1447 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1448 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1451 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1452 fc_remote_port_rolechg(rport, rport_ids.roles);
1454 if ((rport->scsi_target_id != -1) &&
1455 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1456 ndlp->nlp_sid = rport->scsi_target_id;
1462 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1464 struct fc_rport *rport = ndlp->rport;
1466 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
1467 "rport delete: did:x%x flg:x%x type x%x",
1468 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1470 fc_remote_port_delete(rport);
1476 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
1478 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1480 spin_lock_irq(shost->host_lock);
1482 case NLP_STE_UNUSED_NODE:
1483 vport->fc_unused_cnt += count;
1485 case NLP_STE_PLOGI_ISSUE:
1486 vport->fc_plogi_cnt += count;
1488 case NLP_STE_ADISC_ISSUE:
1489 vport->fc_adisc_cnt += count;
1491 case NLP_STE_REG_LOGIN_ISSUE:
1492 vport->fc_reglogin_cnt += count;
1494 case NLP_STE_PRLI_ISSUE:
1495 vport->fc_prli_cnt += count;
1497 case NLP_STE_UNMAPPED_NODE:
1498 vport->fc_unmap_cnt += count;
1500 case NLP_STE_MAPPED_NODE:
1501 vport->fc_map_cnt += count;
1503 case NLP_STE_NPR_NODE:
1504 vport->fc_npr_cnt += count;
1507 spin_unlock_irq(shost->host_lock);
1511 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1512 int old_state, int new_state)
1514 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1516 if (new_state == NLP_STE_UNMAPPED_NODE) {
1517 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1518 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1519 ndlp->nlp_type |= NLP_FC_NODE;
1521 if (new_state == NLP_STE_MAPPED_NODE)
1522 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1523 if (new_state == NLP_STE_NPR_NODE)
1524 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
1526 /* Transport interface */
1527 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1528 old_state == NLP_STE_UNMAPPED_NODE)) {
1529 vport->phba->nport_event_cnt++;
1530 lpfc_unregister_remote_port(ndlp);
1533 if (new_state == NLP_STE_MAPPED_NODE ||
1534 new_state == NLP_STE_UNMAPPED_NODE) {
1535 vport->phba->nport_event_cnt++;
1537 * Tell the fc transport about the port, if we haven't
1538 * already. If we have, and it's a scsi entity, be
1539 * sure to unblock any attached scsi devices
1541 lpfc_register_remote_port(vport, ndlp);
1544 * if we added to Mapped list, but the remote port
1545 * registration failed or assigned a target id outside
1546 * our presentable range - move the node to the Unmapped list.
1549 if (new_state == NLP_STE_MAPPED_NODE &&
1551 ndlp->rport->scsi_target_id == -1 ||
1552 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
1553 spin_lock_irq(shost->host_lock);
1554 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1555 spin_unlock_irq(shost->host_lock);
1556 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1561 lpfc_nlp_state_name(char *buffer, size_t size, int state)
1563 static char *states[] = {
1564 [NLP_STE_UNUSED_NODE] = "UNUSED",
1565 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
1566 [NLP_STE_ADISC_ISSUE] = "ADISC",
1567 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
1568 [NLP_STE_PRLI_ISSUE] = "PRLI",
1569 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
1570 [NLP_STE_MAPPED_NODE] = "MAPPED",
1571 [NLP_STE_NPR_NODE] = "NPR",
1574 if (state < NLP_STE_MAX_STATE && states[state])
1575 strlcpy(buffer, states[state], size);
1577 snprintf(buffer, size, "unknown (%d)", state);
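/*
 * Transition a node to a new discovery state: log the change, update the
 * per-state counters and list membership, and let lpfc_nlp_state_cleanup()
 * handle transport registration side effects.
 */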
1582 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1585 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1586 int old_state = ndlp->nlp_state;
1587 char name1[16], name2[16];
1589 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1590 "0904 NPort state transition x%06x, %s -> %s\n",
1592 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1593 lpfc_nlp_state_name(name2, sizeof(name2), state));
1595 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1596 "node statechg did:x%x old:%d ste:%d",
1597 ndlp->nlp_DID, old_state, state);
1599 if (old_state == NLP_STE_NPR_NODE &&
1600 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1601 state != NLP_STE_NPR_NODE)
1602 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1603 if (old_state == NLP_STE_UNMAPPED_NODE) {
1604 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1605 ndlp->nlp_type &= ~NLP_FC_NODE;
1608 if ((old_state == NLP_STE_UNUSED_NODE) &&
1609 (state != NLP_STE_UNUSED_NODE) &&
1610 (ndlp->nlp_flag & NLP_DELAYED_RM)) {
1611 /* We are using the ndlp after all, so reverse
1612 * the delayed removal of it.
1614 ndlp->nlp_flag &= ~NLP_DELAYED_RM;
1618 if (list_empty(&ndlp->nlp_listp)) {
1619 spin_lock_irq(shost->host_lock);
1620 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1621 spin_unlock_irq(shost->host_lock);
1622 } else if (old_state)
1623 lpfc_nlp_counters(vport, old_state, -1);
1625 ndlp->nlp_state = state;
1626 lpfc_nlp_counters(vport, state, 1);
1627 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
1631 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1633 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1635 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1636 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1637 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1638 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1639 spin_lock_irq(shost->host_lock);
1640 list_del_init(&ndlp->nlp_listp);
1641 spin_unlock_irq(shost->host_lock);
1642 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1643 NLP_STE_UNUSED_NODE);
1647 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1649 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1650 if (!(ndlp->nlp_flag & NLP_DELAYED_RM))
1656 * Start / ReStart rescue timer for Discovery / RSCN handling
1659 lpfc_set_disctmo(struct lpfc_vport *vport)
1661 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1662 struct lpfc_hba *phba = vport->phba;
1665 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
1666 /* For FAN, timeout should be greater than edtov */
1667 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1669 /* Normal discovery timeout should be greater than the ELS/CT timeout.
1670 * FC spec states we need 3 * ratov for CT requests
1672 tmo = ((phba->fc_ratov * 3) + 3);
1676 if (!timer_pending(&vport->fc_disctmo)) {
1677 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1678 "set disc timer: tmo:x%x state:x%x flg:x%x",
1679 tmo, vport->port_state, vport->fc_flag);
1682 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1683 spin_lock_irq(shost->host_lock);
1684 vport->fc_flag |= FC_DISC_TMO;
1685 spin_unlock_irq(shost->host_lock);
1687 /* Start Discovery Timer state <hba_state> */
1688 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1689 "0247 Start Discovery Timer state x%x "
1690 "Data: x%x x%lx x%x x%x\n",
1691 vport->port_state, tmo,
1692 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1693 vport->fc_adisc_cnt);
1699 * Cancel rescue timer for Discovery / RSCN handling
1702 lpfc_can_disctmo(struct lpfc_vport *vport)
1704 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1705 unsigned long iflags;
1707 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1708 "can disc timer: state:x%x rtry:x%x flg:x%x",
1709 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1711 /* Turn off discovery timer if it's running */
1712 if (vport->fc_flag & FC_DISC_TMO) {
1713 spin_lock_irqsave(shost->host_lock, iflags);
1714 vport->fc_flag &= ~FC_DISC_TMO;
1715 spin_unlock_irqrestore(shost->host_lock, iflags);
1716 del_timer_sync(&vport->fc_disctmo);
1717 spin_lock_irqsave(&vport->work_port_lock, iflags);
1718 vport->work_port_events &= ~WORKER_DISC_TMO;
1719 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
1722 /* Cancel Discovery Timer state <hba_state> */
1723 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1724 "0248 Cancel Discovery Timer state x%x "
1725 "Data: x%x x%x x%x\n",
1726 vport->port_state, vport->fc_flag,
1727 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
1732 * Check specified ring for outstanding IOCB on the SLI queue
1733 * Return true if iocb matches the specified nport
1736 lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1737 struct lpfc_sli_ring *pring,
1738 struct lpfc_iocbq *iocb,
1739 struct lpfc_nodelist *ndlp)
1741 struct lpfc_sli *psli = &phba->sli;
1742 IOCB_t *icmd = &iocb->iocb;
1743 struct lpfc_vport *vport = ndlp->vport;
1745 if (iocb->vport != vport)
1748 if (pring->ringno == LPFC_ELS_RING) {
1749 switch (icmd->ulpCommand) {
1750 case CMD_GEN_REQUEST64_CR:
1751 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1753 case CMD_ELS_REQUEST64_CR:
1754 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1756 case CMD_XMIT_ELS_RSP64_CX:
1757 if (iocb->context1 == (uint8_t *) ndlp)
1760 } else if (pring->ringno == psli->extra_ring) {
1762 } else if (pring->ringno == psli->fcp_ring) {
1763 /* Skip match check if waiting to relogin to FCP target */
1764 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1765 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1768 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1771 } else if (pring->ringno == psli->next_ring) {
1778 * Free resources / clean up outstanding I/Os
1779 * associated with nlp_rpi in the LPFC_NODELIST entry.
1782 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1784 LIST_HEAD(completions);
1785 struct lpfc_sli *psli;
1786 struct lpfc_sli_ring *pring;
1787 struct lpfc_iocbq *iocb, *next_iocb;
1791 lpfc_fabric_abort_nport(ndlp);
1794 * Everything that matches on txcmplq will be returned
1795 * by firmware with a no rpi error.
1798 rpi = ndlp->nlp_rpi;
1800 /* Now process each ring */
1801 for (i = 0; i < psli->num_rings; i++) {
1802 pring = &psli->ring[i];
1804 spin_lock_irq(&phba->hbalock);
1805 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1808 * Check to see if iocb matches the nport we are looking for.
1811 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1813 /* It matches, so dequeue it and call the completion with an error */
1815 list_move_tail(&iocb->list,
1820 spin_unlock_irq(&phba->hbalock);
1824 while (!list_empty(&completions)) {
1825 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1826 list_del_init(&iocb->list);
1828 if (!iocb->iocb_cmpl)
1829 lpfc_sli_release_iocbq(phba, iocb);
1832 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1833 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1834 (iocb->iocb_cmpl)(phba, iocb, iocb);
1842 * Free rpi associated with LPFC_NODELIST entry.
1843 * This routine is called from lpfc_freenode(), when we are removing
1844 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1845 * LOGO that completes successfully, and we are waiting to PLOGI back
1846 * to the remote NPort. In addition, it is called after we receive
1847 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
1848 * we are waiting to PLOGI back to the remote NPort.
1851 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1853 struct lpfc_hba *phba = vport->phba;
1857 if (ndlp->nlp_rpi) {
1858 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1860 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1861 mbox->vport = vport;
1862 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1863 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1864 if (rc == MBX_NOT_FINISHED)
1865 mempool_free(mbox, phba->mbox_mem_pool);
1867 lpfc_no_rpi(phba, ndlp);
1875 lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1877 struct lpfc_hba *phba = vport->phba;
1881 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1883 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1884 mbox->vport = vport;
1885 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1886 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1887 if (rc == MBX_NOT_FINISHED) {
1888 mempool_free(mbox, phba->mbox_mem_pool);
1894 lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1896 struct lpfc_hba *phba = vport->phba;
1900 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1902 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1903 mbox->vport = vport;
1904 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1905 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1906 if (rc == MBX_NOT_FINISHED) {
1907 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1908 "1815 Could not issue "
1909 "unreg_did (default rpis)\n");
1910 mempool_free(mbox, phba->mbox_mem_pool);
1916 * Free resources associated with LPFC_NODELIST entry
1917 * so it can be freed.
1920 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1922 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1923 struct lpfc_hba *phba = vport->phba;
1924 LPFC_MBOXQ_t *mb, *nextmb;
1925 struct lpfc_dmabuf *mp;
1927 /* Cleanup node for NPort <nlp_DID> */
1928 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1929 "0900 Cleanup node for NPort x%x "
1930 "Data: x%x x%x x%x\n",
1931 ndlp->nlp_DID, ndlp->nlp_flag,
1932 ndlp->nlp_state, ndlp->nlp_rpi);
1933 lpfc_dequeue_node(vport, ndlp);
1935 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1936 if ((mb = phba->sli.mbox_active)) {
1937 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1938 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1939 mb->context2 = NULL;
1940 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1944 spin_lock_irq(&phba->hbalock);
1945 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1946 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1947 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1948 mp = (struct lpfc_dmabuf *) (mb->context1);
1950 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1953 list_del(&mb->list);
1954 mempool_free(mb, phba->mbox_mem_pool);
1958 spin_unlock_irq(&phba->hbalock);
1960 lpfc_els_abort(phba,ndlp);
1961 spin_lock_irq(shost->host_lock);
1962 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1963 spin_unlock_irq(shost->host_lock);
1965 ndlp->nlp_last_elscmd = 0;
1966 del_timer_sync(&ndlp->nlp_delayfunc);
1968 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1969 list_del_init(&ndlp->els_retry_evt.evt_listp);
1970 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
1971 list_del_init(&ndlp->dev_loss_evt.evt_listp);
1973 lpfc_unreg_rpi(vport, ndlp);
1979 * Check to see if we can free the nlp back to the freelist.
1980 * If we are in the middle of using the nlp in the discovery state
1981 * machine, defer the free till we reach the end of the state machine.
1984 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1986 struct lpfc_rport_data *rdata;
1988 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1989 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1992 lpfc_cleanup_node(vport, ndlp);
1995 * We can get here with a non-NULL ndlp->rport because when we
1996 * unregister a rport we don't break the rport/node linkage. So if we
1997 * do, make sure we don't leave any dangling pointers behind.
2000 rdata = ndlp->rport->dd_data;
2001 rdata->pnode = NULL;
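/*
 * Return true if the given DID matches this node, allowing for
 * area/domain wildcard (zero) matches when addressing is incomplete.
 */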
2007 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2010 D_ID mydid, ndlpdid, matchdid;
2012 if (did == Bcast_DID)
2015 if (ndlp->nlp_DID == 0) {
2019 /* First check for Direct match */
2020 if (ndlp->nlp_DID == did)
2023 /* Next check for area/domain identically equals 0 match */
2024 mydid.un.word = vport->fc_myDID;
2025 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
2029 matchdid.un.word = did;
2030 ndlpdid.un.word = ndlp->nlp_DID;
2031 if (matchdid.un.b.id == ndlpdid.un.b.id) {
2032 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
2033 (mydid.un.b.area == matchdid.un.b.area)) {
2034 if ((ndlpdid.un.b.domain == 0) &&
2035 (ndlpdid.un.b.area == 0)) {
2036 if (ndlpdid.un.b.id)
2042 matchdid.un.word = ndlp->nlp_DID;
2043 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
2044 (mydid.un.b.area == ndlpdid.un.b.area)) {
2045 if ((matchdid.un.b.domain == 0) &&
2046 (matchdid.un.b.area == 0)) {
2047 if (matchdid.un.b.id)
2055 /* Search for a nodelist entry */
2056 static struct lpfc_nodelist *
2057 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2059 struct lpfc_nodelist *ndlp;
2062 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2063 if (lpfc_matchdid(vport, ndlp, did)) {
2064 data1 = (((uint32_t) ndlp->nlp_state << 24) |
2065 ((uint32_t) ndlp->nlp_xri << 16) |
2066 ((uint32_t) ndlp->nlp_type << 8) |
2067 ((uint32_t) ndlp->nlp_rpi & 0xff));
2068 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2069 "0929 FIND node DID "
2070 "Data: x%p x%x x%x x%x\n",
2071 ndlp, ndlp->nlp_DID,
2072 ndlp->nlp_flag, data1);
2077 /* FIND node did <did> NOT FOUND */
2078 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2079 "0932 FIND node did x%x NOT FOUND.\n", did);
2083 struct lpfc_nodelist *
2084 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2086 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2087 struct lpfc_nodelist *ndlp;
2089 spin_lock_irq(shost->host_lock);
2090 ndlp = __lpfc_findnode_did(vport, did);
2091 spin_unlock_irq(shost->host_lock);
2095 struct lpfc_nodelist *
2096 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2098 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2099 struct lpfc_nodelist *ndlp;
2101 ndlp = lpfc_findnode_did(vport, did);
2103 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
2104 lpfc_rscn_payload_check(vport, did) == 0)
2106 ndlp = (struct lpfc_nodelist *)
2107 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
2110 lpfc_nlp_init(vport, ndlp, did);
2111 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2112 spin_lock_irq(shost->host_lock);
2113 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2114 spin_unlock_irq(shost->host_lock);
2117 if (vport->fc_flag & FC_RSCN_MODE) {
2118 if (lpfc_rscn_payload_check(vport, did)) {
2119 spin_lock_irq(shost->host_lock);
2120 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2121 spin_unlock_irq(shost->host_lock);
2123 /* Since this node is marked for discovery,
2124 * delay timeout is not needed.
2126 if (ndlp->nlp_flag & NLP_DELAY_TMO)
2127 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2131 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
2132 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
2134 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2135 spin_lock_irq(shost->host_lock);
2136 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2137 spin_unlock_irq(shost->host_lock);
2142 /* Build a list of nodes to discover based on the loopmap */
2144 lpfc_disc_list_loopmap(struct lpfc_vport *vport)
2146 struct lpfc_hba *phba = vport->phba;
2148 uint32_t alpa, index;
2150 if (!lpfc_is_link_up(phba))
2153 if (phba->fc_topology != TOPOLOGY_LOOP)
2156 /* Check for loop map present or not */
2157 if (phba->alpa_map[0]) {
2158 for (j = 1; j <= phba->alpa_map[0]; j++) {
2159 alpa = phba->alpa_map[j];
2160 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
2162 lpfc_setup_disc_node(vport, alpa);
2165 /* No alpamap, so try all alpa's */
2166 for (j = 0; j < FC_MAXLOOP; j++) {
2167 /* If cfg_scan_down is set, start from highest
2168 * ALPA (0xef) to lowest (0x1).
2170 if (vport->cfg_scan_down)
2173 index = FC_MAXLOOP - j - 1;
2174 alpa = lpfcAlpaArray[index];
2175 if ((vport->fc_myDID & 0xff) == alpa)
2177 lpfc_setup_disc_node(vport, alpa);
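/*
 * Issue a CLEAR_LA mailbox command on the physical port (skipped if one
 * is already outstanding); on failure flush the discovery list and mark
 * the HBA in error.
 */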
2184 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2187 struct lpfc_sli *psli = &phba->sli;
2188 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2189 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
2190 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
2194 * if it's not a physical port or if we already sent
2195 * clear_la then don't send it.
2197 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2198 (vport->port_type != LPFC_PHYSICAL_PORT))
2201 /* Link up discovery */
2202 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2203 phba->link_state = LPFC_CLEAR_LA;
2204 lpfc_clear_la(phba, mbox);
2205 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2206 mbox->vport = vport;
2207 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2208 if (rc == MBX_NOT_FINISHED) {
2209 mempool_free(mbox, phba->mbox_mem_pool);
2210 lpfc_disc_flush_list(vport);
2211 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2212 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2213 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2214 phba->link_state = LPFC_HBA_ERROR;
2219 /* Reg_vpi to tell firmware to resume normal operations */
2221 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2223 LPFC_MBOXQ_t *regvpimbox;
2225 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2227 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2228 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2229 regvpimbox->vport = vport;
2230 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
2231 == MBX_NOT_FINISHED) {
2232 mempool_free(regvpimbox, phba->mbox_mem_pool);
2237 /* Start Link up / RSCN discovery on NPR nodes */
2239 lpfc_disc_start(struct lpfc_vport *vport)
2241 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2242 struct lpfc_hba *phba = vport->phba;
2244 uint32_t clear_la_pending;
2247 if (!lpfc_is_link_up(phba))
2250 if (phba->link_state == LPFC_CLEAR_LA)
2251 clear_la_pending = 1;
2253 clear_la_pending = 0;
2255 if (vport->port_state < LPFC_VPORT_READY)
2256 vport->port_state = LPFC_DISC_AUTH;
2258 lpfc_set_disctmo(vport);
2260 if (vport->fc_prevDID == vport->fc_myDID)
2265 vport->fc_prevDID = vport->fc_myDID;
2266 vport->num_disc_nodes = 0;
2268 /* Start Discovery state <hba_state> */
2269 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2270 "0202 Start Discovery hba state x%x "
2271 "Data: x%x x%x x%x\n",
2272 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
2273 vport->fc_adisc_cnt);
2275 /* First do ADISCs - if any */
2276 num_sent = lpfc_els_disc_adisc(vport);
2282 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2283 * continue discovery.
2285 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2286 !(vport->fc_flag & FC_RSCN_MODE)) {
2287 lpfc_issue_reg_vpi(phba, vport);
2292 * For SLI2, we need to set port_state to READY and continue discovery.
2295 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
2296 /* If we get here, there is nothing to ADISC */
2297 if (vport->port_type == LPFC_PHYSICAL_PORT)
2298 lpfc_issue_clear_la(phba, vport);
2300 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2301 vport->num_disc_nodes = 0;
2302 /* go thru NPR nodes and issue ELS PLOGIs */
2303 if (vport->fc_npr_cnt)
2304 lpfc_els_disc_plogi(vport);
2306 if (!vport->num_disc_nodes) {
2307 spin_lock_irq(shost->host_lock);
2308 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2309 spin_unlock_irq(shost->host_lock);
2310 lpfc_can_disctmo(vport);
2313 vport->port_state = LPFC_VPORT_READY;
2315 /* Next do PLOGIs - if any */
2316 num_sent = lpfc_els_disc_plogi(vport);
2321 if (vport->fc_flag & FC_RSCN_MODE) {
2322 /* Check to see if more RSCNs came in while we
2323 * were processing this one.
2325 if ((vport->fc_rscn_id_cnt == 0) &&
2326 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
2327 spin_lock_irq(shost->host_lock);
2328 vport->fc_flag &= ~FC_RSCN_MODE;
2329 spin_unlock_irq(shost->host_lock);
2330 lpfc_can_disctmo(vport);
2332 lpfc_els_handle_rscn(vport);
2339 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
2340 * ring that match the specified nodelist.
2343 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2345 LIST_HEAD(completions);
2346 struct lpfc_sli *psli;
2348 struct lpfc_iocbq *iocb, *next_iocb;
2349 struct lpfc_sli_ring *pring;
2352 pring = &psli->ring[LPFC_ELS_RING];
2354 /* Error out any matching ELS iocbs on the txq or txcmplq.
2355 * First check the txq.
2357 spin_lock_irq(&phba->hbalock);
2358 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2359 if (iocb->context1 != ndlp) {
2363 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2364 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2366 list_move_tail(&iocb->list, &completions);
2371 /* Next check the txcmplq */
2372 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2373 if (iocb->context1 != ndlp) {
2377 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
2378 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
2379 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
2382 spin_unlock_irq(&phba->hbalock);
2384 while (!list_empty(&completions)) {
2385 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2386 list_del_init(&iocb->list);
2388 if (!iocb->iocb_cmpl)
2389 lpfc_sli_release_iocbq(phba, iocb);
2392 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2393 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2394 (iocb->iocb_cmpl) (phba, iocb, iocb);
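/* Walk the vport's node list and, for any node still in PLOGI_ISSUE or
 * ADISC_ISSUE state, flush its outstanding ELS iocbs via lpfc_free_tx().
 */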
2400 lpfc_disc_flush_list(struct lpfc_vport *vport)
2402 struct lpfc_nodelist *ndlp, *next_ndlp;
2403 struct lpfc_hba *phba = vport->phba;
2405 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
2406 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2408 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2409 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2410 lpfc_free_tx(phba, ndlp);
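/* Release discovery resources held by this vport: flush pending RSCNs,
 * outstanding ELS commands, and the discovery node list.
 */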
2417 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2419 lpfc_els_flush_rscn(vport);
2420 lpfc_els_flush_cmd(vport);
2421 lpfc_disc_flush_list(vport);
2424 /*****************************************************************************/
2426 * NAME: lpfc_disc_timeout
2428 * FUNCTION: Fibre Channel driver discovery timeout routine.
2430 * EXECUTION ENVIRONMENT: interrupt only
2438 /*****************************************************************************/
2440 lpfc_disc_timeout(unsigned long ptr)
2442 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2443 struct lpfc_hba *phba = vport->phba;
2444 unsigned long flags = 0;
2446 if (unlikely(!phba))
2449 if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
2450 spin_lock_irqsave(&vport->work_port_lock, flags);
2451 vport->work_port_events |= WORKER_DISC_TMO;
2452 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2454 spin_lock_irqsave(&phba->hbalock, flags);
2455 if (phba->work_wait)
2456 lpfc_worker_wake_up(phba);
2457 spin_unlock_irqrestore(&phba->hbalock, flags);
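/* Discovery timeout handler, run once lpfc_disc_timeout() above has queued
 * WORKER_DISC_TMO. Clears FC_DISC_TMO and takes recovery action based on the
 * vport's port_state (or, in the default case, the HBA link_state).
 */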
2463 lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2465 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2466 struct lpfc_hba *phba = vport->phba;
2467 struct lpfc_sli *psli = &phba->sli;
2468 struct lpfc_nodelist *ndlp, *next_ndlp;
2469 LPFC_MBOXQ_t *initlinkmbox;
2470 int rc, clrlaerr = 0;
2472 if (!(vport->fc_flag & FC_DISC_TMO))
2475 spin_lock_irq(shost->host_lock);
2476 vport->fc_flag &= ~FC_DISC_TMO;
2477 spin_unlock_irq(shost->host_lock);
2479 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2480 "disc timeout: state:x%x rtry:x%x flg:x%x",
2481 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
2483 switch (vport->port_state) {
2485 case LPFC_LOCAL_CFG_LINK:
2486 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
2490 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
2491 "0221 FAN timeout\n");
2492 /* Start discovery by sending FLOGI, clean up old rpis */
2493 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2495 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2497 if (ndlp->nlp_type & NLP_FABRIC) {
2498 /* Clean up the ndlp on Fabric connections */
2499 lpfc_drop_node(vport, ndlp);
2500 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2501 /* Fail outstanding IO now since device
2502 * is marked for PLOGI.
2504 lpfc_unreg_rpi(vport, ndlp);
2507 if (vport->port_state != LPFC_FLOGI) {
2508 lpfc_initial_flogi(vport);
2514 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2515 /* Initial FLOGI timeout */
2516 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2517 "0222 Initial %s timeout\n",
2518 vport->vpi ? "FLOGI" : "FDISC");
2520 /* Assume no Fabric and go on with discovery.
2521 * Check for outstanding ELS FLOGI to abort.
2524 /* FLOGI failed, so just use loop map to make discovery list */
2525 lpfc_disc_list_loopmap(vport);
2527 /* Start discovery */
2528 lpfc_disc_start(vport);
2531 case LPFC_FABRIC_CFG_LINK:
2532 /* port_state is identically LPFC_FABRIC_CFG_LINK while waiting for NameServer login */
2534 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2535 "0223 Timeout while waiting for "
2536 "NameServer login\n");
2537 /* Next look for NameServer ndlp */
2538 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2541 /* Start discovery */
2542 lpfc_disc_start(vport);
2546 /* Check for wait for NameServer Rsp timeout */
2547 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2548 "0224 NameServer Query timeout "
2550 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2552 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2553 /* Try it one more time */
2554 vport->fc_ns_retry++;
2555 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2556 vport->fc_ns_retry, 0);
2560 vport->fc_ns_retry = 0;
2563 * Discovery is over.
2564 * set port_state to PORT_READY if SLI2.
2565 * cmpl_reg_vpi will set port_state to READY for SLI3.
2567 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2568 lpfc_issue_reg_vpi(phba, vport);
2569 else { /* NPIV Not enabled */
2570 lpfc_issue_clear_la(phba, vport);
2571 vport->port_state = LPFC_VPORT_READY;
2574 /* Setup and issue mailbox INITIALIZE LINK command */
2575 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2576 if (!initlinkmbox) {
2577 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2578 "0206 Device Discovery "
2579 "completion error\n");
2580 phba->link_state = LPFC_HBA_ERROR;
2584 lpfc_linkdown(phba);
2585 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2586 phba->cfg_link_speed);
2587 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2588 initlinkmbox->vport = vport;
2589 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2590 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
2591 lpfc_set_loopback_flag(phba);
2592 if (rc == MBX_NOT_FINISHED)
2593 mempool_free(initlinkmbox, phba->mbox_mem_pool);
2597 case LPFC_DISC_AUTH:
2598 /* Node Authentication timeout */
2599 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2600 "0227 Node Authentication timeout\n");
2601 lpfc_disc_flush_list(vport);
2604 * set port_state to PORT_READY if SLI2.
2605 * cmpl_reg_vpi will set port_state to READY for SLI3.
2607 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2608 lpfc_issue_reg_vpi(phba, vport);
2609 else { /* NPIV Not enabled */
2610 lpfc_issue_clear_la(phba, vport);
2611 vport->port_state = LPFC_VPORT_READY;
2615 case LPFC_VPORT_READY:
2616 if (vport->fc_flag & FC_RSCN_MODE) {
2617 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2618 "0231 RSCN timeout Data: x%x "
2620 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2622 /* Cleanup any outstanding ELS commands */
2623 lpfc_els_flush_cmd(vport);
2625 lpfc_els_flush_rscn(vport);
2626 lpfc_disc_flush_list(vport);
2631 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2632 "0229 Unexpected discovery timeout, "
2633 "vport State x%x\n", vport->port_state);
2637 switch (phba->link_state) {
2639 /* CLEAR LA timeout */
2640 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2641 "0228 CLEAR LA timeout\n");
2645 case LPFC_LINK_UNKNOWN:
2646 case LPFC_WARM_START:
2647 case LPFC_INIT_START:
2648 case LPFC_INIT_MBX_CMDS:
2649 case LPFC_LINK_DOWN:
2651 case LPFC_HBA_ERROR:
2652 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2653 "0230 Unexpected timeout, hba link "
2654 "state x%x\n", phba->link_state);
2658 case LPFC_HBA_READY:
2663 lpfc_disc_flush_list(vport);
2664 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2665 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2666 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2667 vport->port_state = LPFC_VPORT_READY;
2674 * This routine handles processing a FDMI REG_LOGIN mailbox
2675 * command upon completion. It is set up in the LPFC_MBOXQ
2676 * as the completion routine when the command is
2677 * handed off to the SLI layer.
2680 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2682 MAILBOX_t *mb = &pmb->mb;
2683 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2684 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2685 struct lpfc_vport *vport = pmb->vport;
2687 pmb->context1 = NULL;
2689 ndlp->nlp_rpi = mb->un.varWords[0];
2690 ndlp->nlp_type |= NLP_FABRIC;
2691 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2694 * Start issuing the Fabric-Device Management Interface (FDMI) command to
2695 * 0xfffffa (the FDMI well-known port), or delay issuing the FDMI command if
2696 * fdmi-on=2 (supporting RPA/hostname)
2699 if (vport->cfg_fdmi_on == 1)
2700 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
2702 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
2704 /* Mailbox took a reference to the node */
2706 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2708 mempool_free(pmb, phba->mbox_mem_pool);
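/* Node-list filter callbacks used with __lpfc_find_node(): match a node by
 * RPI or by WWPN, respectively.
 */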
2714 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
2716 uint16_t *rpi = param;
2718 return ndlp->nlp_rpi == *rpi;
2722 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2724 return memcmp(&ndlp->nlp_portname, param,
2725 sizeof(ndlp->nlp_portname)) == 0;
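/* Walk the vport's fc_nodes list and return the first node that is not in
 * UNUSED state and satisfies the supplied filter. The caller is expected to
 * hold host_lock; lpfc_find_node() below is the locking wrapper.
 */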
2728 struct lpfc_nodelist *
2729 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2731 struct lpfc_nodelist *ndlp;
2733 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2734 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
2735 filter(ndlp, param))
2742 * Search the node list for a remote port matching the filter criteria.
2743 * This routine takes host_lock itself; lock holders should use __lpfc_find_node().
2745 struct lpfc_nodelist *
2746 lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2748 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2749 struct lpfc_nodelist *ndlp;
2751 spin_lock_irq(shost->host_lock);
2752 ndlp = __lpfc_find_node(vport, filter, param);
2753 spin_unlock_irq(shost->host_lock);
2758 * This routine looks up the ndlp list for the given RPI. If the RPI is found,
2759 * it returns the node list element pointer, else it returns NULL.
2761 struct lpfc_nodelist *
2762 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2764 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
2767 struct lpfc_nodelist *
2768 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2770 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2771 struct lpfc_nodelist *ndlp;
2773 spin_lock_irq(shost->host_lock);
2774 ndlp = __lpfc_findnode_rpi(vport, rpi);
2775 spin_unlock_irq(shost->host_lock);
2780 * This routine looks up the ndlp list for the given WWPN. If the WWPN is found,
2781 * it returns the node list element pointer, else it returns NULL.
2783 struct lpfc_nodelist *
2784 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
2786 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2787 struct lpfc_nodelist *ndlp;
2789 spin_lock_irq(shost->host_lock);
2790 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2791 spin_unlock_irq(shost->host_lock);
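/* Initialize a freshly allocated nodelist entry: zero the structure, set up
 * the ELS retry and dev-loss event lists and the retry-delay timer, record
 * the DID and owning vport, and take the initial kref on the node.
 */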
2796 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2799 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2800 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2801 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2802 init_timer(&ndlp->nlp_delayfunc);
2803 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2804 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2805 ndlp->nlp_DID = did;
2806 ndlp->vport = vport;
2807 ndlp->nlp_sid = NLP_NO_SID;
2808 INIT_LIST_HEAD(&ndlp->nlp_listp);
2809 kref_init(&ndlp->kref);
2811 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2812 "node init: did:x%x",
2813 ndlp->nlp_DID, 0, 0);
2818 /* This routine releases all resources associated with a specific NPort's ndlp
2819 * and frees the nodelist back to its mempool.
2822 lpfc_nlp_release(struct kref *kref)
2824 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2827 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2828 "node release: did:x%x flg:x%x type:x%x",
2829 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2831 lpfc_nlp_remove(ndlp->vport, ndlp);
2832 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
2835 /* This routine bumps the reference count for an ndlp structure to ensure that
2836 * one discovery thread won't free an ndlp while another thread is still using it.
2839 struct lpfc_nodelist *
2840 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2843 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2844 "node get: did:x%x flg:x%x refcnt:x%x",
2845 ndlp->nlp_DID, ndlp->nlp_flag,
2846 atomic_read(&ndlp->kref.refcount));
2847 kref_get(&ndlp->kref);
2853 /* This routine decrements the reference count for an ndlp structure. If the
2854 * count goes to 0, this indicates that the associated nodelist should be freed.
2857 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2860 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2861 "node put: did:x%x flg:x%x refcnt:x%x",
2862 ndlp->nlp_DID, ndlp->nlp_flag,
2863 atomic_read(&ndlp->kref.refcount));
2865 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
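/* Typical use of the reference counting above (an illustrative sketch, not
 * code taken from this driver): a caller that hands an ndlp to asynchronous
 * work takes a reference first and drops it from the completion path, e.g.
 *
 *	if (lpfc_nlp_get(ndlp)) {
 *		(start async work that stores the ndlp pointer)
 *	}
 *	(later, in the completion handler)
 *	lpfc_nlp_put(ndlp);
 *
 * This keeps the node from being released while the work is outstanding.
 */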
2868 /* This routine frees the specified nodelist if it is not in use
2869 * by any other discovery thread. This routine returns 1 if the ndlp
2870 * is not being used by anyone and has been freed. A return value of
2871 * 0 indicates it is being used by another discovery thread and the
2872 * refcount is left unchanged.
2875 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
2877 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2878 "node not used: did:x%x flg:x%x refcnt:x%x",
2879 ndlp->nlp_DID, ndlp->nlp_flag,
2880 atomic_read(&ndlp->kref.refcount));
2882 if (atomic_read(&ndlp->kref.refcount) == 1) {