/******************************************************************************
 *                  QLOGIC LINUX SOFTWARE
 *
 * QLogic ISP2x00 device driver for Linux 2.6.x
 * Copyright (C) 2003-2005 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 ******************************************************************************/
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @cmd: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                cflags = CF_WRITE;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                cflags = CF_READ;
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
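
/*
 * Worked example (illustrative): a 17-segment transfer uses the three
 * DSDs in the Command Type 2 IOCB plus (17 - 3) / 7 = 2 Continuation
 * Type 0 IOCBs, so qla2x00_calc_iocbs_32(17) returns 3.
 */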

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
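
/*
 * Likewise for the 64-bit variant (illustrative): 17 segments use the
 * two DSDs in the Command Type 3 IOCB plus (17 - 2) / 5 = 3
 * Continuation Type 1 IOCBs, so qla2x00_calc_iocbs_64(17) returns 4.
 */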

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
{
        cont_entry_t *cont_pkt;

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else {
                ha->request_ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)ha->request_ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else {
                ha->request_ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
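
/*
 * Note: both prep routines above consume the next request-ring slot
 * without re-checking for space; callers must already have reserved
 * enough entries for every continuation IOCB they may need (see the
 * req_cnt accounting in the start_scsi routines below).
 */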

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        if (cmd->use_sg != 0) {
                struct scatterlist *cur_seg;
                struct scatterlist *end_seg;

                cur_seg = (struct scatterlist *)cmd->request_buffer;
                end_seg = cur_seg + tot_dsds;
                while (cur_seg < end_seg) {
                        cont_entry_t *cont_pkt;

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                /*
                                 * Seven DSDs are available in the Continuation
                                 * Type 0 IOCB.
                                 */
                                cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
                                cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                                avail_dsds = 7;
                        }

                        *cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        avail_dsds--;

                        cur_seg++;
                }
        } else {
                *cur_dsd++ = cpu_to_le32(sp->dma_handle);
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
}
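
/*
 * Each 32-bit DSD above is a two-word (address, length) pair; the
 * 64-bit variants below emit three words per segment (address low,
 * address high, length), which is why a Continuation Type 1 IOCB holds
 * five DSDs where a Continuation Type 0 IOCB holds seven.
 */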

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        if (cmd->use_sg != 0) {
                struct scatterlist *cur_seg;
                struct scatterlist *end_seg;

                cur_seg = (struct scatterlist *)cmd->request_buffer;
                end_seg = cur_seg + tot_dsds;
                while (cur_seg < end_seg) {
                        dma_addr_t sle_dma;
                        cont_a64_entry_t *cont_pkt;

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                /*
                                 * Five DSDs are available in the Continuation
                                 * Type 1 IOCB.
                                 */
                                cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
                                cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                                avail_dsds = 5;
                        }

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        avail_dsds--;

                        cur_seg++;
                }
        } else {
                *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
}
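
/*
 * Illustrative split, assuming LSD()/MSD() take the low and high
 * 32 bits of a dma_addr_t: for a segment mapped at 0x0000000123456780,
 * LSD() yields 0x23456780 and MSD() yields 0x00000001, so the DSD
 * carries the full 64-bit bus address in two little-endian words.
 */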

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int ret;
        unsigned long flags;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        cmd_entry_t *cmd_pkt;
        struct scatterlist *sg;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        char tag[2];

        /* Setup device pointers. */
        ret = 0;
        ha = sp->ha;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (ha->marker_needed != 0) {
                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                ha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = ha->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (ha->outstanding_cmds[handle] == 0)
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (cmd->use_sg) {
                sg = (struct scatterlist *) cmd->request_buffer;
                tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
                if (tot_dsds == 0)
                        goto queuing_error;
        } else if (cmd->request_bufflen) {
                dma_addr_t req_dma;

                req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
                    cmd->request_bufflen, cmd->sc_data_direction);
                if (dma_mapping_error(req_dma))
                        goto queuing_error;

                sp->dma_handle = req_dma;
                tot_dsds = 1;
        }

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
        if (ha->req_q_cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (ha->req_ring_index < cnt)
                        ha->req_q_cnt = cnt - ha->req_ring_index;
                else
                        ha->req_q_cnt = ha->request_q_length -
                            (ha->req_ring_index - cnt);
        }
        if (ha->req_q_cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        ha->current_outstanding_cmd = handle;
        ha->outstanding_cmds[handle] = sp;
        sp->ha = ha;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        ha->req_q_cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case MSG_HEAD_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case MSG_ORDERED_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

        /* Build IOCB segments */
        ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        sp->flags |= SRB_DMA_VALID;
        sp->state = SRB_ACTIVE_STATE;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(ha);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (cmd->use_sg && tot_dsds) {
                sg = (struct scatterlist *) cmd->request_buffer;
                pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
        } else if (tot_dsds) {
                pci_unmap_single(ha->pdev, sp->dma_handle,
                    cmd->request_bufflen, cmd->sc_data_direction);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
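
/*
 * The isp_ops hooks used above are expected to resolve to the helpers
 * in this file -- calc_req_entries to qla2x00_calc_iocbs_32/_64 and
 * build_iocbs to qla2x00_build_scsi_iocbs_32/_64 -- depending on
 * whether the HBA was set up for 64-bit DMA addressing.
 */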

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
        if (mrk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                    __func__, ha->host_no));

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(ha);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        ret = __qla2x00_marker(ha, loop_id, lun, type);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (ret);
}
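
/*
 * The __qla2x00_marker()/qla2x00_marker() split follows the usual
 * locked/unlocked convention: the underscore variant assumes the
 * caller already holds hardware_lock, while the wrapper acquires and
 * releases it around the call.
 */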

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
        device_reg_t __iomem *reg = ha->iobase;
        request_t *pkt = NULL;
        uint16_t cnt;
        uint32_t *dword_ptr;
        uint32_t timer;
        uint16_t req_cnt = 1;

        /* Wait 1 second for slot. */
        for (timer = HZ; timer; timer--) {
                if ((req_cnt + 2) >= ha->req_q_cnt) {
                        /* Calculate number of free request entries. */
                        if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
                                cnt = (uint16_t)RD_REG_DWORD(
                                    &reg->isp24.req_q_out);
                        else
                                cnt = qla2x00_debounce_register(
                                    ISP_REQ_Q_OUT(ha, &reg->isp));
                        if (ha->req_ring_index < cnt)
                                ha->req_q_cnt = cnt - ha->req_ring_index;
                        else
                                ha->req_q_cnt = ha->request_q_length -
                                    (ha->req_ring_index - cnt);
                }
                /* If room for request in request ring. */
                if ((req_cnt + 2) < ha->req_q_cnt) {
                        ha->req_q_cnt--;
                        pkt = ha->request_ring_ptr;

                        /* Zero out packet. */
                        dword_ptr = (uint32_t *)pkt;
                        for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
                                *dword_ptr++ = 0;

                        /* Set system defined field. */
                        pkt->sys_define = (uint8_t)ha->req_ring_index;

                        /* Set entry count. */
                        pkt->entry_count = 1;

                        break;
                }

                /* Release ring specific lock */
                spin_unlock(&ha->hardware_lock);

                udelay(2);      /* 2 us */

                /* Check for pending interrupts. */
                /* During init we issue marker directly */
                if (!ha->marker_needed)
                        qla2x00_poll(ha);

                spin_lock_irq(&ha->hardware_lock);
        }
        if (!pkt) {
                DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
        }

        return (pkt);
}
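
/*
 * The (req_cnt + 2) tests above and in the start_scsi routines keep a
 * couple of ring entries permanently in reserve, so the in/out index
 * comparison never has to distinguish a completely full ring from an
 * empty one.
 */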

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
        device_reg_t __iomem *reg = ha->iobase;

        DEBUG5(printk("%s(): IOCB data:\n", __func__));
        DEBUG5(qla2x00_dump_buffer(
            (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        /* Set chip new ring index. */
        if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
                WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
                WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
                RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
}
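
/*
 * The read-back after each ring-index write flushes the posted PCI
 * write so the ISP sees the new index before the caller proceeds; the
 * _RELAXED form is used because no ordering against other memory
 * accesses is needed here, only the flush itself.
 */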

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return (iocbs);
}
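
/*
 * Worked example (illustrative): with one DSD in the Command Type 7
 * IOCB and five per Continuation Type 1 IOCB, an 11-segment transfer
 * needs 1 + (11 - 1) / 5 = 3 entries, so qla24xx_calc_iocbs(11)
 * returns 3.
 */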

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);

        /* One DSD is available in the Command Type 3 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        if (cmd->use_sg != 0) {
                struct scatterlist *cur_seg;
                struct scatterlist *end_seg;

                cur_seg = (struct scatterlist *)cmd->request_buffer;
                end_seg = cur_seg + tot_dsds;
                while (cur_seg < end_seg) {
                        dma_addr_t sle_dma;
                        cont_a64_entry_t *cont_pkt;

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                /*
                                 * Five DSDs are available in the Continuation
                                 * Type 1 IOCB.
                                 */
                                cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
                                cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                                avail_dsds = 5;
                        }

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        avail_dsds--;

                        cur_seg++;
                }
        } else {
                *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int ret;
        unsigned long flags;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        struct cmd_type_7 *cmd_pkt;
        struct scatterlist *sg;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_24xx __iomem *reg;
        char tag[2];

        /* Setup device pointers. */
        ret = 0;
        ha = sp->ha;
        reg = &ha->iobase->isp24;
        cmd = sp->cmd;
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (ha->marker_needed != 0) {
                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
                        return QLA_FUNCTION_FAILED;
                }
                ha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = ha->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (ha->outstanding_cmds[handle] == 0)
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (cmd->use_sg) {
                sg = (struct scatterlist *) cmd->request_buffer;
                tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
                if (tot_dsds == 0)
                        goto queuing_error;
        } else if (cmd->request_bufflen) {
                dma_addr_t req_dma;

                req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
                    cmd->request_bufflen, cmd->sc_data_direction);
                if (dma_mapping_error(req_dma))
                        goto queuing_error;

                sp->dma_handle = req_dma;
                tot_dsds = 1;
        }

        /* Calculate the number of request entries needed. */
        req_cnt = qla24xx_calc_iocbs(tot_dsds);
        if (ha->req_q_cnt < (req_cnt + 2)) {
                cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
                if (ha->req_ring_index < cnt)
                        ha->req_q_cnt = cnt - ha->req_ring_index;
                else
                        ha->req_q_cnt = ha->request_q_length -
                            (ha->req_ring_index - cnt);
        }
        if (ha->req_q_cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet. */
        ha->current_outstanding_cmd = handle;
        ha->outstanding_cmds[handle] = sp;
        sp->ha = ha;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        ha->req_q_cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
        cmd_pkt->handle = handle;

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);

        /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case MSG_HEAD_TAG:
                        cmd_pkt->task = TSK_HEAD_OF_QUEUE;
                        break;
                case MSG_ORDERED_TAG:
                        cmd_pkt->task = TSK_ORDERED;
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        sp->flags |= SRB_DMA_VALID;
        sp->state = SRB_ACTIVE_STATE;

        /* Set chip new ring index. */
        WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
        RD_REG_DWORD_RELAXED(&reg->req_q_in);   /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(ha);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (cmd->use_sg && tot_dsds) {
                sg = (struct scatterlist *) cmd->request_buffer;
                pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
        } else if (tot_dsds) {
                pci_unmap_single(ha->pdev, sp->dma_handle,
                    cmd->request_bufflen, cmd->sc_data_direction);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}
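
/*
 * Relative to qla2x00_start_scsi() above, the 24xx path addresses the
 * target by N_Port handle and 24-bit port ID rather than target/LUN
 * IOCB fields, byte-swaps the CDB into FCP wire order via
 * host_to_fcp_swap(), and carries the tag attribute in the FCP task
 * field instead of the IOCB control_flags.
 */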