2 * iSCSI Initiator over TCP/IP Data-Path
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 Mike Christie
7 * maintained by open-iscsi@googlegroups.com
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published
11 * by the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * See the file COPYING included with this distribution for more details.
28 #include <linux/types.h>
29 #include <linux/list.h>
30 #include <linux/inet.h>
31 #include <linux/blkdev.h>
32 #include <linux/crypto.h>
33 #include <linux/delay.h>
34 #include <linux/kfifo.h>
35 #include <linux/scatterlist.h>
37 #include <scsi/scsi_cmnd.h>
38 #include <scsi/scsi_device.h>
39 #include <scsi/scsi_eh.h>
40 #include <scsi/scsi_request.h>
41 #include <scsi/scsi_tcq.h>
42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi.h>
44 #include <scsi/scsi_transport_iscsi.h>
46 #include "iscsi_tcp.h"
48 MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>");
50 MODULE_DESCRIPTION("iSCSI/TCP data-path");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION("0:4.409");
53 /* #define DEBUG_TCP */
54 /* #define DEBUG_SCSI */
58 #define debug_tcp(fmt...) printk(KERN_DEBUG "tcp: " fmt)
60 #define debug_tcp(fmt...)
64 #define debug_scsi(fmt...) printk(KERN_DEBUG "scsi: " fmt)
66 #define debug_scsi(fmt...)
76 #define INVALID_SN_DELTA 0xffff
78 static unsigned int iscsi_max_lun = 512;
79 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
82 static kmem_cache_t *taskcache;
/*
 * iscsi_buf_init_virt - describe a virtually-contiguous buffer of @size
 * bytes as a single-entry scatterlist inside @ibuf.
 * NOTE(review): return type/braces are elided in this view of the file.
 */
85 iscsi_buf_init_virt(struct iscsi_buf *ibuf, char *vbuf, int size)
87 sg_init_one(&ibuf->sg, (u8 *)vbuf, size);
/*
 * iscsi_buf_init_iov - describe a plain (non-S/G) buffer in @ibuf.
 * The virtual address is stashed in sg.page and sg.offset is set to -1;
 * iscsi_send() keys off that sentinel to use tcp_sendmsg() instead of
 * tcp_sendpage().
 */
92 iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
94 ibuf->sg.page = (void*)vbuf;
95 ibuf->sg.offset = (unsigned int)-1;
96 ibuf->sg.length = size;
/*
 * iscsi_buf_iov_base - current send position inside a non-S/G iscsi_buf.
 * Valid only for buffers set up by iscsi_buf_init_iov(), where sg.page
 * actually holds a virtual address rather than a page pointer.
 */
101 iscsi_buf_iov_base(struct iscsi_buf *ibuf)
103 return (char*)ibuf->sg.page + ibuf->sent;
/*
 * iscsi_buf_init_sg - set up @ibuf from one scatterlist element.
 * Fast path keeps the page reference (zero-copy via sendpage) when the
 * element fits inside a single page and the page refcount is >= 2;
 * otherwise falls back to the iov/sendmsg form via iscsi_buf_init_iov().
 */
107 iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
110 * Fastpath: sg element fits into single page
112 if (sg->length + sg->offset <= PAGE_SIZE && page_count(sg->page) >= 2) {
113 ibuf->sg.page = sg->page;
114 ibuf->sg.offset = sg->offset;
115 ibuf->sg.length = sg->length;
/* slow path: page not safe/suitable for sendpage; copy via iov */
117 iscsi_buf_init_iov(ibuf, page_address(sg->page), sg->length);
/* Number of bytes still to be transmitted from @ibuf. */
122 iscsi_buf_left(struct iscsi_buf *ibuf)
126 rc = ibuf->sg.length - ibuf->sent;
/*
 * iscsi_hdr_digest - compute the header digest over @buf into @crc and
 * extend the buffer length so the 4-byte digest goes out on the wire
 * together with the PDU header.
 */
132 iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
135 crypto_digest_digest(conn->tx_tfm, &buf->sg, 1, crc);
136 buf->sg.length += sizeof(uint32_t);
/*
 * iscsi_conn_failure - transition the connection into failed state.
 * Marks the session FAILED when this is the last/leading connection,
 * suspends both Tx and Rx paths, and notifies userspace of @err.
 */
140 iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
142 struct iscsi_session *session = conn->session;
145 spin_lock_irqsave(&session->lock, flags);
146 if (session->conn_cnt == 1 || session->leadconn == conn)
147 session->state = ISCSI_STATE_FAILED;
148 spin_unlock_irqrestore(&session->lock, flags);
/* stop both directions before reporting the error upward */
149 set_bit(SUSPEND_BIT, &conn->suspend_tx);
150 set_bit(SUSPEND_BIT, &conn->suspend_rx);
151 iscsi_conn_error(iscsi_handle(conn), err);
/*
 * iscsi_check_assign_cmdsn - validate and apply MaxCmdSN/ExpCmdSN from an
 * incoming PDU. Uses serial-number arithmetic with an INVALID_SN_DELTA
 * window to reject implausible values, then advances the session counters
 * when the target's values have moved forward.
 */
155 iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
157 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
158 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
/* MaxCmdSN must not lag ExpCmdSN-1 (modulo the SN window) */
160 if (max_cmdsn < exp_cmdsn -1 &&
161 max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
162 return ISCSI_ERR_MAX_CMDSN;
163 if (max_cmdsn > session->max_cmdsn ||
164 max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
165 session->max_cmdsn = max_cmdsn;
166 if (exp_cmdsn > session->exp_cmdsn ||
167 exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
168 session->exp_cmdsn = exp_cmdsn;
/*
 * iscsi_hdr_extract - locate or assemble the PDU header from the current
 * receive SKB. Zero-copy when the whole header is linear in the SKB;
 * otherwise gathers it piecewise into conn->hdr across one or more SKBs
 * (IN_PROGRESS_HEADER_GATHER state).
 */
174 iscsi_hdr_extract(struct iscsi_conn *conn)
176 struct sk_buff *skb = conn->in.skb;
178 if (conn->in.copy >= conn->hdr_size &&
179 conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
181 * Zero-copy PDU Header: using connection context
182 * to store header pointer.
/* header may point straight into skb->data only for linear SKBs */
184 if (skb_shinfo(skb)->frag_list == NULL &&
185 !skb_shinfo(skb)->nr_frags)
186 conn->in.hdr = (struct iscsi_hdr *)
187 ((char*)skb->data + conn->in.offset);
189 /* ignoring return code since we checked
191 skb_copy_bits(skb, conn->in.offset,
192 &conn->hdr, conn->hdr_size);
193 conn->in.hdr = &conn->hdr;
195 conn->in.offset += conn->hdr_size;
196 conn->in.copy -= conn->hdr_size;
202 * PDU header scattered across SKB's,
203 * copying it... This'll happen quite rarely.
206 if (conn->in_progress == IN_PROGRESS_WAIT_HEADER)
207 conn->in.hdr_offset = 0;
209 hdr_remains = conn->hdr_size - conn->in.hdr_offset;
210 BUG_ON(hdr_remains <= 0);
212 copylen = min(conn->in.copy, hdr_remains);
213 skb_copy_bits(skb, conn->in.offset,
214 (char*)&conn->hdr + conn->in.hdr_offset, copylen);
216 debug_tcp("PDU gather offset %d bytes %d in.offset %d "
217 "in.copy %d\n", conn->in.hdr_offset, copylen,
218 conn->in.offset, conn->in.copy);
220 conn->in.offset += copylen;
221 conn->in.copy -= copylen;
/* partial header: remember progress and wait for the next SKB */
222 if (copylen < hdr_remains) {
223 conn->in_progress = IN_PROGRESS_HEADER_GATHER;
224 conn->in.hdr_offset += copylen;
227 conn->in.hdr = &conn->hdr;
228 conn->discontiguous_hdr_cnt++;
229 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
/*
 * iscsi_ctask_cleanup - retire a finished command task: free any queued
 * Data-Out tasks for WRITEs, reset xmit state, and return the ctask to
 * the session command pool. Takes session->lock (and conn->lock for the
 * dataqueue walk).
 */
236 iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
238 struct scsi_cmnd *sc = ctask->sc;
239 struct iscsi_session *session = conn->session;
241 spin_lock(&session->lock);
243 spin_unlock(&session->lock);
246 if (sc->sc_data_direction == DMA_TO_DEVICE) {
247 struct iscsi_data_task *dtask, *n;
248 /* WRITE: cleanup Data-Out's if any */
249 spin_lock(&conn->lock);
250 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
251 list_del(&dtask->item);
252 mempool_free(dtask, ctask->datapool);
254 spin_unlock(&conn->lock);
256 ctask->xmstate = XMSTATE_IDLE;
/* hand the task descriptor back to the free pool */
259 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
260 spin_unlock(&session->lock);
264 * iscsi_cmd_rsp - SCSI Command Response processing
265 * @conn: iscsi connection
266 * @ctask: scsi command task
/*
 * Validates CmdSN window, maps the iSCSI response/status to sc->result,
 * copies sense data on CHECK CONDITION, applies residual counts for
 * under/overflow, then completes the command and retires the ctask.
 */
269 iscsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
272 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)conn->in.hdr;
273 struct iscsi_session *session = conn->session;
274 struct scsi_cmnd *sc = ctask->sc;
276 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
278 sc->result = (DID_ERROR << 16);
282 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
284 sc->result = (DID_OK << 16) | rhdr->cmd_status;
286 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
287 sc->result = (DID_ERROR << 16);
/* sense payload: first 2 bytes of data segment carry the sense length */
291 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION && conn->senselen) {
292 int sensecopy = min(conn->senselen, SCSI_SENSE_BUFFERSIZE);
294 memcpy(sc->sense_buffer, conn->data + 2, sensecopy);
295 debug_scsi("copied %d bytes of sense\n", sensecopy);
298 if (sc->sc_data_direction == DMA_TO_DEVICE)
301 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
302 int res_count = be32_to_cpu(rhdr->residual_count);
304 if (res_count > 0 && res_count <= sc->request_bufflen)
305 sc->resid = res_count;
307 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
308 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
309 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
310 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
311 sc->resid = be32_to_cpu(rhdr->residual_count);
314 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
315 (long)sc, sc->result, ctask->itt);
316 conn->scsirsp_pdus_cnt++;
317 iscsi_ctask_cleanup(conn, ctask);
323 * iscsi_data_rsp - SCSI Data-In Response processing
324 * @conn: iscsi connection
325 * @ctask: scsi command task
/*
 * Validates DataSN ordering and data offset against the command's total
 * length, sets up ctask->data_count for the receive path, and — when the
 * PDU carries piggybacked status (ISCSI_FLAG_DATA_STATUS) — fills in
 * sc->result/sc->resid just like a command response would.
 */
328 iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
331 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)conn->in.hdr;
332 struct iscsi_session *session = conn->session;
333 int datasn = be32_to_cpu(rhdr->datasn);
335 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
339 * setup Data-In byte counter (gets decremented..)
341 ctask->data_count = conn->in.datalen;
343 if (conn->in.datalen == 0)
/* Data-In PDUs must arrive in DataSN order */
346 if (ctask->datasn != datasn)
347 return ISCSI_ERR_DATASN;
351 ctask->data_offset = be32_to_cpu(rhdr->offset);
352 if (ctask->data_offset + conn->in.datalen > ctask->total_length)
353 return ISCSI_ERR_DATA_OFFSET;
355 if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
356 struct scsi_cmnd *sc = ctask->sc;
358 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
359 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
360 int res_count = be32_to_cpu(rhdr->residual_count);
363 res_count <= sc->request_bufflen) {
364 sc->resid = res_count;
365 sc->result = (DID_OK << 16) | rhdr->cmd_status;
367 sc->result = (DID_BAD_TARGET << 16) |
369 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
370 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
371 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW) {
372 sc->resid = be32_to_cpu(rhdr->residual_count);
373 sc->result = (DID_OK << 16) | rhdr->cmd_status;
375 sc->result = (DID_OK << 16) | rhdr->cmd_status;
378 conn->datain_pdus_cnt++;
383 * iscsi_solicit_data_init - initialize first Data-Out
384 * @conn: iscsi connection
385 * @ctask: scsi command task
389 * Initialize first Data-Out within this R2T sequence and finds
390 * proper data_offset within this SCSI command.
392 * This function is called with connection lock taken.
395 iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
396 struct iscsi_r2t_info *r2t)
398 struct iscsi_data *hdr;
399 struct iscsi_data_task *dtask;
400 struct scsi_cmnd *sc = ctask->sc;
402 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
/* build the Data-Out PDU header for this R2T sequence */
405 memset(hdr, 0, sizeof(struct iscsi_data));
407 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
408 r2t->solicit_datasn++;
409 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
410 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
411 hdr->itt = ctask->hdr.itt;
412 hdr->exp_statsn = r2t->exp_statsn;
413 hdr->offset = cpu_to_be32(r2t->data_offset);
/* clamp the first burst to MaxRecvDataSegmentLength of the target */
414 if (r2t->data_length > conn->max_xmit_dlength) {
415 hton24(hdr->dlength, conn->max_xmit_dlength);
416 r2t->data_count = conn->max_xmit_dlength;
419 hton24(hdr->dlength, r2t->data_length);
420 r2t->data_count = r2t->data_length;
421 hdr->flags = ISCSI_FLAG_CMD_FINAL;
423 conn->dataout_pdus_cnt++;
427 iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
428 sizeof(struct iscsi_hdr));
434 struct scatterlist *sg = sc->request_buffer;
/* walk the S/G list to locate the element containing data_offset */
437 for (i = 0; i < sc->use_sg; i++, sg += 1) {
438 /* FIXME: prefetch ? */
439 if (sg_count + sg->length > r2t->data_offset) {
444 /* offset within this page */
445 page_offset = r2t->data_offset - sg_count;
447 /* fill in this buffer */
448 iscsi_buf_init_sg(&r2t->sendbuf, sg);
449 r2t->sendbuf.sg.offset += page_offset;
450 r2t->sendbuf.sg.length -= page_offset;
452 /* xmit logic will continue with next one */
456 sg_count += sg->length;
458 BUG_ON(r2t->sg == NULL);
460 iscsi_buf_init_iov(&ctask->sendbuf,
461 (char*)sc->request_buffer + r2t->data_offset,
464 list_add(&dtask->item, &ctask->dataqueue);
468 * iscsi_r2t_rsp - iSCSI R2T Response processing
469 * @conn: iscsi connection
470 * @ctask: scsi command task
/*
 * Validates the R2T PDU (no AHS, no data segment, in-order R2TSN, sane
 * offset/length), allocates an r2t descriptor from the task's pool,
 * primes the first Data-Out via iscsi_solicit_data_init(), and queues
 * the task for transmission.
 */
473 iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
475 struct iscsi_r2t_info *r2t;
476 struct iscsi_session *session = conn->session;
477 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)conn->in.hdr;
478 int r2tsn = be32_to_cpu(rhdr->r2tsn);
482 return ISCSI_ERR_AHSLEN;
484 if (conn->in.datalen)
485 return ISCSI_ERR_DATALEN;
487 if (ctask->exp_r2tsn && ctask->exp_r2tsn != r2tsn)
488 return ISCSI_ERR_R2TSN;
490 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
494 /* FIXME: use R2TSN to detect missing R2T */
496 /* fill-in new R2T associated with the task */
497 spin_lock(&session->lock);
/* drop the R2T if the command completed/aborted or we left full-feature */
498 if (!ctask->sc || ctask->mtask ||
499 session->state != ISCSI_STATE_LOGGED_IN) {
500 printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
501 "recovery...\n", ctask->itt);
502 spin_unlock(&session->lock);
505 rc = __kfifo_get(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
508 r2t->exp_statsn = rhdr->statsn;
509 r2t->data_length = be32_to_cpu(rhdr->data_length);
510 if (r2t->data_length == 0 ||
511 r2t->data_length > session->max_burst) {
512 spin_unlock(&session->lock);
513 return ISCSI_ERR_DATALEN;
516 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
517 if (r2t->data_offset + r2t->data_length > ctask->total_length) {
518 spin_unlock(&session->lock);
519 return ISCSI_ERR_DATALEN;
522 r2t->ttt = rhdr->ttt; /* no flip */
523 r2t->solicit_datasn = 0;
525 iscsi_solicit_data_init(conn, ctask, r2t);
527 ctask->exp_r2tsn = r2tsn + 1;
528 ctask->xmstate |= XMSTATE_SOL_HDR;
529 __kfifo_put(ctask->r2tqueue, (void*)&r2t, sizeof(void*));
530 __kfifo_put(conn->writequeue, (void*)&ctask, sizeof(void*));
532 schedule_work(&conn->xmitwork);
533 conn->r2t_pdus_cnt++;
534 spin_unlock(&session->lock);
/*
 * iscsi_hdr_recv - main incoming-PDU header dispatcher.
 *
 * Verifies data length, AHS, optional header digest, and the ITT
 * encoding (age + cid + index), then routes the PDU by opcode to the
 * command-response / Data-In / R2T handlers for SCSI tasks, the mgmt
 * handlers for immediate-task ITTs, or the reserved-tag handlers
 * (unsolicited NOP-In, Reject, Async Event).
 *
 * Returns 0 or an ISCSI_ERR_* code. NOTE(review): several lines are
 * elided from this view; control-flow details between visible lines
 * should be confirmed against the full file.
 */
540 iscsi_hdr_recv(struct iscsi_conn *conn)
543 struct iscsi_hdr *hdr;
544 struct iscsi_cmd_task *ctask;
545 struct iscsi_session *session = conn->session;
546 uint32_t cdgst, rdgst = 0;
550 /* verify PDU length */
551 conn->in.datalen = ntoh24(hdr->dlength);
552 if (conn->in.datalen > conn->max_recv_dlength) {
553 printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
554 conn->in.datalen, conn->max_recv_dlength);
555 return ISCSI_ERR_DATALEN;
557 conn->data_copied = 0;
/* hlength is in 4-byte words; skip the AHS in the receive stream */
560 conn->in.ahslen = hdr->hlength * 4;
561 conn->in.offset += conn->in.ahslen;
562 conn->in.copy -= conn->in.ahslen;
563 if (conn->in.copy < 0) {
564 printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
565 "%d bytes\n", conn->in.ahslen);
566 return ISCSI_ERR_AHSLEN;
569 /* calculate read padding */
570 conn->in.padding = conn->in.datalen & (ISCSI_PAD_LEN-1);
571 if (conn->in.padding) {
572 conn->in.padding = ISCSI_PAD_LEN - conn->in.padding;
573 debug_scsi("read padding %d bytes\n", conn->in.padding);
576 if (conn->hdrdgst_en) {
577 struct scatterlist sg;
579 sg_init_one(&sg, (u8 *)hdr,
580 sizeof(struct iscsi_hdr) + conn->in.ahslen);
581 crypto_digest_digest(conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
582 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
586 /* save opcode for later */
587 conn->in.opcode = hdr->opcode & ISCSI_OPCODE_MASK;
589 /* verify itt (itt encoding: age+cid+itt) */
590 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
591 if ((hdr->itt & AGE_MASK) !=
592 (session->age << AGE_SHIFT)) {
593 printk(KERN_ERR "iscsi_tcp: received itt %x expected "
594 "session age (%x)\n", hdr->itt,
595 session->age & AGE_MASK);
596 return ISCSI_ERR_BAD_ITT;
599 if ((hdr->itt & CID_MASK) != (conn->id << CID_SHIFT)) {
600 printk(KERN_ERR "iscsi_tcp: received itt %x, expected "
601 "CID (%x)\n", hdr->itt, conn->id);
602 return ISCSI_ERR_BAD_ITT;
604 conn->in.itt = hdr->itt & ITT_MASK;
606 conn->in.itt = hdr->itt;
608 debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
609 hdr->opcode, conn->in.offset, conn->in.copy,
610 conn->in.ahslen, conn->in.datalen);
/* SCSI command task ITT range */
612 if (conn->in.itt < session->cmds_max) {
613 if (conn->hdrdgst_en && cdgst != rdgst) {
614 printk(KERN_ERR "iscsi_tcp: itt %x: hdrdgst error "
615 "recv 0x%x calc 0x%x\n", conn->in.itt, rdgst,
617 return ISCSI_ERR_HDR_DGST;
620 ctask = (struct iscsi_cmd_task *)session->cmds[conn->in.itt];
623 printk(KERN_INFO "iscsi_tcp: dropping ctask with "
624 "itt 0x%x\n", ctask->itt);
625 conn->in.datalen = 0; /* force drop */
/* stale task from a previous session generation */
629 if (ctask->sc->SCp.phase != session->age) {
630 printk(KERN_ERR "iscsi_tcp: ctask's session age %d, "
631 "expected %d\n", ctask->sc->SCp.phase,
633 return ISCSI_ERR_SESSION_FAILED;
636 conn->in.ctask = ctask;
638 debug_scsi("rsp [op 0x%x cid %d sc %lx itt 0x%x len %d]\n",
639 hdr->opcode, conn->id, (long)ctask->sc,
640 ctask->itt, conn->in.datalen);
642 switch(conn->in.opcode) {
643 case ISCSI_OP_SCSI_CMD_RSP:
644 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
645 if (!conn->in.datalen)
646 rc = iscsi_cmd_rsp(conn, ctask);
649 * got sense or response data; copying PDU
650 * Header to the connection's header
653 memcpy(&conn->hdr, hdr,
654 sizeof(struct iscsi_hdr));
656 case ISCSI_OP_SCSI_DATA_IN:
657 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
658 /* save flags for non-exceptional status */
659 conn->in.flags = hdr->flags;
660 /* save cmd_status for sense data */
661 conn->in.cmd_status =
662 ((struct iscsi_data_rsp*)hdr)->cmd_status;
663 rc = iscsi_data_rsp(conn, ctask);
666 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
/* R2T only makes sense for WRITEs */
667 if (ctask->sc->sc_data_direction == DMA_TO_DEVICE)
668 rc = iscsi_r2t_rsp(conn, ctask);
670 rc = ISCSI_ERR_PROTO;
673 rc = ISCSI_ERR_BAD_OPCODE;
/* management (immediate) task ITT range */
676 } else if (conn->in.itt >= ISCSI_MGMT_ITT_OFFSET &&
677 conn->in.itt < ISCSI_MGMT_ITT_OFFSET +
678 session->mgmtpool_max) {
679 struct iscsi_mgmt_task *mtask = (struct iscsi_mgmt_task *)
680 session->mgmt_cmds[conn->in.itt -
681 ISCSI_MGMT_ITT_OFFSET];
683 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
684 conn->in.opcode, conn->id, mtask->itt,
687 switch(conn->in.opcode) {
688 case ISCSI_OP_LOGIN_RSP:
689 case ISCSI_OP_TEXT_RSP:
690 case ISCSI_OP_LOGOUT_RSP:
691 rc = iscsi_check_assign_cmdsn(session,
692 (struct iscsi_nopin*)hdr);
696 if (!conn->in.datalen) {
697 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
/* recycle mtask unless it is the persistent login task */
699 if (conn->login_mtask != mtask) {
700 spin_lock(&session->lock);
701 __kfifo_put(session->mgmtpool.queue,
702 (void*)&mtask, sizeof(void*));
703 spin_unlock(&session->lock);
707 case ISCSI_OP_SCSI_TMFUNC_RSP:
708 rc = iscsi_check_assign_cmdsn(session,
709 (struct iscsi_nopin*)hdr);
713 if (conn->in.datalen || conn->in.ahslen) {
714 rc = ISCSI_ERR_PROTO;
717 conn->tmfrsp_pdus_cnt++;
718 spin_lock(&session->lock);
719 if (conn->tmabort_state == TMABORT_INITIAL) {
720 __kfifo_put(session->mgmtpool.queue,
721 (void*)&mtask, sizeof(void*));
722 conn->tmabort_state =
723 ((struct iscsi_tm_rsp *)hdr)->
724 response == ISCSI_TMF_RSP_COMPLETE ?
725 TMABORT_SUCCESS:TMABORT_FAILED;
726 /* unblock eh_abort() */
727 wake_up(&conn->ehwait);
729 spin_unlock(&session->lock);
731 case ISCSI_OP_NOOP_IN:
/* NOP-In replying to our NOP-Out must not carry a valid TTT */
732 if (hdr->ttt != ISCSI_RESERVED_TAG) {
733 rc = ISCSI_ERR_PROTO;
736 rc = iscsi_check_assign_cmdsn(session,
737 (struct iscsi_nopin*)hdr);
740 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
742 if (!conn->in.datalen) {
743 struct iscsi_mgmt_task *mtask;
745 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
747 mtask = (struct iscsi_mgmt_task *)
748 session->mgmt_cmds[conn->in.itt -
749 ISCSI_MGMT_ITT_OFFSET];
750 if (conn->login_mtask != mtask) {
751 spin_lock(&session->lock);
752 __kfifo_put(session->mgmtpool.queue,
753 (void*)&mtask, sizeof(void*));
754 spin_unlock(&session->lock);
759 rc = ISCSI_ERR_BAD_OPCODE;
/* reserved tag: target-initiated PDUs */
762 } else if (conn->in.itt == ISCSI_RESERVED_TAG) {
763 switch(conn->in.opcode) {
764 case ISCSI_OP_NOOP_IN:
765 if (!conn->in.datalen) {
766 rc = iscsi_check_assign_cmdsn(session,
767 (struct iscsi_nopin*)hdr);
768 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
769 rc = iscsi_recv_pdu(iscsi_handle(conn),
772 rc = ISCSI_ERR_PROTO;
774 case ISCSI_OP_REJECT:
775 /* we need sth like iscsi_reject_rsp()*/
776 case ISCSI_OP_ASYNC_EVENT:
777 /* we need sth like iscsi_async_event_rsp() */
778 rc = ISCSI_ERR_BAD_OPCODE;
781 rc = ISCSI_ERR_BAD_OPCODE;
785 rc = ISCSI_ERR_BAD_ITT;
791 * iscsi_ctask_copy - copy skb bits to the destanation cmd task
792 * @conn: iscsi connection
793 * @ctask: scsi command task
794 * @buf: buffer to copy to
795 * @buf_size: size of buffer
796 * @offset: offset within the buffer
799 * The function calls skb_copy_bits() and updates per-connection and
800 * per-cmd byte counters.
802 * Read counters (in bytes):
804 * conn->in.offset offset within in progress SKB
805 * conn->in.copy left to copy from in progress SKB
807 * conn->in.copied copied already from in progress SKB
808 * conn->data_copied copied already from in progress buffer
809 * ctask->sent total bytes sent up to the MidLayer
810 * ctask->data_count left to copy from in progress Data-In
811 * buf_left left to copy from in progress buffer
814 iscsi_ctask_copy(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
815 void *buf, int buf_size, int offset)
817 int buf_left = buf_size - (conn->data_copied + offset);
/* copy no more than the SKB holds, the buffer holds, or the PDU needs */
818 int size = min(conn->in.copy, buf_left);
821 size = min(size, ctask->data_count);
823 debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
824 size, conn->in.offset, conn->in.copied);
827 BUG_ON(ctask->sent + size > ctask->total_length);
829 rc = skb_copy_bits(conn->in.skb, conn->in.offset,
830 (char*)buf + (offset + conn->data_copied), size);
831 /* must fit into skb->len */
834 conn->in.offset += size;
835 conn->in.copy -= size;
836 conn->in.copied += size;
837 conn->data_copied += size;
839 ctask->data_count -= size;
841 BUG_ON(conn->in.copy < 0);
842 BUG_ON(ctask->data_count < 0);
844 if (buf_size != (conn->data_copied + offset)) {
845 if (!ctask->data_count) {
846 BUG_ON(buf_size - conn->data_copied < 0);
847 /* done with this PDU */
848 return buf_size - conn->data_copied;
853 /* done with this buffer or with both - PDU and buffer */
854 conn->data_copied = 0;
859 * iscsi_tcp_copy - copy skb bits to the destanation buffer
860 * @conn: iscsi connection
861 * @buf: buffer to copy to
862 * @buf_size: number of bytes to copy
865 * The function calls skb_copy_bits() and updates per-connection
/* byte counters; caller retries (returns non-zero) until the whole
 * @buf_size has been accumulated across possibly multiple SKBs. */
869 iscsi_tcp_copy(struct iscsi_conn *conn, void *buf, int buf_size)
871 int buf_left = buf_size - conn->data_copied;
872 int size = min(conn->in.copy, buf_left);
875 debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
876 size, conn->in.offset, conn->data_copied);
879 rc = skb_copy_bits(conn->in.skb, conn->in.offset,
880 (char*)buf + conn->data_copied, size);
883 conn->in.offset += size;
884 conn->in.copy -= size;
885 conn->in.copied += size;
886 conn->data_copied += size;
888 if (buf_size != conn->data_copied)
/*
 * partial_sg_digest_update - fold a sub-range of one scatterlist element
 * into the running Rx data digest, using a stack copy so the caller's sg
 * entry is left untouched.
 */
895 partial_sg_digest_update(struct iscsi_conn *conn, struct scatterlist *sg,
896 int offset, int length)
898 struct scatterlist temp;
900 memcpy(&temp, sg, sizeof(struct scatterlist));
901 temp.offset = offset;
902 temp.length = length;
903 crypto_digest_update(conn->data_rx_tfm, &temp, 1);
/*
 * iscsi_scsi_data_in - copy a Data-In PDU's payload into the Scsi_Cmnd,
 * handling both flat request buffers and scatter-gather lists, updating
 * the Rx data digest as pages are filled, and completing the command
 * when the PDU carried piggybacked status.
 * NOTE(review): some statements are elided from this view; the exact
 * flat-vs-S/G branch structure should be confirmed against the full file.
 */
906 static int iscsi_scsi_data_in(struct iscsi_conn *conn)
908 struct iscsi_cmd_task *ctask = conn->in.ctask;
909 struct scsi_cmnd *sc = ctask->sc;
910 struct scatterlist tmp, *sg;
911 int i, offset, rc = 0;
913 BUG_ON((void*)ctask != sc->SCp.ptr);
916 * copying Data-In into the Scsi_Cmnd
/* flat buffer case: remember pre-copy count for the digest update */
919 i = ctask->data_count;
920 rc = iscsi_ctask_copy(conn, ctask, sc->request_buffer,
921 sc->request_bufflen, ctask->data_offset);
924 if (conn->datadgst_en) {
925 sg_init_one(&tmp, sc->request_buffer, i);
926 crypto_digest_update(conn->data_rx_tfm, &tmp, 1);
/* scatter-gather case: skip elements already consumed by data_offset */
932 offset = ctask->data_offset;
933 sg = sc->request_buffer;
935 if (ctask->data_offset)
936 for (i = 0; i < ctask->sg_count; i++)
937 offset -= sg[i].length;
938 /* we've passed through partial sg*/
942 for (i = ctask->sg_count; i < sc->use_sg; i++) {
945 dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
946 rc = iscsi_ctask_copy(conn, ctask, dest + sg[i].offset,
947 sg[i].length, offset);
948 kunmap_atomic(dest, KM_SOFTIRQ0);
950 /* continue with the next SKB/PDU */
953 if (conn->datadgst_en) {
955 crypto_digest_update(conn->data_rx_tfm,
958 partial_sg_digest_update(conn, &sg[i],
959 sg[i].offset + offset,
960 sg[i].length - offset);
966 if (!ctask->data_count) {
967 if (rc && conn->datadgst_en)
969 * data-in is complete, but buffer not...
971 partial_sg_digest_update(conn, &sg[i],
972 sg[i].offset, sg[i].length-rc);
980 BUG_ON(ctask->data_count);
983 /* check for non-exceptional status */
984 if (conn->in.flags & ISCSI_FLAG_DATA_STATUS) {
985 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
986 (long)sc, sc->result, ctask->itt);
987 conn->scsirsp_pdus_cnt++;
988 iscsi_ctask_cleanup(conn, ctask);
/*
 * iscsi_data_recv - dispatch the data-segment receive phase by opcode:
 * Data-In goes into the SCSI command buffers, command-response payload
 * (status/sense) is accumulated into conn->data, and text/login/NOP-In
 * data is collected and passed up to userspace via iscsi_recv_pdu().
 */
996 iscsi_data_recv(struct iscsi_conn *conn)
998 struct iscsi_session *session = conn->session;
1001 switch(conn->in.opcode) {
1002 case ISCSI_OP_SCSI_DATA_IN:
1003 rc = iscsi_scsi_data_in(conn);
1005 case ISCSI_OP_SCSI_CMD_RSP: {
1008 * copying the entire Data Segment.
1010 if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
/* first two bytes of the segment encode the sense data length */
1018 conn->in.hdr = &conn->hdr;
1019 conn->senselen = (conn->data[0] << 8) | conn->data[1];
1020 rc = iscsi_cmd_rsp(conn, conn->in.ctask);
1023 case ISCSI_OP_TEXT_RSP:
1024 case ISCSI_OP_LOGIN_RSP:
1025 case ISCSI_OP_NOOP_IN: {
1026 struct iscsi_mgmt_task *mtask = NULL;
1028 if (conn->in.itt != ISCSI_RESERVED_TAG)
1029 mtask = (struct iscsi_mgmt_task *)
1030 session->mgmt_cmds[conn->in.itt -
1031 ISCSI_MGMT_ITT_OFFSET];
1034 * Collect data segment to the connection's data
1037 if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
1042 rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr,
1043 conn->data, conn->in.datalen);
/* recycle mtask unless it is the persistent login task */
1045 if (mtask && conn->login_mtask != mtask) {
1046 spin_lock(&session->lock);
1047 __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
1049 spin_unlock(&session->lock);
1061 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
1062 * @rd_desc: read descriptor
1063 * @skb: socket buffer
1064 * @offset: offset in skb
1065 * @len: skb->len - offset
/*
 * tcp_read_sock() callback and top-level Rx state machine: extracts and
 * verifies PDU headers, receives data segments (with optional data
 * digest), consumes pad bytes, and loops until the SKB is drained or a
 * PDU continues into the next SKB. Returns the number of bytes consumed.
 * NOTE(review): loop back-edges and some returns are elided here.
 */
1068 iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
1069 unsigned int offset, size_t len)
1072 struct iscsi_conn *conn = rd_desc->arg.data;
1074 char pad[ISCSI_PAD_LEN];
1075 struct scatterlist sg;
1078 * Save current SKB and its offset in the corresponding
1079 * connection context.
1081 conn->in.copy = skb->len - offset;
1082 conn->in.offset = offset;
1084 conn->in.len = conn->in.copy;
1085 BUG_ON(conn->in.copy <= 0);
1086 debug_tcp("in %d bytes\n", conn->in.copy);
1089 conn->in.copied = 0;
1092 if (unlikely(conn->suspend_rx)) {
1093 debug_tcp("conn %d Rx suspended!\n", conn->id);
1097 if (conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
1098 conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
1099 rc = iscsi_hdr_extract(conn);
1104 iscsi_conn_failure(conn, rc);
1110 * Verify and process incoming PDU header.
1112 rc = iscsi_hdr_recv(conn);
1113 if (!rc && conn->in.datalen) {
/* start a fresh Rx data digest for the upcoming Data-In segment */
1114 if (conn->datadgst_en &&
1115 conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) {
1116 BUG_ON(!conn->data_rx_tfm);
1117 crypto_digest_init(conn->data_rx_tfm);
1119 conn->in_progress = IN_PROGRESS_DATA_RECV;
1121 iscsi_conn_failure(conn, rc);
/* trailing 4-byte data digest that followed a completed segment */
1126 if (conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
1127 debug_tcp("extra data_recv offset %d copy %d\n",
1128 conn->in.offset, conn->in.copy);
1129 if (conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) {
1130 uint32_t recv_digest;
1131 skb_copy_bits(conn->in.skb, conn->in.offset,
1133 conn->in.offset += 4;
1135 if (recv_digest != conn->in.datadgst) {
1136 debug_tcp("iscsi_tcp: data digest error!"
1137 "0x%x != 0x%x\n", recv_digest,
1139 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
1142 debug_tcp("iscsi_tcp: data digest match!"
1143 "0x%x == 0x%x\n", recv_digest,
1145 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1150 if (conn->in_progress == IN_PROGRESS_DATA_RECV && conn->in.copy) {
1152 debug_tcp("data_recv offset %d copy %d\n",
1153 conn->in.offset, conn->in.copy);
1155 rc = iscsi_data_recv(conn);
1157 if (rc == -EAGAIN) {
1158 rd_desc->count = conn->in.datalen -
1159 conn->in.ctask->data_count;
1162 iscsi_conn_failure(conn, rc);
/* consume pad bytes (and fold them into the digest if enabled) */
1165 conn->in.copy -= conn->in.padding;
1166 conn->in.offset += conn->in.padding;
1167 if (conn->datadgst_en &&
1168 conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) {
1169 if (conn->in.padding) {
1170 debug_tcp("padding -> %d\n", conn->in.padding);
1171 memset(pad, 0, conn->in.padding);
1172 sg_init_one(&sg, pad, conn->in.padding);
1173 crypto_digest_update(conn->data_rx_tfm, &sg, 1);
1175 crypto_digest_final(conn->data_rx_tfm,
1176 (u8 *) & conn->in.datadgst);
1177 debug_tcp("rx digest 0x%x\n", conn->in.datadgst);
1178 conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
1180 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1183 debug_tcp("f, processed %d from out of %d padding %d\n",
1184 conn->in.offset - offset, (int)len, conn->in.padding);
1185 BUG_ON(conn->in.offset - offset > len);
1187 if (conn->in.offset - offset != len) {
1188 debug_tcp("continue to process %d bytes\n",
1189 (int)len - (conn->in.offset - offset));
1194 processed = conn->in.offset - offset;
1195 BUG_ON(processed == 0);
1199 processed = conn->in.offset - offset;
1200 debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
1201 processed, (int)len, (int)rd_desc->count);
1202 BUG_ON(processed == 0);
1203 BUG_ON(processed > len);
1205 conn->rxdata_octets += processed;
/*
 * iscsi_tcp_data_ready - sk_data_ready callback; drains the socket via
 * tcp_read_sock() into iscsi_tcp_data_recv() under sk_callback_lock.
 */
1210 iscsi_tcp_data_ready(struct sock *sk, int flag)
1212 struct iscsi_conn *conn = sk->sk_user_data;
1213 read_descriptor_t rd_desc;
1215 read_lock(&sk->sk_callback_lock);
1217 /* use rd_desc to pass 'conn' to iscsi_tcp_data_recv */
1218 rd_desc.arg.data = conn;
1220 tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
1222 read_unlock(&sk->sk_callback_lock);
/*
 * iscsi_tcp_state_change - sk_state_change callback; treats a drained
 * CLOSE/CLOSE_WAIT transition as a connection failure, then chains to
 * the socket's original state_change handler outside the callback lock.
 */
1226 iscsi_tcp_state_change(struct sock *sk)
1228 struct iscsi_conn *conn;
1229 struct iscsi_session *session;
1230 void (*old_state_change)(struct sock *);
1232 read_lock(&sk->sk_callback_lock);
1234 conn = (struct iscsi_conn*)sk->sk_user_data;
1235 session = conn->session;
1237 if ((sk->sk_state == TCP_CLOSE_WAIT ||
1238 sk->sk_state == TCP_CLOSE) &&
1239 !atomic_read(&sk->sk_rmem_alloc)) {
1240 debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
1241 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1244 old_state_change = conn->old_state_change;
1246 read_unlock(&sk->sk_callback_lock);
1248 old_state_change(sk);
1252 * iscsi_write_space - Called when more output buffer space is available
1253 * @sk: socket space is available for
/* Chains to the original write_space callback, lifts the Tx suspension
 * set on -EAGAIN, and kicks the xmit worker. */
1256 iscsi_write_space(struct sock *sk)
1258 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
1259 conn->old_write_space(sk);
1260 debug_tcp("iscsi_write_space: cid %d\n", conn->id);
1261 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
1262 schedule_work(&conn->xmitwork);
/*
 * iscsi_conn_set_callbacks - hook our data_ready/state_change/write_space
 * handlers into the connection's socket, saving the originals so
 * iscsi_conn_restore_callbacks() can undo this.
 */
1266 iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1268 struct sock *sk = conn->sock->sk;
1270 /* assign new callbacks */
1271 write_lock_bh(&sk->sk_callback_lock);
1272 sk->sk_user_data = conn;
1273 conn->old_data_ready = sk->sk_data_ready;
1274 conn->old_state_change = sk->sk_state_change;
1275 conn->old_write_space = sk->sk_write_space;
1276 sk->sk_data_ready = iscsi_tcp_data_ready;
1277 sk->sk_state_change = iscsi_tcp_state_change;
1278 sk->sk_write_space = iscsi_write_space;
1279 write_unlock_bh(&sk->sk_callback_lock);
/*
 * iscsi_conn_restore_callbacks - detach the connection from its socket by
 * restoring the callbacks saved in iscsi_conn_set_callbacks().
 */
1283 iscsi_conn_restore_callbacks(struct iscsi_conn *conn)
1285 struct sock *sk = conn->sock->sk;
1287 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
1288 write_lock_bh(&sk->sk_callback_lock);
1289 sk->sk_user_data = NULL;
1290 sk->sk_data_ready = conn->old_data_ready;
1291 sk->sk_state_change = conn->old_state_change;
1292 sk->sk_write_space = conn->old_write_space;
1293 sk->sk_no_check = 0;
1294 write_unlock_bh(&sk->sk_callback_lock);
1298 * iscsi_send - generic send routine
1299 * @sk: kernel's socket
1300 * @buf: buffer to write from
1301 * @size: actual size to write
1302 * @flags: socket's flags
1305 * depending on buffer will use tcp_sendpage() or tcp_sendmsg().
1306 * buf->sg.offset == -1 tells us that buffer is non S/G and forces
1307 * to use tcp_sendmsg().
1310 iscsi_send(struct socket *sk, struct iscsi_buf *buf, int size, int flags)
/* page-backed buffer: zero-copy via the socket's sendpage op */
1314 if ((int)buf->sg.offset >= 0) {
1315 int offset = buf->sg.offset + buf->sent;
1318 res = sk->ops->sendpage(sk, buf->sg.page, offset, size, flags);
/* non-S/G buffer (offset == -1): copy path via kernel_sendmsg() */
1322 buf->iov.iov_base = iscsi_buf_iov_base(buf);
1323 buf->iov.iov_len = size;
1325 memset(&msg, 0, sizeof(struct msghdr));
1328 res = kernel_sendmsg(sk, &msg, &buf->iov, 1, size);
1335 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
1336 * @conn: iscsi connection
1337 * @buf: buffer to write from
1338 * @datalen: lenght of data to be sent after the header
/* Sends the remaining header bytes; on partial or failed send, updates
 * Tx accounting, suspends Tx on -EAGAIN, and fails the connection on
 * -EPIPE. (MSG_MORE-style flagging when @datalen follows is elided.) */
1344 iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
1346 struct socket *sk = conn->sock;
1347 int flags = 0; /* MSG_DONTWAIT; */
1350 size = buf->sg.length - buf->sent;
1351 BUG_ON(buf->sent + size > buf->sg.length);
1352 if (buf->sent + size != buf->sg.length || datalen)
1355 res = iscsi_send(sk, buf, size, flags);
1356 debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
1358 conn->txdata_octets += res;
1363 } else if (res == -EAGAIN) {
1364 conn->sendpage_failures_cnt++;
1365 set_bit(SUSPEND_BIT, &conn->suspend_tx);
1366 } else if (res == -EPIPE)
1367 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1373 * iscsi_sendpage - send one page of iSCSI Data-Out.
1374 * @conn: iscsi connection
1375 * @buf: buffer to write from
1376 * @count: remaining data
1377 * @sent: number of bytes sent
1383 iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
1384 int *count, int *sent)
1386 struct socket *sk = conn->sock;
1387 int flags = 0; /* MSG_DONTWAIT; */
1390 size = buf->sg.length - buf->sent;	/* bytes left in this buffer */
1391 BUG_ON(buf->sent + size > buf->sg.length);
1394 /* more payload follows this chunk -> hint the stack not to push yet */
1394 if (buf->sent + size != buf->sg.length || *count != size)
1397 res = iscsi_send(sk, buf, size, flags);
1398 debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
1399 size, buf->sent, *count, *sent, res);
1401 conn->txdata_octets += res;
1408 } else if (res == -EAGAIN) {
1409 /* no tx space; park this connection until write_space callback */
1409 conn->sendpage_failures_cnt++;
1410 set_bit(SUSPEND_BIT, &conn->suspend_tx);
1411 } else if (res == -EPIPE)
1412 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
/*
 * iscsi_data_digest_init - (re)start the tx data-digest computation.
 *
 * Resets the CRC32C transform for a new Data-Out/data burst and primes
 * digest_count with the 4-byte digest length.
 */
1418 iscsi_data_digest_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1420 BUG_ON(!conn->data_tx_tfm);
1421 crypto_digest_init(conn->data_tx_tfm);
1422 ctask->digest_count = 4;	/* iSCSI data digest is 32 bits */
/*
 * iscsi_buf_data_digest_update - fold a just-sent buffer into the
 * running tx data digest.
 *
 * S/G buffers (offset != -1) are hashed directly; virtual buffers use
 * the offset == -1 convention (see iscsi_send()) and are wrapped in a
 * one-entry scatterlist first.
 */
1426 iscsi_buf_data_digest_update(struct iscsi_conn *conn, struct iscsi_buf *buf)
1428 struct scatterlist sg;
1430 if (buf->sg.offset != -1)
1431 crypto_digest_update(conn->data_tx_tfm, &buf->sg, 1);
1433 /* virtual buf: sg.page actually holds the kernel virtual address */
1433 sg_init_one(&sg, (char *)buf->sg.page, buf->sg.length);
1434 crypto_digest_update(conn->data_tx_tfm, &sg, 1);
/*
 * iscsi_digest_final_send - finalize the tx data digest and transmit it.
 *
 * On a partial send the digest value is stashed in ctask->datadigest
 * and XMSTATE_DATA_DIGEST is set so the xmit state machine retries the
 * digest (and only the digest) later.
 */
1439 iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1440 struct iscsi_buf *buf, uint32_t *digest, int final)
1446 crypto_digest_final(conn->data_tx_tfm, (u8*)digest);
1448 iscsi_buf_init_virt(buf, (char*)digest, 4);
1449 rc = iscsi_sendpage(conn, buf, &ctask->digest_count, &sent);
1451 /* send incomplete: remember digest and schedule a resend */
1451 ctask->datadigest = *digest;
1452 ctask->xmstate |= XMSTATE_DATA_DIGEST;
1454 ctask->digest_count = 4;
1459 * iscsi_solicit_data_cont - initialize next Data-Out
1460 * @conn: iscsi connection
1461 * @ctask: scsi command task
1463 * @left: bytes left to transfer
1466 * Initialize next Data-Out within this R2T sequence and continue
1467 * to process next Scatter-Gather element(if any) of this SCSI command.
1469 * Called under connection lock.
1472 iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1473 struct iscsi_r2t_info *r2t, int left)
1475 struct iscsi_data *hdr;
1476 struct iscsi_data_task *dtask;
1477 struct scsi_cmnd *sc = ctask->sc;
1480 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1483 /* build the Data-Out PDU header for the next chunk of this R2T */
1483 memset(hdr, 0, sizeof(struct iscsi_data));
1484 hdr->ttt = r2t->ttt;	/* target transfer tag echoed from the R2T */
1485 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1486 r2t->solicit_datasn++;
1487 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1488 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1489 hdr->itt = ctask->hdr.itt;
1490 hdr->exp_statsn = r2t->exp_statsn;
1491 new_offset = r2t->data_offset + r2t->sent;
1492 hdr->offset = cpu_to_be32(new_offset);
1493 if (left > conn->max_xmit_dlength) {
1494 hton24(hdr->dlength, conn->max_xmit_dlength);
1495 r2t->data_count = conn->max_xmit_dlength;
1497 /* last Data-Out of this R2T sequence */
1497 hton24(hdr->dlength, left);
1498 r2t->data_count = left;
1499 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1501 conn->dataout_pdus_cnt++;
1503 iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
1504 sizeof(struct iscsi_hdr));
1508 /* advance to the next S/G element if the current one is drained */
1508 if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) {
1509 BUG_ON(ctask->bad_sg == r2t->sg);
1510 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1513 iscsi_buf_init_iov(&ctask->sendbuf,
1514 (char*)sc->request_buffer + new_offset,
1517 list_add(&dtask->item, &ctask->dataqueue);
/*
 * iscsi_unsolicit_data_init - build the header for the next unsolicited
 * Data-Out PDU of a write command.
 *
 * Unsolicited Data-Outs carry the reserved TTT; the offset is derived
 * from how much of the transfer remains solicited (r2t_data_count) and
 * unsolicited (unsol_count). The dtask is queued on ctask->dataqueue
 * for later cleanup.
 */
1521 iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1523 struct iscsi_data *hdr;
1524 struct iscsi_data_task *dtask;
1526 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1529 memset(hdr, 0, sizeof(struct iscsi_data));
1530 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);	/* unsolicited: no R2T tag */
1531 hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
1532 ctask->unsol_datasn++;
1533 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1534 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1535 hdr->itt = ctask->hdr.itt;
1536 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
1537 hdr->offset = cpu_to_be32(ctask->total_length -
1538 ctask->r2t_data_count -
1539 ctask->unsol_count);
1540 if (ctask->unsol_count > conn->max_xmit_dlength) {
1541 hton24(hdr->dlength, conn->max_xmit_dlength);
1542 ctask->data_count = conn->max_xmit_dlength;
1545 /* final unsolicited Data-Out */
1545 hton24(hdr->dlength, ctask->unsol_count);
1546 ctask->data_count = ctask->unsol_count;
1547 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1550 iscsi_buf_init_virt(&ctask->headbuf, (char*)hdr,
1551 sizeof(struct iscsi_hdr));
1553 list_add(&dtask->item, &ctask->dataqueue);
1555 ctask->dtask = dtask;
1559 * iscsi_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1560 * @conn: iscsi connection
1561 * @ctask: scsi command task
1565 iscsi_cmd_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1566 struct scsi_cmnd *sc)
1568 struct iscsi_session *session = conn->session;
1570 BUG_ON(__kfifo_len(ctask->r2tqueue));
1574 /* fill the SCSI Command PDU header */
1574 ctask->hdr.opcode = ISCSI_OP_SCSI_CMD;
1575 ctask->hdr.flags = ISCSI_ATTR_SIMPLE;
1576 int_to_scsilun(sc->device->lun, (struct scsi_lun *)ctask->hdr.lun);
1577 /* ITT encodes task slot, connection id and session age for validation */
1577 ctask->hdr.itt = ctask->itt | (conn->id << CID_SHIFT) |
1578 (session->age << AGE_SHIFT);
1579 ctask->hdr.data_length = cpu_to_be32(sc->request_bufflen);
1580 ctask->hdr.cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++;
1581 ctask->hdr.exp_statsn = cpu_to_be32(conn->exp_statsn);
1582 memcpy(ctask->hdr.cdb, sc->cmnd, sc->cmd_len);
1583 memset(&ctask->hdr.cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
1585 ctask->mtask = NULL;
1587 ctask->sg_count = 0;
1589 ctask->total_length = sc->request_bufflen;
1591 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1592 ctask->exp_r2tsn = 0;
1593 ctask->hdr.flags |= ISCSI_FLAG_CMD_WRITE;
1594 BUG_ON(ctask->total_length == 0);
1596 struct scatterlist *sg = sc->request_buffer;
1598 iscsi_buf_init_sg(&ctask->sendbuf,
1599 &sg[ctask->sg_count++]);
1601 ctask->bad_sg = sg + sc->use_sg;	/* one-past-the-end sentinel */
1603 iscsi_buf_init_iov(&ctask->sendbuf, sc->request_buffer,
1604 sc->request_bufflen);
1610 * imm_count bytes to be sent right after
1613 * unsol_count bytes(as Data-Out) to be sent
1614 * without R2T ack right after
1617 * r2t_data_count bytes to be sent via R2T ack's
1619 * pad_count bytes to be sent as zero-padding
1621 ctask->imm_count = 0;
1622 ctask->unsol_count = 0;
1623 ctask->unsol_datasn = 0;
1624 ctask->xmstate = XMSTATE_W_HDR;
1625 /* calculate write padding */
1626 ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1);
1627 if (ctask->pad_count) {
1628 ctask->pad_count = ISCSI_PAD_LEN - ctask->pad_count;
1629 debug_scsi("write padding %d bytes\n",
1631 ctask->xmstate |= XMSTATE_W_PAD;
1633 if (session->imm_data_en) {
1634 /* immediate data capped by FirstBurstLength and MRDSL */
1634 if (ctask->total_length >= session->first_burst)
1635 ctask->imm_count = min(session->first_burst,
1636 conn->max_xmit_dlength);
1638 ctask->imm_count = min(ctask->total_length,
1639 conn->max_xmit_dlength);
1640 hton24(ctask->hdr.dlength, ctask->imm_count);
1641 ctask->xmstate |= XMSTATE_IMM_DATA;
1643 zero_data(ctask->hdr.dlength);
1645 if (!session->initial_r2t_en)
1646 ctask->unsol_count = min(session->first_burst,
1647 ctask->total_length) - ctask->imm_count;
1648 if (!ctask->unsol_count)
1649 /* No unsolicited Data-Outs */
1650 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1652 ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
1654 ctask->r2t_data_count = ctask->total_length -
1658 debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
1660 ctask->itt, ctask->total_length, ctask->imm_count,
1661 ctask->unsol_count, ctask->r2t_data_count);
1663 /* read or no-data command */
1663 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1664 if (sc->sc_data_direction == DMA_FROM_DEVICE)
1665 ctask->hdr.flags |= ISCSI_FLAG_CMD_READ;
1667 ctask->xmstate = XMSTATE_R_HDR;
1668 zero_data(ctask->hdr.dlength);
1671 iscsi_buf_init_virt(&ctask->headbuf, (char*)&ctask->hdr,
1672 sizeof(struct iscsi_hdr));
1673 conn->scsicmd_pdus_cnt++;
1677 * iscsi_mtask_xmit - xmit management(immediate) task
1678 * @conn: iscsi connection
1679 * @mtask: task management task
1682 * The function can return -EAGAIN in which case caller must
1683 * call it again later, or recover. '0' return code means successful
1686 * Management xmit state machine consists of two states:
1687 * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
1688 * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
1691 iscsi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1694 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
1695 conn->id, mtask->xmstate, mtask->itt);
1697 if (mtask->xmstate & XMSTATE_IMM_HDR) {
1698 /* optimistically clear the state bit; restore it on failure below */
1698 mtask->xmstate &= ~XMSTATE_IMM_HDR;
1699 if (mtask->data_count)
1700 mtask->xmstate |= XMSTATE_IMM_DATA;
1701 if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
1702 conn->stop_stage != STOP_CONN_RECOVER &&
1704 iscsi_hdr_digest(conn, &mtask->headbuf,
1705 (u8*)mtask->hdrext);
1706 if (iscsi_sendhdr(conn, &mtask->headbuf, mtask->data_count)) {
1707 mtask->xmstate |= XMSTATE_IMM_HDR;
1708 if (mtask->data_count)
1709 mtask->xmstate &= ~XMSTATE_IMM_DATA;
1714 if (mtask->xmstate & XMSTATE_IMM_DATA) {
1715 BUG_ON(!mtask->data_count);
1716 mtask->xmstate &= ~XMSTATE_IMM_DATA;
1717 /* FIXME: implement.
1718 * Virtual buffer could be spread across multiple pages...
1721 if (iscsi_sendpage(conn, &mtask->sendbuf,
1722 &mtask->data_count, &mtask->sent)) {
1723 mtask->xmstate |= XMSTATE_IMM_DATA;
1726 } while (mtask->data_count);
1729 BUG_ON(mtask->xmstate != XMSTATE_IDLE);
/*
 * handle_xmstate_r_hdr - transmit the header of a SCSI READ command.
 *
 * Returns 0 when the header went out (the task then waits for Data-In);
 * on a partial send the XMSTATE_R_HDR bit is restored so the state
 * machine retries later.
 */
1734 handle_xmstate_r_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1736 ctask->xmstate &= ~XMSTATE_R_HDR;
1737 if (conn->hdrdgst_en)
1738 iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
1739 if (!iscsi_sendhdr(conn, &ctask->headbuf, 0)) {
1740 BUG_ON(ctask->xmstate != XMSTATE_IDLE);
1741 return 0; /* wait for Data-In */
1743 ctask->xmstate |= XMSTATE_R_HDR;
/*
 * handle_xmstate_w_hdr - transmit the header of a SCSI WRITE command.
 *
 * imm_count is passed down so the header send can signal that payload
 * follows. XMSTATE_W_HDR is restored on a partial send for retry.
 */
1748 handle_xmstate_w_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1750 ctask->xmstate &= ~XMSTATE_W_HDR;
1751 if (conn->hdrdgst_en)
1752 iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
1753 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->imm_count)) {
1754 ctask->xmstate |= XMSTATE_W_HDR;
/*
 * handle_xmstate_data_digest - resend a data digest that previously
 * failed to go out (digest value was saved in ctask->datadigest by
 * iscsi_digest_final_send()).
 */
1761 handle_xmstate_data_digest(struct iscsi_conn *conn,
1762 struct iscsi_cmd_task *ctask)
1764 ctask->xmstate &= ~XMSTATE_DATA_DIGEST;
1765 debug_tcp("resent data digest 0x%x\n", ctask->datadigest);
1766 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1767 &ctask->datadigest, 0)) {
1768 ctask->xmstate |= XMSTATE_DATA_DIGEST;
1769 debug_tcp("resent data digest 0x%x fail!\n",
/*
 * handle_xmstate_imm_data - push the immediate-data payload that rides
 * with a WRITE command PDU.
 *
 * Loops over iscsi_sendpage() until imm_count is drained, updating the
 * tx data digest as each buffer goes out; advances through the S/G list
 * as buffers empty. On a partial send the digest is finalized into
 * ctask->immdigest and XMSTATE_IMM_DATA restored for retry.
 */
1777 handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1779 BUG_ON(!ctask->imm_count);
1780 ctask->xmstate &= ~XMSTATE_IMM_DATA;
1782 if (conn->datadgst_en) {
1783 iscsi_data_digest_init(conn, ctask);
1784 ctask->immdigest = 0;
1788 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->imm_count,
1790 ctask->xmstate |= XMSTATE_IMM_DATA;
1791 if (conn->datadgst_en) {
1792 crypto_digest_final(conn->data_tx_tfm,
1793 (u8*)&ctask->immdigest);
1794 debug_tcp("tx imm sendpage fail 0x%x\n",
1799 if (conn->datadgst_en)
1800 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
1802 if (!ctask->imm_count)
1804 /* current S/G element drained - move to the next one */
1804 iscsi_buf_init_sg(&ctask->sendbuf,
1805 &ctask->sg[ctask->sg_count++]);
1808 /* if padding follows, the digest is sent after the pad instead */
1808 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1809 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1810 &ctask->immdigest, 1)) {
1811 debug_tcp("sending imm digest 0x%x fail!\n",
1815 debug_tcp("sending imm digest 0x%x\n", ctask->immdigest);
/*
 * handle_xmstate_uns_hdr - transmit the header of an unsolicited
 * Data-Out PDU.
 *
 * XMSTATE_UNS_INIT means the PDU header still has to be built (via
 * iscsi_unsolicit_data_init()); once built, the init bit stays clear
 * across retries so a resend does not rebuild the header.
 */
1822 handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1824 struct iscsi_data_task *dtask;
1826 ctask->xmstate |= XMSTATE_UNS_DATA;
1827 if (ctask->xmstate & XMSTATE_UNS_INIT) {
1828 iscsi_unsolicit_data_init(conn, ctask);
1829 BUG_ON(!ctask->dtask);
1830 dtask = ctask->dtask;
1831 if (conn->hdrdgst_en)
1832 iscsi_hdr_digest(conn, &ctask->headbuf,
1833 (u8*)dtask->hdrext);
1834 ctask->xmstate &= ~XMSTATE_UNS_INIT;
1836 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->data_count)) {
1837 /* header stalled: fall back to header state for retry */
1837 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1838 ctask->xmstate |= XMSTATE_UNS_HDR;
1842 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1843 ctask->itt, ctask->unsol_count, ctask->sent);
/*
 * handle_xmstate_uns_data - push the payload of an unsolicited
 * Data-Out, tracking unsol_count by bytes actually sent and walking
 * the S/G list; sends the data digest when the Data-Out completes.
 */
1848 handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1850 struct iscsi_data_task *dtask = ctask->dtask;
1852 BUG_ON(!ctask->data_count);
1853 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1855 if (conn->datadgst_en) {
1856 iscsi_data_digest_init(conn, ctask);
1861 int start = ctask->sent;	/* to account partial progress below */
1863 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->data_count,
1865 ctask->unsol_count -= ctask->sent - start;
1866 ctask->xmstate |= XMSTATE_UNS_DATA;
1867 /* will continue with this ctask later.. */
1868 if (conn->datadgst_en) {
1869 crypto_digest_final(conn->data_tx_tfm,
1870 (u8 *)&dtask->digest);
1871 debug_tcp("tx uns data fail 0x%x\n",
1877 BUG_ON(ctask->sent > ctask->total_length);
1878 ctask->unsol_count -= ctask->sent - start;
1881 * XXX:we may run here with an uninitialized sendbuf.
1884 if (conn->datadgst_en && ctask->sent - start > 0)
1885 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
1887 if (!ctask->data_count)
1889 iscsi_buf_init_sg(&ctask->sendbuf,
1890 &ctask->sg[ctask->sg_count++]);
1892 BUG_ON(ctask->unsol_count < 0);
1895 * Done with the Data-Out. Next, check if we need
1896 * to send another unsolicited Data-Out.
1898 if (ctask->unsol_count) {
1899 if (conn->datadgst_en) {
1900 if (iscsi_digest_final_send(conn, ctask,
1902 &dtask->digest, 1)) {
1903 debug_tcp("send uns digest 0x%x fail\n",
1907 debug_tcp("sending uns digest 0x%x, more uns\n",
1910 ctask->xmstate |= XMSTATE_UNS_INIT;
1914 /* last Data-Out; if padding follows, digest goes after the pad */
1914 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1915 if (iscsi_digest_final_send(conn, ctask,
1917 &dtask->digest, 1)) {
1918 debug_tcp("send last uns digest 0x%x fail\n",
1922 debug_tcp("sending uns digest 0x%x\n",dtask->digest);
/*
 * handle_xmstate_sol_data - push solicited (R2T-driven) Data-Out
 * payload for the current R2T, then either continue the R2T sequence
 * (iscsi_solicit_data_cont()) or recycle the R2T slot and pick up the
 * next queued R2T.
 */
1929 handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1931 struct iscsi_session *session = conn->session;
1932 struct iscsi_r2t_info *r2t = ctask->r2t;
1933 struct iscsi_data_task *dtask = r2t->dtask;
1936 ctask->xmstate &= ~XMSTATE_SOL_DATA;
1937 ctask->dtask = dtask;
1939 if (conn->datadgst_en) {
1940 iscsi_data_digest_init(conn, ctask);
1945 * send Data-Out within this R2T sequence.
1947 if (!r2t->data_count)
1950 if (iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent)) {
1951 ctask->xmstate |= XMSTATE_SOL_DATA;
1952 /* will continue with this ctask later.. */
1953 if (conn->datadgst_en) {
1954 crypto_digest_final(conn->data_tx_tfm,
1955 (u8 *)&dtask->digest);
1956 debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
1961 BUG_ON(r2t->data_count < 0);
1962 if (conn->datadgst_en)
1963 iscsi_buf_data_digest_update(conn, &r2t->sendbuf);
1965 if (r2t->data_count) {
1966 /* mid-Data-Out: advance to the next S/G element */
1966 BUG_ON(ctask->sc->use_sg == 0);
1967 if (!iscsi_buf_left(&r2t->sendbuf)) {
1968 BUG_ON(ctask->bad_sg == r2t->sg);
1969 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1977 * Done with this Data-Out. Next, check if we have
1978 * to send another Data-Out for this R2T.
1980 BUG_ON(r2t->data_length - r2t->sent < 0);
1981 left = r2t->data_length - r2t->sent;
1983 if (conn->datadgst_en) {
1984 if (iscsi_digest_final_send(conn, ctask,
1986 &dtask->digest, 1)) {
1987 debug_tcp("send r2t data digest 0x%x"
1988 "fail\n", dtask->digest);
1991 debug_tcp("r2t data send digest 0x%x\n",
1994 iscsi_solicit_data_cont(conn, ctask, r2t, left);
1995 ctask->xmstate |= XMSTATE_SOL_DATA;
1996 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2001 * Done with this R2T. Check if there are more
2002 * outstanding R2Ts ready to be processed.
2004 BUG_ON(ctask->r2t_data_count - r2t->data_length < 0);
2005 if (conn->datadgst_en) {
2006 if (iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
2007 &dtask->digest, 1)) {
2008 debug_tcp("send last r2t data digest 0x%x"
2009 "fail\n", dtask->digest);
2012 debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
2015 ctask->r2t_data_count -= r2t->data_length;
2017 /* return the finished R2T slot to the pool, fetch the next one */
2017 spin_lock_bh(&session->lock);
2018 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
2019 spin_unlock_bh(&session->lock);
2020 if (__kfifo_get(ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
2022 ctask->xmstate |= XMSTATE_SOL_DATA;
2023 ctask->xmstate &= ~XMSTATE_SOL_HDR;
/*
 * handle_xmstate_w_pad - send the zero pad that rounds a write's
 * payload up to the 4-byte iSCSI boundary, then the deferred data
 * digest (immediate-data digest or the last Data-Out's digest,
 * depending on which path set up the padding).
 */
2031 handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2033 struct iscsi_data_task *dtask = ctask->dtask;
2036 ctask->xmstate &= ~XMSTATE_W_PAD;
2037 iscsi_buf_init_virt(&ctask->sendbuf, (char*)&ctask->pad,
2039 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->pad_count, &sent)) {
2040 ctask->xmstate |= XMSTATE_W_PAD;	/* retry padding later */
2044 if (conn->datadgst_en) {
2045 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
2048 /* no dtask: pad belongs to immediate data */
2048 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
2049 &ctask->immdigest, 1)) {
2050 debug_tcp("send padding digest 0x%x"
2051 "fail!\n", ctask->immdigest);
2054 debug_tcp("done with padding, digest 0x%x\n",
2057 if (iscsi_digest_final_send(conn, ctask,
2059 &dtask->digest, 1)) {
2060 debug_tcp("send padding digest 0x%x"
2061 "fail\n", dtask->digest);
2064 debug_tcp("done with padding, digest 0x%x\n",
/*
 * iscsi_ctask_xmit - drive one command task through its xmit state
 * machine: R/W header, (retried) data digest, immediate data,
 * unsolicited Data-Outs, solicited (R2T) Data-Outs, and finally
 * write padding. Each handler returns non-zero (-EAGAIN path) to
 * request a later retry.
 */
2073 iscsi_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2077 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
2078 conn->id, ctask->xmstate, ctask->itt);
2081 * serialize with TMF AbortTask
2086 if (ctask->xmstate & XMSTATE_R_HDR) {
2087 rc = handle_xmstate_r_hdr(conn, ctask);
2091 if (ctask->xmstate & XMSTATE_W_HDR) {
2092 rc = handle_xmstate_w_hdr(conn, ctask);
2097 /* XXX: for data digest xmit recover */
2098 if (ctask->xmstate & XMSTATE_DATA_DIGEST) {
2099 rc = handle_xmstate_data_digest(conn, ctask);
2104 if (ctask->xmstate & XMSTATE_IMM_DATA) {
2105 rc = handle_xmstate_imm_data(conn, ctask);
2110 if (ctask->xmstate & XMSTATE_UNS_HDR) {
2111 BUG_ON(!ctask->unsol_count);
2112 ctask->xmstate &= ~XMSTATE_UNS_HDR;
2113 unsolicit_head_again:
2114 rc = handle_xmstate_uns_hdr(conn, ctask);
2119 if (ctask->xmstate & XMSTATE_UNS_DATA) {
2120 rc = handle_xmstate_uns_data(conn, ctask);
2122 /* more unsolicited Data-Outs: loop back to build the next header */
2122 goto unsolicit_head_again;
2128 if (ctask->xmstate & XMSTATE_SOL_HDR) {
2129 struct iscsi_r2t_info *r2t;
2131 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2132 ctask->xmstate |= XMSTATE_SOL_DATA;
2134 /* no R2T in progress: pull the next one off the queue */
2134 __kfifo_get(ctask->r2tqueue, (void*)&ctask->r2t,
2138 if (conn->hdrdgst_en)
2139 iscsi_hdr_digest(conn, &r2t->headbuf,
2140 (u8*)r2t->dtask->hdrext);
2141 if (iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count)) {
2142 ctask->xmstate &= ~XMSTATE_SOL_DATA;
2143 ctask->xmstate |= XMSTATE_SOL_HDR;
2147 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
2148 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
2152 if (ctask->xmstate & XMSTATE_SOL_DATA) {
2153 rc = handle_xmstate_sol_data(conn, ctask);
2155 goto solicit_head_again;
2162 * Last thing to check is whether we need to send write
2163 * padding. Note that we check for xmstate equality, not just the bit.
2165 if (ctask->xmstate == XMSTATE_W_PAD)
2166 rc = handle_xmstate_w_pad(conn, ctask);
2172 * iscsi_data_xmit - xmit any command into the scheduled connection
2173 * @conn: iscsi connection
2176 * The function can return -EAGAIN in which case the caller must
2177 * re-schedule it again later or recover. '0' return code means
2181 iscsi_data_xmit(struct iscsi_conn *conn)
2183 if (unlikely(conn->suspend_tx)) {
2184 debug_tcp("conn %d Tx suspended!\n", conn->id);
2189 * Transmit in the following order:
2191 * 1) un-finished xmit (ctask or mtask)
2192 * 2) immediate control PDUs
2195 * 5) non-immediate control PDUs
2197 * No need to lock around __kfifo_get as long as
2198 * there's one producer and one consumer.
2201 BUG_ON(conn->ctask && conn->mtask);
2204 if (iscsi_ctask_xmit(conn, conn->ctask))
2206 /* done with this in-progress ctask */
2210 if (iscsi_mtask_xmit(conn, conn->mtask))
2212 /* done with this in-progress mtask */
2216 /* process immediate first */
2217 if (unlikely(__kfifo_len(conn->immqueue))) {
2218 struct iscsi_session *session = conn->session;
2219 while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
2221 if (iscsi_mtask_xmit(conn, conn->mtask))
2224 /* reserved-tag mtasks get no response; recycle them now */
2224 if (conn->mtask->hdr.itt ==
2225 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2226 spin_lock_bh(&session->lock);
2227 __kfifo_put(session->mgmtpool.queue,
2228 (void*)&conn->mtask, sizeof(void*));
2229 spin_unlock_bh(&session->lock);
2232 /* done with this mtask */
2236 /* process write queue */
2237 while (__kfifo_get(conn->writequeue, (void*)&conn->ctask,
2239 if (iscsi_ctask_xmit(conn, conn->ctask))
2243 /* process command queue */
2244 while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
2246 if (iscsi_ctask_xmit(conn, conn->ctask))
2249 /* done with this ctask */
2252 /* process the rest control plane PDUs, if any */
2253 if (unlikely(__kfifo_len(conn->mgmtqueue))) {
2254 struct iscsi_session *session = conn->session;
2256 while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
2258 if (iscsi_mtask_xmit(conn, conn->mtask))
2261 if (conn->mtask->hdr.itt ==
2262 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2263 spin_lock_bh(&session->lock);
2264 __kfifo_put(session->mgmtpool.queue,
2265 (void*)&conn->mtask,
2267 spin_unlock_bh(&session->lock);
2270 /* done with this mtask */
2277 /* re-check suspension once the queues are drained */
2277 if (unlikely(conn->suspend_tx))
/*
 * iscsi_xmitworker - workqueue entry point for the tx path.
 *
 * xmitsema serializes transmission per connection; a non-zero return
 * from iscsi_data_xmit() (-EAGAIN path) re-queues the work item.
 */
2284 iscsi_xmitworker(void *data)
2286 struct iscsi_conn *conn = data;
2289 * serialize Xmit worker on a per-connection basis.
2291 down(&conn->xmitsema);
2292 if (iscsi_data_xmit(conn))
2293 schedule_work(&conn->xmitwork);
2294 up(&conn->xmitsema);
/* queuecommand() rejection reasons, reported via debug_scsi/printk */
2297 #define FAILURE_BAD_HOST 1
2298 #define FAILURE_SESSION_FAILED 2
2299 #define FAILURE_SESSION_FREED 3
2300 #define FAILURE_WINDOW_CLOSED 4
2301 #define FAILURE_SESSION_TERMINATE 5
/*
 * iscsi_queuecommand - SCSI midlayer entry point: build an iSCSI
 * command task for @sc and queue it for transmission.
 *
 * Rejects commands when the session is not logged in or the CmdSN
 * window is closed (SCSI_MLQUEUE_HOST_BUSY), and fails them with a
 * NOT_READY sense + DID_NO_CONNECT when the session is terminating.
 * When safe (not in interrupt, semaphore uncontended) transmission is
 * attempted inline; otherwise the xmit worker is scheduled.
 */
2304 iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
2306 struct Scsi_Host *host;
2308 struct iscsi_session *session;
2309 struct iscsi_conn *conn = NULL;
2310 struct iscsi_cmd_task *ctask = NULL;
2312 sc->scsi_done = done;
2315 host = sc->device->host;
2316 session = iscsi_hostdata(host->hostdata);
2317 BUG_ON(host != session->host);
2319 spin_lock(&session->lock);
2321 if (session->state != ISCSI_STATE_LOGGED_IN) {
2322 if (session->state == ISCSI_STATE_FAILED) {
2323 reason = FAILURE_SESSION_FAILED;
2325 } else if (session->state == ISCSI_STATE_TERMINATE) {
2326 reason = FAILURE_SESSION_TERMINATE;
2329 reason = FAILURE_SESSION_FREED;
2334 * Check for iSCSI window and take care of CmdSN wrap-around
2336 if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
2337 reason = FAILURE_WINDOW_CLOSED;
2341 conn = session->leadconn;
2343 __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
2346 /* stash session age + ctask in the command for later validation */
2346 sc->SCp.phase = session->age;
2347 sc->SCp.ptr = (char*)ctask;
2348 iscsi_cmd_init(conn, ctask, sc);
2350 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
2352 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
2353 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
2354 conn->id, (long)sc, ctask->itt, sc->request_bufflen,
2355 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
2356 spin_unlock(&session->lock);
2358 /* try to xmit inline; host_lock must be dropped across the send */
2358 if (!in_interrupt() && !down_trylock(&conn->xmitsema)) {
2359 spin_unlock_irq(host->host_lock);
2360 if (iscsi_data_xmit(conn))
2361 schedule_work(&conn->xmitwork);
2362 up(&conn->xmitsema);
2363 spin_lock_irq(host->host_lock);
2365 schedule_work(&conn->xmitwork);
2370 spin_unlock(&session->lock);
2371 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
2372 return SCSI_MLQUEUE_HOST_BUSY;
2375 spin_unlock(&session->lock);
2376 printk(KERN_ERR "iscsi_tcp: cmd 0x%x is not queued (%d)\n",
2377 sc->cmnd[0], reason);
2378 /* fabricate NOT_READY sense data for the failed command */
2378 sc->sense_buffer[0] = 0x70;
2379 sc->sense_buffer[2] = NOT_READY;
2380 sc->sense_buffer[7] = 0x6;
2381 sc->sense_buffer[12] = 0x08;
2382 sc->sense_buffer[13] = 0x00;
2383 sc->result = (DID_NO_CONNECT << 16);
2384 sc->resid = sc->request_bufflen;
/*
 * iscsi_pool_init - allocate a fixed-size object pool backed by a
 * kfifo free-list.
 * @q: pool to initialize
 * @max: number of items
 * @items: out - array of pointers to all allocated items
 * @item_size: size of each item
 *
 * Each item is zeroed and pushed onto the kfifo; on mid-loop failure
 * everything allocated so far is unwound.
 */
2390 iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
2394 *items = kmalloc(max * sizeof(void*), GFP_KERNEL);
2399 q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
2400 if (q->pool == NULL) {
2405 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
2407 if (q->queue == ERR_PTR(-ENOMEM)) {
2413 for (i = 0; i < max; i++) {
2414 q->pool[i] = kmalloc(item_size, GFP_KERNEL);
2415 if (q->pool[i] == NULL) {
2418 /* unwind partially-built pool */
2418 for (j = 0; j < i; j++)
2421 kfifo_free(q->queue);
2426 memset(q->pool[i], 0, item_size);
2427 (*items)[i] = q->pool[i];
2428 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
/*
 * iscsi_pool_free - release a pool built by iscsi_pool_init().
 * Frees every item, then (presumably - body truncated in this view)
 * the pool array and @items.
 */
2434 iscsi_pool_free(struct iscsi_queue *q, void **items)
2438 for (i = 0; i < q->max; i++)
/*
 * iscsi_conn_create - allocate and initialize one iSCSI connection for
 * @sessionh: xmit kfifos (xmit/write/imm/mgmt), the xmit work item,
 * the login mtask reserved from the session's mgmt pool, and the
 * initial PDU receive buffer (kmalloc or page allocation depending on
 * MaxRecvDataSegmentLength). Unwinds via the goto-label chain on any
 * allocation failure.
 */
2444 static iscsi_connh_t
2445 iscsi_conn_create(iscsi_sessionh_t sessionh, uint32_t conn_idx)
2447 struct iscsi_session *session = iscsi_ptr(sessionh);
2448 struct iscsi_conn *conn = NULL;
2450 conn = kmalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
2452 goto conn_alloc_fail;
2453 memset(conn, 0, sizeof(struct iscsi_conn));
2455 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2456 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2457 conn->id = conn_idx;
2458 conn->exp_statsn = 0;
2459 conn->tmabort_state = TMABORT_INITIAL;
2461 /* initial operational parameters */
2462 conn->hdr_size = sizeof(struct iscsi_hdr);
2463 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2464 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2466 spin_lock_init(&conn->lock);
2468 /* initialize general xmit PDU commands queue */
2469 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
2471 if (conn->xmitqueue == ERR_PTR(-ENOMEM))
2472 goto xmitqueue_alloc_fail;
2474 /* initialize write response PDU commands queue */
2475 conn->writequeue = kfifo_alloc(session->cmds_max * sizeof(void*),
2477 if (conn->writequeue == ERR_PTR(-ENOMEM))
2478 goto writequeue_alloc_fail;
2480 /* initialize general immediate & non-immediate PDU commands queue */
2481 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
2483 if (conn->immqueue == ERR_PTR(-ENOMEM))
2484 goto immqueue_alloc_fail;
2486 conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
2488 if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
2489 goto mgmtqueue_alloc_fail;
2491 INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);
2493 /* allocate login_mtask used for the login/text sequences */
2494 spin_lock_bh(&session->lock);
2495 if (!__kfifo_get(session->mgmtpool.queue,
2496 (void*)&conn->login_mtask,
2498 spin_unlock_bh(&session->lock);
2499 goto login_mtask_alloc_fail;
2501 spin_unlock_bh(&session->lock);
2503 /* allocate initial PDU receive place holder */
2504 if (conn->data_size <= PAGE_SIZE)
2505 conn->data = kmalloc(conn->data_size, GFP_KERNEL);
2507 conn->data = (void*)__get_free_pages(GFP_KERNEL,
2508 get_order(conn->data_size));
2510 goto max_recv_dlenght_alloc_fail;
2512 init_timer(&conn->tmabort_timer);
2513 init_MUTEX(&conn->xmitsema);
2514 init_waitqueue_head(&conn->ehwait);
2516 return iscsi_handle(conn);
2518 /* error unwind: release resources in reverse order of acquisition */
2518 max_recv_dlenght_alloc_fail:
2519 spin_lock_bh(&session->lock);
2520 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2522 spin_unlock_bh(&session->lock);
2523 login_mtask_alloc_fail:
2524 kfifo_free(conn->mgmtqueue);
2525 mgmtqueue_alloc_fail:
2526 kfifo_free(conn->immqueue);
2527 immqueue_alloc_fail:
2528 kfifo_free(conn->writequeue);
2529 writequeue_alloc_fail:
2530 kfifo_free(conn->xmitqueue);
2531 xmitqueue_alloc_fail:
2534 return iscsi_handle(NULL);
/*
 * iscsi_conn_destroy - tear down a connection: suspend tx/rx, detach
 * socket callbacks if the connection never started, wait for
 * in-flight commands to drain, free the digest crypto transforms and
 * the receive buffer, unlink from the session's connection list
 * (electing a new lead connection or resetting sequencing), and free
 * the xmit kfifos.
 */
2538 iscsi_conn_destroy(iscsi_connh_t connh)
2540 struct iscsi_conn *conn = iscsi_ptr(connh);
2541 struct iscsi_session *session = conn->session;
2543 down(&conn->xmitsema);
2544 set_bit(SUSPEND_BIT, &conn->suspend_tx);
2545 if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE && conn->sock) {
2546 struct sock *sk = conn->sock->sk;
2549 * conn_start() has never been called!
2550 * need to cleanup the socket.
2552 write_lock_bh(&sk->sk_callback_lock);
2553 set_bit(SUSPEND_BIT, &conn->suspend_rx);
2554 write_unlock_bh(&sk->sk_callback_lock);
2556 sock_hold(conn->sock->sk);	/* keep sk alive across callback restore */
2557 iscsi_conn_restore_callbacks(conn);
2558 sock_put(conn->sock->sk);
2559 sock_release(conn->sock);
2563 spin_lock_bh(&session->lock);
2564 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2565 if (session->leadconn == conn) {
2567 * leading connection? then give up on recovery.
2569 session->state = ISCSI_STATE_TERMINATE;
2570 wake_up(&conn->ehwait);
2572 spin_unlock_bh(&session->lock);
2574 up(&conn->xmitsema);
2577 * Block until all in-progress commands for this connection
2581 spin_lock_bh(&conn->lock);
2582 if (!session->host->host_busy) { /* OK for ERL == 0 */
2583 spin_unlock_bh(&conn->lock);
2586 spin_unlock_bh(&conn->lock);
2587 msleep_interruptible(500);
2588 printk("conn_destroy(): host_busy %d host_failed %d\n",
2589 session->host->host_busy, session->host->host_failed);
2591 * force eh_abort() to unblock
2593 wake_up(&conn->ehwait);
2596 /* now free crypto */
2597 if (conn->hdrdgst_en || conn->datadgst_en) {
2599 crypto_free_tfm(conn->tx_tfm);
2601 crypto_free_tfm(conn->rx_tfm);
2602 if (conn->data_tx_tfm)
2603 crypto_free_tfm(conn->data_tx_tfm);
2604 if (conn->data_rx_tfm)
2605 crypto_free_tfm(conn->data_rx_tfm);
2608 /* free conn->data, size = MaxRecvDataSegmentLength */
2609 if (conn->data_size <= PAGE_SIZE)
2612 free_pages((unsigned long)conn->data,
2613 get_order(conn->data_size));
2615 spin_lock_bh(&session->lock);
2616 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2618 list_del(&conn->item);
2619 if (list_empty(&session->connections))
2620 session->leadconn = NULL;
2621 if (session->leadconn && session->leadconn == conn)
2622 session->leadconn = container_of(session->connections.next,
2623 struct iscsi_conn, item);
2625 if (session->leadconn == NULL)
2626 /* no connections exist.. reset sequencing */
2627 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
2628 spin_unlock_bh(&session->lock);
2630 kfifo_free(conn->xmitqueue);
2631 kfifo_free(conn->writequeue);
2632 kfifo_free(conn->immqueue);
2633 kfifo_free(conn->mgmtqueue);
/*
 * iscsi_conn_bind - bind a connection to its session and to the
 * userspace-established TCP socket @transport_fd.
 *
 * Verifies no conflicting connection exists, links the connection into
 * the session, and (unless resuming from STOP_CONN_SUSPEND) configures
 * the socket and intercepts its callbacks. Finally unblocks tx/rx so
 * the Login phase can proceed.
 */
2638 iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh,
2639 uint32_t transport_fd, int is_leading)
2641 struct iscsi_session *session = iscsi_ptr(sessionh);
2642 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh);
2644 struct socket *sock;
2647 /* lookup for existing socket */
2648 sock = sockfd_lookup(transport_fd, &err);
2650 printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
2654 /* lookup for existing connection */
2655 spin_lock_bh(&session->lock);
2656 list_for_each_entry(tmp, &session->connections, item) {
2658 if (conn->c_stage != ISCSI_CONN_STOPPED ||
2659 conn->stop_stage == STOP_CONN_TERM) {
2660 printk(KERN_ERR "iscsi_tcp: can't bind "
2661 "non-stopped connection (%d:%d)\n",
2662 conn->c_stage, conn->stop_stage);
2663 spin_unlock_bh(&session->lock);
2670 /* bind new iSCSI connection to session */
2671 conn->session = session;
2673 list_add(&conn->item, &session->connections);
2675 spin_unlock_bh(&session->lock);
2677 if (conn->stop_stage != STOP_CONN_SUSPEND) {
2678 /* bind iSCSI connection and socket */
2681 /* setup Socket parameters */
2684 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
2685 sk->sk_allocation = GFP_ATOMIC;	/* callbacks run in softirq context */
2687 /* FIXME: disable Nagle's algorithm */
2690 * Intercept TCP callbacks for sendfile like receive
2693 iscsi_conn_set_callbacks(conn);
2696 * set receive state machine into initial state
2698 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2702 session->leadconn = conn;
2705 * Unblock xmitworker(), Login Phase will pass through.
2707 clear_bit(SUSPEND_BIT, &conn->suspend_rx);
2708 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
/*
 * iscsi_conn_start - transition a bound connection into full-feature phase.
 *
 * Moves the connection to ISCSI_CONN_STARTED and the session to
 * ISCSI_STATE_LOGGED_IN under both the socket callback lock and the
 * session lock.  The stop_stage switch distinguishes how we got here:
 * recovery re-arms TM-abort state and wakes any blocked eh_abort(),
 * termination/first-start bumps the connection count, and resume from
 * suspend only clears the RX/TX suspend bits.
 */
2714 iscsi_conn_start(iscsi_connh_t connh)
2716 	struct iscsi_conn *conn = iscsi_ptr(connh);
2717 	struct iscsi_session *session = conn->session;
2720 	/* FF phase warming up... */
2722 	if (session == NULL) {
2723 		printk(KERN_ERR "iscsi_tcp: can't start unbound connection\n");
2727 	sk = conn->sock->sk;
2729 	write_lock_bh(&sk->sk_callback_lock);
2730 	spin_lock_bh(&session->lock);
2731 	conn->c_stage = ISCSI_CONN_STARTED;
2732 	session->state = ISCSI_STATE_LOGGED_IN;
2734 	switch(conn->stop_stage) {
2735 	case STOP_CONN_RECOVER:
2737 		 * unblock eh_abort() if it is blocked. re-try all
2738 		 * commands after successful recovery
2740 		session->conn_cnt++;
2741 		conn->stop_stage = 0;
2742 		conn->tmabort_state = TMABORT_INITIAL;
2744 		wake_up(&conn->ehwait);
2746 	case STOP_CONN_TERM:
2747 		session->conn_cnt++;
2748 		conn->stop_stage = 0;
2750 	case STOP_CONN_SUSPEND:
2751 		conn->stop_stage = 0;
		/* resume the data path that iscsi_conn_stop() quiesced */
2752 		clear_bit(SUSPEND_BIT, &conn->suspend_rx);
2753 		clear_bit(SUSPEND_BIT, &conn->suspend_tx);
2758 	spin_unlock_bh(&session->lock);
2759 	write_unlock_bh(&sk->sk_callback_lock);
/*
 * iscsi_conn_stop - quiesce a connection (suspend, recovery or terminate).
 *
 * Suspends RX under the socket callback lock, takes xmitsema to drain
 * the transmit path, then marks the connection stopped and suspends TX.
 * For STOP_CONN_TERM / STOP_CONN_RECOVER it additionally restores the
 * original socket callbacks, flushes the write/xmit queues (returning
 * each task's R2Ts to their pool), flushes the imm/mgmt queues, and
 * releases the socket.  For recovery the cached header size is reset to
 * a bare iscsi_hdr, since header-digest will be re-negotiated.
 *
 * NOTE(review): the loop bodies below are partially elided in this
 * excerpt; the pairing of __kfifo_get/__kfifo_put should be confirmed
 * against the full file.
 */
2765 iscsi_conn_stop(iscsi_connh_t connh, int flag)
2767 	struct iscsi_conn *conn = iscsi_ptr(connh);
2768 	struct iscsi_session *session = conn->session;
2770 	unsigned long flags;
2772 	BUG_ON(!conn->sock);
2773 	sk = conn->sock->sk;
2774 	write_lock_bh(&sk->sk_callback_lock);
2775 	set_bit(SUSPEND_BIT, &conn->suspend_rx);
2776 	write_unlock_bh(&sk->sk_callback_lock);
	/* wait for any in-flight data_xmit() to finish before stopping */
2778 	down(&conn->xmitsema);
2780 	spin_lock_irqsave(session->host->host_lock, flags);
2781 	spin_lock(&session->lock);
2782 	conn->stop_stage = flag;
2783 	conn->c_stage = ISCSI_CONN_STOPPED;
2784 	set_bit(SUSPEND_BIT, &conn->suspend_tx);
2786 	if (flag != STOP_CONN_SUSPEND)
2787 		session->conn_cnt--;
	/* losing the last or the leading connection fails the session */
2789 	if (session->conn_cnt == 0 || session->leadconn == conn)
2790 		session->state = ISCSI_STATE_FAILED;
2792 	spin_unlock(&session->lock);
2793 	spin_unlock_irqrestore(session->host->host_lock, flags);
2795 	if (flag == STOP_CONN_TERM || flag == STOP_CONN_RECOVER) {
2796 		struct iscsi_cmd_task *ctask;
2797 		struct iscsi_mgmt_task *mtask;
2800 		 * Socket must go now.
2802 		sock_hold(conn->sock->sk);
2803 		iscsi_conn_restore_callbacks(conn);
2804 		sock_put(conn->sock->sk);
2807 		 * flush xmit queues.
2809 		spin_lock_bh(&session->lock);
2810 		while (__kfifo_get(conn->writequeue, (void*)&ctask,
2812 		       __kfifo_get(conn->xmitqueue, (void*)&ctask,
2814 			struct iscsi_r2t_info *r2t;
2817 			 * flush ctask's r2t queues
2819 			while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
2821 				__kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
			/* drop the lock around cleanup: it may sleep/free */
2824 			spin_unlock_bh(&session->lock);
2826 			iscsi_ctask_cleanup(conn, ctask);
2828 			spin_lock_bh(&session->lock);
2831 		while (__kfifo_get(conn->immqueue, (void*)&mtask,
2833 		       __kfifo_get(conn->mgmtqueue, (void*)&mtask,
2835 			__kfifo_put(session->mgmtpool.queue,
2836 				    (void*)&mtask, sizeof(void*));
2839 		spin_unlock_bh(&session->lock);
2842 		 * release socket only after we stopped data_xmit()
2843 		 * activity and flushed all outstandings
2845 		sock_release(conn->sock);
2849 	 * for connection level recovery we should not calculate
2850 	 * header digest. conn->hdr_size used for optimization
2851 	 * in hdr_extract() and will be re-negotiated at
2854 	if (flag == STOP_CONN_RECOVER)
2855 		conn->hdr_size = sizeof(struct iscsi_hdr);
2857 	up(&conn->xmitsema);
/*
 * iscsi_conn_send_generic - queue a management/control PDU for transmit.
 *
 * Login and Text PDUs reuse the preallocated login_mtask (so the same
 * ITT is kept across the request/response exchange); all other opcodes
 * take an mtask from the session mgmt pool.  CmdSN and ExpStatSN are
 * pre-formatted here under the session lock; immediate PDUs and PDUs
 * with the reserved tag do not advance CmdSN.  The header is copied
 * into the mtask so the caller's buffer may be reused.  The PDU is then
 * put on immqueue or mgmtqueue and xmitwork is scheduled.
 *
 * Caller must hold conn->xmitsema (see iscsi_conn_send_pdu) so that
 * the unlocked __kfifo_put calls are serialized.
 */
2861 iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
2862 			char *data, uint32_t data_size)
2864 	struct iscsi_session *session = conn->session;
2865 	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
2866 	struct iscsi_mgmt_task *mtask;
2868 	spin_lock_bh(&session->lock);
2869 	if (session->state == ISCSI_STATE_TERMINATE) {
2870 		spin_unlock_bh(&session->lock);
2873 	if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
2874 	    hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
2876 		 * Login and Text are sent serially, in
2877 		 * request-followed-by-response sequence.
2878 		 * Same mtask can be used. Same ITT must be used.
2879 		 * Note that login_mtask is preallocated at conn_create().
2881 		mtask = conn->login_mtask;
2883 		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
2884 		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
2886 		if (!__kfifo_get(session->mgmtpool.queue,
2887 				 (void*)&mtask, sizeof(void*))) {
2888 			spin_unlock_bh(&session->lock);
2894 	 * pre-format CmdSN and ExpStatSN for outgoing PDU.
2896 	if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
		/* encode mtask index, connection id and session age in the ITT */
2897 		hdr->itt = mtask->itt | (conn->id << CID_SHIFT) |
2898 			   (session->age << AGE_SHIFT);
2899 		nop->cmdsn = cpu_to_be32(session->cmdsn);
2900 		if (conn->c_stage == ISCSI_CONN_STARTED &&
2901 		    !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2904 		/* do not advance CmdSN */
2905 		nop->cmdsn = cpu_to_be32(session->cmdsn);
2907 	nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
	/* copy header so caller's buffer may be reused after return */
2909 	memcpy(&mtask->hdr, hdr, sizeof(struct iscsi_hdr));
2911 	iscsi_buf_init_virt(&mtask->headbuf, (char*)&mtask->hdr,
2912 			    sizeof(struct iscsi_hdr));
2914 	spin_unlock_bh(&session->lock);
2917 		memcpy(mtask->data, data, data_size);
2918 		mtask->data_count = data_size;
2920 		mtask->data_count = 0;
2922 	mtask->xmstate = XMSTATE_IMM_HDR;
2924 	if (mtask->data_count) {
2925 		iscsi_buf_init_iov(&mtask->sendbuf, (char*)mtask->data,
2929 	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
2930 		   hdr->opcode, hdr->itt, data_size);
2933 	 * since send_pdu() could be called at least from two contexts,
2934 	 * we need to serialize __kfifo_put, so we don't have to take
2935 	 * additional lock on fast data-path
2937 	if (hdr->opcode & ISCSI_OP_IMMEDIATE)
2938 		__kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
2940 		__kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
2942 	schedule_work(&conn->xmitwork);
/*
 * iscsi_eh_host_reset - SCSI EH host-reset handler.
 *
 * If the session is already terminating there is nothing to reset and
 * the handler fails; otherwise the connection carrying @sc is failed
 * with ISCSI_ERR_CONN_FAILED, which kicks session recovery.
 */
2948 iscsi_eh_host_reset(struct scsi_cmnd *sc)
2950 	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
2951 	struct iscsi_conn *conn = ctask->conn;
2952 	struct iscsi_session *session = conn->session;
2954 	spin_lock_bh(&session->lock);
2955 	if (session->state == ISCSI_STATE_TERMINATE) {
2956 		debug_scsi("failing host reset: session terminated "
2957 			   "[CID %d age %d]", conn->id, session->age);
2958 		spin_unlock_bh(&session->lock);
2961 	spin_unlock_bh(&session->lock);
2963 	debug_scsi("failing connection CID %d due to SCSI host reset "
2964 		   "[itt 0x%x age %d]", conn->id, ctask->itt,
2966 	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
/*
 * iscsi_tmabort_timedout - timer callback for an outstanding TM abort.
 *
 * Runs when no TMF response arrived within the timeout armed by
 * iscsi_eh_abort().  If the abort is still in TMABORT_INITIAL state the
 * mtask is returned to the mgmt pool, the state is set to
 * TMABORT_TIMEDOUT, and the blocked eh_abort() thread is woken.
 * Timer context: uses spin_lock(), not the _bh variant.
 */
2972 iscsi_tmabort_timedout(unsigned long data)
2974 	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
2975 	struct iscsi_conn *conn = ctask->conn;
2976 	struct iscsi_session *session = conn->session;
2978 	spin_lock(&session->lock);
2979 	if (conn->tmabort_state == TMABORT_INITIAL) {
2980 		__kfifo_put(session->mgmtpool.queue,
2981 				(void*)&ctask->mtask, sizeof(void*));
2982 		conn->tmabort_state = TMABORT_TIMEDOUT;
2983 		debug_scsi("tmabort timedout [sc %lx itt 0x%x]\n",
2984 			(long)ctask->sc, ctask->itt);
2985 		/* unblock eh_abort() */
2986 		wake_up(&conn->ehwait);
2988 	spin_unlock(&session->lock);
/*
 * iscsi_eh_abort - SCSI EH abort handler for a timed-out command.
 *
 * ERL=0 logic: if the session is no longer logged in we either fail
 * (terminating) or fall through to the wait below (recovery pending).
 * While logged in, a TMF ABORT TASK PDU is built and sent via
 * iscsi_conn_send_generic(); a 3*HZ timer (iscsi_tmabort_timedout) is
 * armed against a lost response.  The EH thread then blocks on
 * conn->ehwait until one of: abort response, timeout, session re-open,
 * or session termination.  The ctask's r2tqueue is flushed to avoid
 * R2T leaks, and on success the ctask is cleaned up under the socket
 * callback lock.
 *
 * NOTE(review): this excerpt elides many lines (returns, labels,
 * retry loop); the exact success/failure paths must be read from the
 * full file.
 */
2992 iscsi_eh_abort(struct scsi_cmnd *sc)
2995 	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
2996 	struct iscsi_conn *conn = ctask->conn;
2997 	struct iscsi_session *session = conn->session;
2999 	conn->eh_abort_cnt++;
3000 	debug_scsi("aborting [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
3003 	 * two cases for ERL=0 here:
3005 	 * 1) connection-level failure;
3006 	 * 2) recovery due protocol error;
3008 	down(&conn->xmitsema);
3009 	spin_lock_bh(&session->lock);
3010 	if (session->state != ISCSI_STATE_LOGGED_IN) {
3011 		if (session->state == ISCSI_STATE_TERMINATE) {
3012 			spin_unlock_bh(&session->lock);
3013 			up(&conn->xmitsema);
3016 		spin_unlock_bh(&session->lock);
3018 		struct iscsi_tm *hdr = &conn->tmhdr;
3021 		 * Still LOGGED_IN...
3024 		if (!ctask->sc || sc->SCp.phase != session->age) {
3026 			 * 1) ctask completed before time out. But session
3027 			 *    is still ok => Happy Retry.
3028 			 * 2) session was re-open during time out of ctask.
3030 			spin_unlock_bh(&session->lock);
3031 			up(&conn->xmitsema);
3034 		conn->tmabort_state = TMABORT_INITIAL;
3035 		spin_unlock_bh(&session->lock);
3038 		 * ctask timed out but session is OK
3039 		 * ERL=0 requires task mgmt abort to be issued on each
3040 		 * failed command. requests must be serialized.
3042 		memset(hdr, 0, sizeof(struct iscsi_tm));
3043 		hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
3044 		hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
3045 		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3046 		memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
		/* RTT/RefCmdSN identify the task being aborted (RFC 3720) */
3047 		hdr->rtt = ctask->hdr.itt;
3048 		hdr->refcmdsn = ctask->hdr.cmdsn;
3050 		rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
3053 			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3054 			debug_scsi("abort sent failure [itt 0x%x]", ctask->itt);
3056 			struct iscsi_r2t_info *r2t;
3059 			 * TMF abort vs. TMF response race logic
3061 			spin_lock_bh(&session->lock);
			/* recover the mtask chosen by send_generic from its ITT */
3062 			ctask->mtask = (struct iscsi_mgmt_task *)
3063 				session->mgmt_cmds[(hdr->itt & ITT_MASK) -
3064 							ISCSI_MGMT_ITT_OFFSET];
3066 			 * have to flush r2tqueue to avoid r2t leaks
3068 			while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
3070 				__kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
3073 			if (conn->tmabort_state == TMABORT_INITIAL) {
3074 				conn->tmfcmd_pdus_cnt++;
3075 				conn->tmabort_timer.expires = 3*HZ + jiffies;
3076 				conn->tmabort_timer.function =
3077 						iscsi_tmabort_timedout;
3078 				conn->tmabort_timer.data = (unsigned long)ctask;
3079 				add_timer(&conn->tmabort_timer);
3080 				debug_scsi("abort sent [itt 0x%x]", ctask->itt);
3083 				    conn->tmabort_state == TMABORT_SUCCESS) {
3084 					conn->tmabort_state = TMABORT_INITIAL;
3085 					spin_unlock_bh(&session->lock);
3086 					up(&conn->xmitsema);
3089 					conn->tmabort_state = TMABORT_INITIAL;
3090 					iscsi_conn_failure(conn,
3091 							ISCSI_ERR_CONN_FAILED);
/*
 * iscsi_r2tpool_alloc - allocate per-command R2T resources.
 *
 * For every command task in the session: an R2T pool sized at
 * 4 * max_r2t (over-provisioned against fast target ACKs), a kfifo
 * xmit queue of the same depth, and a mempool of data-out tasks
 * backed by the global taskcache.  On any failure, everything
 * allocated for earlier tasks is torn down (r2t_alloc_fail path).
 */
3174 iscsi_r2tpool_alloc(struct iscsi_session *session)
3180 	 * initialize per-task: R2T pool and xmit queue
3182 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
3183 		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
3186 		 * pre-allocated x4 as much r2ts to handle race when
3187 		 * target acks DataOut faster than we data_xmit() queues
3188 		 * could replenish r2tqueue.
3192 		if (iscsi_pool_init(&ctask->r2tpool, session->max_r2t * 4,
3193 			(void***)&ctask->r2ts, sizeof(struct iscsi_r2t_info))) {
3194 			goto r2t_alloc_fail;
3197 		/* R2T xmit queue */
3198 		ctask->r2tqueue = kfifo_alloc(
3199 		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
3200 		if (ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
3201 			iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
3202 			goto r2t_alloc_fail;
3207 		 * Data-Out PDU's within R2T-sequence can be quite big;
3210 		ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX,
3211 			 mempool_alloc_slab, mempool_free_slab, taskcache);
3212 		if (ctask->datapool == NULL) {
3213 			kfifo_free(ctask->r2tqueue);
3214 			iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
3215 			goto r2t_alloc_fail;
3217 		INIT_LIST_HEAD(&ctask->dataqueue);
	/* unwind: free resources of all tasks allocated before the failure */
3223 	for (i = 0; i < cmd_i; i++) {
3224 		mempool_destroy(session->cmds[i]->datapool);
3225 		kfifo_free(session->cmds[i]->r2tqueue);
3226 		iscsi_pool_free(&session->cmds[i]->r2tpool,
3227 				(void**)session->cmds[i]->r2ts);
/*
 * iscsi_r2tpool_free - release all per-command R2T resources.
 * Exact inverse of iscsi_r2tpool_alloc(): destroys each command task's
 * data mempool, R2T kfifo and R2T pool.
 */
3233 iscsi_r2tpool_free(struct iscsi_session *session)
3237 	for (i = 0; i < session->cmds_max; i++) {
3238 		mempool_destroy(session->cmds[i]->datapool);
3239 		kfifo_free(session->cmds[i]->r2tqueue);
3240 		iscsi_pool_free(&session->cmds[i]->r2tpool,
3241 				(void**)session->cmds[i]->r2ts);
/*
 * SCSI host template for the iSCSI/TCP initiator: wires the mid-layer
 * queuecommand and error-handler entry points to this driver.
 * can_queue is one less than the xmit cmd pool so a slot is always
 * available internally; clustering is disabled for sg handling.
 */
3245 static struct scsi_host_template iscsi_sht = {
3246 	.name			= "iSCSI Initiator over TCP/IP, v."
3248 	.queuecommand           = iscsi_queuecommand,
3249 	.can_queue		= ISCSI_XMIT_CMDS_MAX - 1,
3250 	.sg_tablesize		= ISCSI_SG_TABLESIZE,
3251 	.cmd_per_lun		= ISCSI_CMD_PER_LUN,
3252 	.eh_abort_handler       = iscsi_eh_abort,
3253 	.eh_host_reset_handler	= iscsi_eh_host_reset,
3254 	.use_clustering         = DISABLE_CLUSTERING,
3255 	.proc_name		= "iscsi_tcp",
/*
 * iscsi_session_create - allocate and initialize session state in the
 * Scsi_Host's hostdata.
 *
 * Zeroes the embedded iscsi_session, seeds the CmdSN window from
 * @initial_cmdsn (exp/max = initial + 1), then builds: the SCSI command
 * task pool (ITTs pre-formatted as pool indices), the management task
 * pool (ITTs offset by ISCSI_MGMT_ITT_OFFSET, each with a data buffer
 * of DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH), and the per-command R2T
 * pools.  Failure paths unwind in reverse order of allocation and
 * return a NULL handle.
 */
3259 static iscsi_sessionh_t
3260 iscsi_session_create(uint32_t initial_cmdsn, struct Scsi_Host *host)
3263 	struct iscsi_session *session;
3265 	session = iscsi_hostdata(host->hostdata);
3266 	memset(session, 0, sizeof(struct iscsi_session));
3268 	session->host = host;
3269 	session->id = host->host_no;
3270 	session->state = ISCSI_STATE_LOGGED_IN;
3271 	session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
3272 	session->cmds_max = ISCSI_XMIT_CMDS_MAX;
3273 	session->cmdsn = initial_cmdsn;
3274 	session->exp_cmdsn = initial_cmdsn + 1;
3275 	session->max_cmdsn = initial_cmdsn + 1;
3276 	session->max_r2t = 1;
3278 	/* initialize SCSI PDU commands pool */
3279 	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
3280 		(void***)&session->cmds, sizeof(struct iscsi_cmd_task)))
3281 		goto cmdpool_alloc_fail;
3283 	/* pre-format cmds pool with ITT */
3284 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++)
3285 		session->cmds[cmd_i]->itt = cmd_i;
3287 	spin_lock_init(&session->lock);
3288 	INIT_LIST_HEAD(&session->connections);
3290 	/* initialize immediate command pool */
3291 	if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
3292 		(void***)&session->mgmt_cmds, sizeof(struct iscsi_mgmt_task)))
3293 		goto mgmtpool_alloc_fail;
3296 	/* pre-format immediate cmds pool with ITT */
3297 	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
3298 		session->mgmt_cmds[cmd_i]->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
3299 		session->mgmt_cmds[cmd_i]->data = kmalloc(
3300 			DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
3301 		if (!session->mgmt_cmds[cmd_i]->data) {
			/* free the data buffers allocated so far, then unwind */
3304 			for (j = 0; j < cmd_i; j++)
3305 				kfree(session->mgmt_cmds[j]->data);
3306 			goto immdata_alloc_fail;
3310 	if (iscsi_r2tpool_alloc(session))
3311 		goto r2tpool_alloc_fail;
3313 	return iscsi_handle(session);
	/* error unwinding: reverse order of the allocations above */
3316 	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
3317 		kfree(session->mgmt_cmds[cmd_i]->data);
3318 	iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3320 mgmtpool_alloc_fail:
3321 	iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
3323 	return iscsi_handle(NULL);
/*
 * iscsi_session_destroy - free everything iscsi_session_create() built.
 *
 * Drains any data tasks still sitting on each command task's dataqueue
 * back into the mempool, frees the mgmt data buffers, then releases the
 * R2T pools and both task pools.
 */
3327 iscsi_session_destroy(iscsi_sessionh_t sessionh)
3330 	struct iscsi_data_task *dtask, *n;
3331 	struct iscsi_session *session = iscsi_ptr(sessionh);
3333 	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
3334 		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
3335 		list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
3336 			list_del(&dtask->item);
3337 			mempool_free(dtask, ctask->datapool);
3341 	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
3342 		kfree(session->mgmt_cmds[cmd_i]->data);
3344 	iscsi_r2tpool_free(session);
3345 	iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
3346 	iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
/*
 * iscsi_conn_set_param - apply a negotiated iSCSI parameter.
 *
 * Parameters may only change while the connection is in its initial
 * stage or stopped for recovery.  MAX_RECV_DLENGTH may reallocate the
 * connection receive buffer (kmalloc up to one page, __get_free_pages
 * beyond); the digest-enable cases allocate or free crc32c crypto
 * transforms for header/data digests; MAX_R2T rounds the value up to a
 * power of two and rebuilds the R2T pools; the remaining cases are
 * plain assignments into the session.
 *
 * NOTE(review): several case bodies are elided in this excerpt
 * (breaks, error returns, tx_tfm failure handling); verify against the
 * full file.
 */
3350 iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param,
3353 	struct iscsi_conn *conn = iscsi_ptr(connh);
3354 	struct iscsi_session *session = conn->session;
3356 	spin_lock_bh(&session->lock);
3357 	if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
3358 	    conn->stop_stage != STOP_CONN_RECOVER) {
3359 		printk(KERN_ERR "iscsi_tcp: can not change parameter [%d]\n",
3361 		spin_unlock_bh(&session->lock);
3364 	spin_unlock_bh(&session->lock);
3367 	case ISCSI_PARAM_MAX_RECV_DLENGTH: {
3368 		char *saveptr = conn->data;
3369 		int flags = GFP_KERNEL;
		/* existing buffer already big enough: just record the value */
3371 		if (conn->data_size >= value) {
3372 			conn->max_recv_dlength = value;
3376 		spin_lock_bh(&session->lock);
3377 		if (conn->stop_stage == STOP_CONN_RECOVER)
3379 		spin_unlock_bh(&session->lock);
3381 		if (value <= PAGE_SIZE)
3382 			conn->data = kmalloc(value, flags);
3384 			conn->data = (void*)__get_free_pages(flags,
3386 		if (conn->data == NULL) {
			/* allocation failed: keep the old buffer */
3387 			conn->data = saveptr;
3390 		if (conn->data_size <= PAGE_SIZE)
3393 			free_pages((unsigned long)saveptr,
3394 				   get_order(conn->data_size));
3395 		conn->max_recv_dlength = value;
3396 		conn->data_size = value;
3399 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3400 		conn->max_xmit_dlength =  value;
3402 	case ISCSI_PARAM_HDRDGST_EN:
3403 		conn->hdrdgst_en = value;
3404 		conn->hdr_size = sizeof(struct iscsi_hdr);
3405 		if (conn->hdrdgst_en) {
3406 			conn->hdr_size += sizeof(__u32);
3408 				conn->tx_tfm = crypto_alloc_tfm("crc32c", 0);
3412 				conn->rx_tfm = crypto_alloc_tfm("crc32c", 0);
3413 				if (!conn->rx_tfm) {
3414 					crypto_free_tfm(conn->tx_tfm);
3419 				crypto_free_tfm(conn->tx_tfm);
3421 				crypto_free_tfm(conn->rx_tfm);
3424 	case ISCSI_PARAM_DATADGST_EN:
3425 		conn->datadgst_en = value;
3426 		if (conn->datadgst_en) {
3427 			if (!conn->data_tx_tfm)
3429 					crypto_alloc_tfm("crc32c", 0);
3430 			if (!conn->data_tx_tfm)
3432 			if (!conn->data_rx_tfm)
3434 					crypto_alloc_tfm("crc32c", 0);
3435 			if (!conn->data_rx_tfm) {
3436 				crypto_free_tfm(conn->data_tx_tfm);
3440 			if (conn->data_tx_tfm)
3441 				crypto_free_tfm(conn->data_tx_tfm);
3442 			if (conn->data_rx_tfm)
3443 				crypto_free_tfm(conn->data_rx_tfm);
3446 	case ISCSI_PARAM_INITIAL_R2T_EN:
3447 		session->initial_r2t_en = value;
3449 	case ISCSI_PARAM_MAX_R2T:
		/* no-op if the (rounded) value is already in effect */
3450 		if (session->max_r2t == roundup_pow_of_two(value))
3452 		iscsi_r2tpool_free(session);
3453 		session->max_r2t = value;
3454 		if (session->max_r2t & (session->max_r2t - 1))
3455 			session->max_r2t = roundup_pow_of_two(session->max_r2t);
3456 		if (iscsi_r2tpool_alloc(session))
3459 	case ISCSI_PARAM_IMM_DATA_EN:
3460 		session->imm_data_en = value;
3462 	case ISCSI_PARAM_FIRST_BURST:
3463 		session->first_burst = value;
3465 	case ISCSI_PARAM_MAX_BURST:
3466 		session->max_burst = value;
3468 	case ISCSI_PARAM_PDU_INORDER_EN:
3469 		session->pdu_inorder_en = value;
3471 	case ISCSI_PARAM_DATASEQ_INORDER_EN:
3472 		session->dataseq_inorder_en = value;
3474 	case ISCSI_PARAM_ERL:
3475 		session->erl = value;
3477 	case ISCSI_PARAM_IFMARKER_EN:
3479 		session->ifmarker_en = value;
3481 	case ISCSI_PARAM_OFMARKER_EN:
3483 		session->ofmarker_en = value;
/*
 * iscsi_conn_get_param - read back a connection/session parameter.
 * Pure dispatch: connection-scoped values come from conn, session-wide
 * values from session.  Unknown params yield ISCSI_ERR_PARAM_NOT_FOUND.
 */
3493 iscsi_conn_get_param(iscsi_connh_t connh, enum iscsi_param param,
3496 	struct iscsi_conn *conn = iscsi_ptr(connh);
3497 	struct iscsi_session *session = conn->session;
3500 	case ISCSI_PARAM_MAX_RECV_DLENGTH:
3501 		*value = conn->max_recv_dlength;
3503 	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
3504 		*value = conn->max_xmit_dlength;
3506 	case ISCSI_PARAM_HDRDGST_EN:
3507 		*value = conn->hdrdgst_en;
3509 	case ISCSI_PARAM_DATADGST_EN:
3510 		*value = conn->datadgst_en;
3512 	case ISCSI_PARAM_INITIAL_R2T_EN:
3513 		*value = session->initial_r2t_en;
3515 	case ISCSI_PARAM_MAX_R2T:
3516 		*value = session->max_r2t;
3518 	case ISCSI_PARAM_IMM_DATA_EN:
3519 		*value = session->imm_data_en;
3521 	case ISCSI_PARAM_FIRST_BURST:
3522 		*value = session->first_burst;
3524 	case ISCSI_PARAM_MAX_BURST:
3525 		*value = session->max_burst;
3527 	case ISCSI_PARAM_PDU_INORDER_EN:
3528 		*value = session->pdu_inorder_en;
3530 	case ISCSI_PARAM_DATASEQ_INORDER_EN:
3531 		*value = session->dataseq_inorder_en;
3533 	case ISCSI_PARAM_ERL:
3534 		*value = session->erl;
3536 	case ISCSI_PARAM_IFMARKER_EN:
3537 		*value = session->ifmarker_en;
3539 	case ISCSI_PARAM_OFMARKER_EN:
3540 		*value = session->ofmarker_en;
3543 		return ISCSI_ERR_PARAM_NOT_FOUND;
/*
 * iscsi_conn_get_stats - export per-connection PDU/byte counters.
 * Copies the standard counters plus three driver-specific custom
 * entries (sendpage failures, discontiguous RX headers, eh_abort count)
 * into the caller-supplied iscsi_stats structure.
 */
3550 iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats)
3552 	struct iscsi_conn *conn = iscsi_ptr(connh);
3554 	stats->txdata_octets = conn->txdata_octets;
3555 	stats->rxdata_octets = conn->rxdata_octets;
3556 	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
3557 	stats->dataout_pdus = conn->dataout_pdus_cnt;
3558 	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
3559 	stats->datain_pdus = conn->datain_pdus_cnt;
3560 	stats->r2t_pdus = conn->r2t_pdus_cnt;
3561 	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
3562 	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
3563 	stats->custom_length = 3;
3564 	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
3565 	stats->custom[0].value = conn->sendpage_failures_cnt;
3566 	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
3567 	stats->custom[1].value = conn->discontiguous_hdr_cnt;
3568 	strcpy(stats->custom[2].desc, "eh_abort_cnt");
3569 	stats->custom[2].value = conn->eh_abort_cnt;
/*
 * iscsi_conn_send_pdu - transport entry point for sending a control PDU.
 * Thin wrapper that serializes callers via conn->xmitsema around
 * iscsi_conn_send_generic() (whose __kfifo_put calls are unlocked).
 */
3573 iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data,
3576 	struct iscsi_conn *conn = iscsi_ptr(connh);
3579 	down(&conn->xmitsema);
3580 	rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
3581 	up(&conn->xmitsema);
/*
 * iSCSI transport descriptor registered with the iscsi transport class:
 * maps the generic session/connection lifecycle, parameter, PDU and
 * statistics operations onto this driver's implementations.
 */
3586 static struct iscsi_transport iscsi_tcp_transport = {
3587 	.owner			= THIS_MODULE,
3589 	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
3591 	.host_template		= &iscsi_sht,
3592 	.hostdata_size		= sizeof(struct iscsi_session),
3594 	.max_cmd_len		= ISCSI_TCP_MAX_CMD_LEN,
3595 	.create_session		= iscsi_session_create,
3596 	.destroy_session	= iscsi_session_destroy,
3597 	.create_conn		= iscsi_conn_create,
3598 	.bind_conn		= iscsi_conn_bind,
3599 	.destroy_conn		= iscsi_conn_destroy,
3600 	.set_param		= iscsi_conn_set_param,
3601 	.get_param		= iscsi_conn_get_param,
3602 	.start_conn		= iscsi_conn_start,
3603 	.stop_conn		= iscsi_conn_stop,
3604 	.send_pdu		= iscsi_conn_send_pdu,
3605 	.get_stats		= iscsi_conn_get_stats,
/*
 * iscsi_tcp_init - module init: validate max_lun, create the slab cache
 * for data-out tasks, and register the transport.  The cache is
 * destroyed again if transport registration fails.
 */
3609 iscsi_tcp_init(void)
3613 	if (iscsi_max_lun < 1) {
3614 		printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
3617 	iscsi_tcp_transport.max_lun = iscsi_max_lun;
3619 	taskcache = kmem_cache_create("iscsi_taskcache",
3620 			sizeof(struct iscsi_data_task), 0,
3621 			SLAB_HWCACHE_ALIGN | SLAB_NO_REAP, NULL, NULL);
3625 	error = iscsi_register_transport(&iscsi_tcp_transport);
		/* registration failed: undo the cache creation */
3627 		kmem_cache_destroy(taskcache);
/*
 * iscsi_tcp_exit - module exit: unregister the transport and free the
 * data-task slab cache (reverse of iscsi_tcp_init).
 */
3633 iscsi_tcp_exit(void)
3635 	iscsi_unregister_transport(&iscsi_tcp_transport);
3636 	kmem_cache_destroy(taskcache);
/* module entry/exit registration */
3639 module_init(iscsi_tcp_init);
3640 module_exit(iscsi_tcp_exit);