2 * iSCSI Initiator over TCP/IP Data-Path
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 Mike Christie
7 * maintained by open-iscsi@googlegroups.com
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published
11 * by the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * See the file COPYING included with this distribution for more details.
28 #include <linux/types.h>
29 #include <linux/list.h>
30 #include <linux/inet.h>
31 #include <linux/blkdev.h>
32 #include <linux/crypto.h>
33 #include <linux/delay.h>
34 #include <linux/kfifo.h>
35 #include <linux/scatterlist.h>
36 #include <linux/mutex.h>
38 #include <scsi/scsi_cmnd.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_eh.h>
41 #include <scsi/scsi_request.h>
42 #include <scsi/scsi_tcq.h>
43 #include <scsi/scsi_host.h>
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_transport_iscsi.h>
47 #include "iscsi_tcp.h"
49 MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
50 "Alex Aizman <itn780@yahoo.com>");
51 MODULE_DESCRIPTION("iSCSI/TCP data-path");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION("0:4.445");
54 /* #define DEBUG_TCP */
55 /* #define DEBUG_SCSI */
59 #define debug_tcp(fmt...) printk(KERN_DEBUG "tcp: " fmt)
61 #define debug_tcp(fmt...)
65 #define debug_scsi(fmt...) printk(KERN_DEBUG "scsi: " fmt)
67 #define debug_scsi(fmt...)
77 #define INVALID_SN_DELTA 0xffff
79 static unsigned int iscsi_max_lun = 512;
80 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
83 static kmem_cache_t *taskcache;
86 iscsi_buf_init_virt(struct iscsi_buf *ibuf, char *vbuf, int size)
88 sg_init_one(&ibuf->sg, (u8 *)vbuf, size);
93 iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
95 ibuf->sg.page = (void*)vbuf;
96 ibuf->sg.offset = (unsigned int)-1;
97 ibuf->sg.length = size;
102 iscsi_buf_iov_base(struct iscsi_buf *ibuf)
104 return (char*)ibuf->sg.page + ibuf->sent;
108 iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
111 * Fastpath: sg element fits into a single page
113 if (sg->length + sg->offset <= PAGE_SIZE && page_count(sg->page) >= 2) {
114 ibuf->sg.page = sg->page;
115 ibuf->sg.offset = sg->offset;
116 ibuf->sg.length = sg->length;
118 iscsi_buf_init_iov(ibuf, page_address(sg->page), sg->length);
123 iscsi_buf_left(struct iscsi_buf *ibuf)
127 rc = ibuf->sg.length - ibuf->sent;
133 iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
136 crypto_digest_digest(conn->tx_tfm, &buf->sg, 1, crc);
137 buf->sg.length += sizeof(uint32_t);
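/*
 * Illustrative sketch, not part of the driver build: the HeaderDigest is a
 * CRC32C computed over the BHS (plus any AHS) and carried in the four bytes
 * that immediately follow the header on the wire, which is why
 * iscsi_hdr_digest() above grows buf->sg.length by sizeof(uint32_t).
 * The tfm is assumed to have been allocated as "crc32c" elsewhere.
 */
#if 0	/* example only */
static void iscsi_hdr_digest_example(struct crypto_tfm *crc32c_tfm,
				     struct iscsi_hdr *hdr, u8 *crc)
{
	struct scatterlist sg;

	/* digest the 48-byte basic header segment (no AHS here) */
	sg_init_one(&sg, (u8 *)hdr, sizeof(struct iscsi_hdr));
	crypto_digest_digest(crc32c_tfm, &sg, 1, crc);
}
#endif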
141 iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
143 struct iscsi_session *session = conn->session;
146 spin_lock_irqsave(&session->lock, flags);
147 if (session->conn_cnt == 1 || session->leadconn == conn)
148 session->state = ISCSI_STATE_FAILED;
149 spin_unlock_irqrestore(&session->lock, flags);
150 set_bit(SUSPEND_BIT, &conn->suspend_tx);
151 set_bit(SUSPEND_BIT, &conn->suspend_rx);
152 iscsi_conn_error(iscsi_handle(conn), err);
156 iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
158 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
159 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
161 if (max_cmdsn < exp_cmdsn -1 &&
162 max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
163 return ISCSI_ERR_MAX_CMDSN;
164 if (max_cmdsn > session->max_cmdsn ||
165 max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
166 session->max_cmdsn = max_cmdsn;
167 if (exp_cmdsn > session->exp_cmdsn ||
168 exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
169 session->exp_cmdsn = exp_cmdsn;
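/*
 * Illustrative sketch, not part of the driver build: the window checks above
 * use 32-bit serial-number arithmetic. A received SN only replaces the cached
 * one if it is "newer": either larger, or so much smaller that a 32-bit wrap
 * must have occurred (more than INVALID_SN_DELTA below the cached value).
 */
#if 0	/* example only */
static int iscsi_sn_newer_example(uint32_t sn, uint32_t ref)
{
	/* same test as the max_cmdsn/exp_cmdsn updates above */
	return sn > ref || sn < ref - INVALID_SN_DELTA;
}
/* iscsi_sn_newer_example(0x00000002, 0xfffffff0) is true: sn has wrapped */
#endif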
175 iscsi_hdr_extract(struct iscsi_conn *conn)
177 struct sk_buff *skb = conn->in.skb;
179 if (conn->in.copy >= conn->hdr_size &&
180 conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
182 * Zero-copy PDU Header: using connection context
183 * to store header pointer.
185 if (skb_shinfo(skb)->frag_list == NULL &&
186 !skb_shinfo(skb)->nr_frags)
187 conn->in.hdr = (struct iscsi_hdr *)
188 ((char*)skb->data + conn->in.offset);
190 /* ignoring return code since we checked
192 skb_copy_bits(skb, conn->in.offset,
193 &conn->hdr, conn->hdr_size);
194 conn->in.hdr = &conn->hdr;
196 conn->in.offset += conn->hdr_size;
197 conn->in.copy -= conn->hdr_size;
203 * PDU header scattered across SKBs;
204 * copy it. This happens quite rarely.
207 if (conn->in_progress == IN_PROGRESS_WAIT_HEADER)
208 conn->in.hdr_offset = 0;
210 hdr_remains = conn->hdr_size - conn->in.hdr_offset;
211 BUG_ON(hdr_remains <= 0);
213 copylen = min(conn->in.copy, hdr_remains);
214 skb_copy_bits(skb, conn->in.offset,
215 (char*)&conn->hdr + conn->in.hdr_offset, copylen);
217 debug_tcp("PDU gather offset %d bytes %d in.offset %d "
218 "in.copy %d\n", conn->in.hdr_offset, copylen,
219 conn->in.offset, conn->in.copy);
221 conn->in.offset += copylen;
222 conn->in.copy -= copylen;
223 if (copylen < hdr_remains) {
224 conn->in_progress = IN_PROGRESS_HEADER_GATHER;
225 conn->in.hdr_offset += copylen;
228 conn->in.hdr = &conn->hdr;
229 conn->discontiguous_hdr_cnt++;
230 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
237 iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
239 struct scsi_cmnd *sc = ctask->sc;
240 struct iscsi_session *session = conn->session;
242 spin_lock(&session->lock);
244 spin_unlock(&session->lock);
247 if (sc->sc_data_direction == DMA_TO_DEVICE) {
248 struct iscsi_data_task *dtask, *n;
249 /* WRITE: cleanup Data-Out's if any */
250 spin_lock(&conn->lock);
251 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
252 list_del(&dtask->item);
253 mempool_free(dtask, ctask->datapool);
255 spin_unlock(&conn->lock);
257 ctask->xmstate = XMSTATE_IDLE;
260 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
261 spin_unlock(&session->lock);
265 * iscsi_cmd_rsp - SCSI Command Response processing
266 * @conn: iscsi connection
267 * @ctask: scsi command task
270 iscsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
273 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)conn->in.hdr;
274 struct iscsi_session *session = conn->session;
275 struct scsi_cmnd *sc = ctask->sc;
277 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
279 sc->result = (DID_ERROR << 16);
283 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
285 sc->result = (DID_OK << 16) | rhdr->cmd_status;
287 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
288 sc->result = (DID_ERROR << 16);
292 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION && conn->senselen) {
293 int sensecopy = min(conn->senselen, SCSI_SENSE_BUFFERSIZE);
295 memcpy(sc->sense_buffer, conn->data + 2, sensecopy);
296 debug_scsi("copied %d bytes of sense\n", sensecopy);
299 if (sc->sc_data_direction == DMA_TO_DEVICE)
302 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
303 int res_count = be32_to_cpu(rhdr->residual_count);
305 if (res_count > 0 && res_count <= sc->request_bufflen)
306 sc->resid = res_count;
308 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
309 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
310 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
311 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
312 sc->resid = be32_to_cpu(rhdr->residual_count);
315 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
316 (long)sc, sc->result, ctask->itt);
317 conn->scsirsp_pdus_cnt++;
318 iscsi_ctask_cleanup(conn, ctask);
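/*
 * Illustrative sketch, not part of the driver build: residual handling as
 * performed above. A 4096-byte READ answered with only 3072 bytes arrives
 * with ISCSI_FLAG_CMD_UNDERFLOW set and residual_count 1024, so the midlayer
 * sees resid = 1024 and a DID_OK result; a residual that exceeds the request
 * length is treated as a target error.
 */
#if 0	/* example only */
static int iscsi_resid_valid_example(struct scsi_cmnd *sc, uint32_t res_count)
{
	/* same sanity check as the UNDERFLOW branch above */
	return res_count > 0 && res_count <= sc->request_bufflen;
}
#endif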
324 * iscsi_data_rsp - SCSI Data-In Response processing
325 * @conn: iscsi connection
326 * @ctask: scsi command task
329 iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
332 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)conn->in.hdr;
333 struct iscsi_session *session = conn->session;
334 int datasn = be32_to_cpu(rhdr->datasn);
336 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
340 * setup Data-In byte counter (gets decremented..)
342 ctask->data_count = conn->in.datalen;
344 if (conn->in.datalen == 0)
347 if (ctask->datasn != datasn)
348 return ISCSI_ERR_DATASN;
352 ctask->data_offset = be32_to_cpu(rhdr->offset);
353 if (ctask->data_offset + conn->in.datalen > ctask->total_length)
354 return ISCSI_ERR_DATA_OFFSET;
356 if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
357 struct scsi_cmnd *sc = ctask->sc;
359 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
360 if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
361 int res_count = be32_to_cpu(rhdr->residual_count);
364 res_count <= sc->request_bufflen) {
365 sc->resid = res_count;
366 sc->result = (DID_OK << 16) | rhdr->cmd_status;
368 sc->result = (DID_BAD_TARGET << 16) |
370 } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
371 sc->resid = be32_to_cpu(rhdr->residual_count);
372 sc->result = (DID_OK << 16) | rhdr->cmd_status;
374 sc->result = (DID_OK << 16) | rhdr->cmd_status;
377 conn->datain_pdus_cnt++;
382 * iscsi_solicit_data_init - initialize first Data-Out
383 * @conn: iscsi connection
384 * @ctask: scsi command task
388 * Initializes the first Data-Out within this R2T sequence and finds
389 * the proper data_offset within this SCSI command.
391 * This function is called with connection lock taken.
394 iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
395 struct iscsi_r2t_info *r2t)
397 struct iscsi_data *hdr;
398 struct iscsi_data_task *dtask;
399 struct scsi_cmnd *sc = ctask->sc;
401 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
404 memset(hdr, 0, sizeof(struct iscsi_data));
406 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
407 r2t->solicit_datasn++;
408 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
409 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
410 hdr->itt = ctask->hdr.itt;
411 hdr->exp_statsn = r2t->exp_statsn;
412 hdr->offset = cpu_to_be32(r2t->data_offset);
413 if (r2t->data_length > conn->max_xmit_dlength) {
414 hton24(hdr->dlength, conn->max_xmit_dlength);
415 r2t->data_count = conn->max_xmit_dlength;
418 hton24(hdr->dlength, r2t->data_length);
419 r2t->data_count = r2t->data_length;
420 hdr->flags = ISCSI_FLAG_CMD_FINAL;
422 conn->dataout_pdus_cnt++;
426 iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
427 sizeof(struct iscsi_hdr));
433 struct scatterlist *sg = sc->request_buffer;
436 for (i = 0; i < sc->use_sg; i++, sg += 1) {
437 /* FIXME: prefetch ? */
438 if (sg_count + sg->length > r2t->data_offset) {
443 /* offset within this page */
444 page_offset = r2t->data_offset - sg_count;
446 /* fill in this buffer */
447 iscsi_buf_init_sg(&r2t->sendbuf, sg);
448 r2t->sendbuf.sg.offset += page_offset;
449 r2t->sendbuf.sg.length -= page_offset;
451 /* xmit logic will continue with next one */
455 sg_count += sg->length;
457 BUG_ON(r2t->sg == NULL);
459 iscsi_buf_init_iov(&ctask->sendbuf,
460 (char*)sc->request_buffer + r2t->data_offset,
463 list_add(&dtask->item, &ctask->dataqueue);
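/*
 * Illustrative sketch, not part of the driver build: each R2T is answered
 * with a sequence of Data-Out PDUs of at most conn->max_xmit_dlength bytes,
 * numbered by a per-R2T DataSN starting at 0; only the last Data-Out of the
 * sequence carries ISCSI_FLAG_CMD_FINAL. E.g. an R2T asking for 65536 bytes
 * with max_xmit_dlength == 8192 is answered with 8 Data-Outs (DataSN 0..7).
 */
#if 0	/* example only */
static unsigned int iscsi_r2t_dout_pdus_example(unsigned int r2t_length,
						unsigned int max_xmit_dlength)
{
	/* number of Data-Out PDUs needed to answer one R2T */
	return (r2t_length + max_xmit_dlength - 1) / max_xmit_dlength;
}
#endif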
467 * iscsi_r2t_rsp - iSCSI R2T Response processing
468 * @conn: iscsi connection
469 * @ctask: scsi command task
472 iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
474 struct iscsi_r2t_info *r2t;
475 struct iscsi_session *session = conn->session;
476 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)conn->in.hdr;
477 int r2tsn = be32_to_cpu(rhdr->r2tsn);
481 return ISCSI_ERR_AHSLEN;
483 if (conn->in.datalen)
484 return ISCSI_ERR_DATALEN;
486 if (ctask->exp_r2tsn && ctask->exp_r2tsn != r2tsn)
487 return ISCSI_ERR_R2TSN;
489 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
493 /* FIXME: use R2TSN to detect missing R2T */
495 /* fill-in new R2T associated with the task */
496 spin_lock(&session->lock);
497 if (!ctask->sc || ctask->mtask ||
498 session->state != ISCSI_STATE_LOGGED_IN) {
499 printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
500 "recovery...\n", ctask->itt);
501 spin_unlock(&session->lock);
504 rc = __kfifo_get(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
507 r2t->exp_statsn = rhdr->statsn;
508 r2t->data_length = be32_to_cpu(rhdr->data_length);
509 if (r2t->data_length == 0 ||
510 r2t->data_length > session->max_burst) {
511 spin_unlock(&session->lock);
512 return ISCSI_ERR_DATALEN;
515 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
516 if (r2t->data_offset + r2t->data_length > ctask->total_length) {
517 spin_unlock(&session->lock);
518 return ISCSI_ERR_DATALEN;
521 r2t->ttt = rhdr->ttt; /* no flip */
522 r2t->solicit_datasn = 0;
524 iscsi_solicit_data_init(conn, ctask, r2t);
526 ctask->exp_r2tsn = r2tsn + 1;
527 ctask->xmstate |= XMSTATE_SOL_HDR;
528 __kfifo_put(ctask->r2tqueue, (void*)&r2t, sizeof(void*));
529 __kfifo_put(conn->writequeue, (void*)&ctask, sizeof(void*));
531 schedule_work(&conn->xmitwork);
532 conn->r2t_pdus_cnt++;
533 spin_unlock(&session->lock);
539 iscsi_hdr_recv(struct iscsi_conn *conn)
542 struct iscsi_hdr *hdr;
543 struct iscsi_cmd_task *ctask;
544 struct iscsi_session *session = conn->session;
545 uint32_t cdgst, rdgst = 0;
549 /* verify PDU length */
550 conn->in.datalen = ntoh24(hdr->dlength);
551 if (conn->in.datalen > conn->max_recv_dlength) {
552 printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
553 conn->in.datalen, conn->max_recv_dlength);
554 return ISCSI_ERR_DATALEN;
556 conn->data_copied = 0;
559 conn->in.ahslen = hdr->hlength * 4;
560 conn->in.offset += conn->in.ahslen;
561 conn->in.copy -= conn->in.ahslen;
562 if (conn->in.copy < 0) {
563 printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
564 "%d bytes\n", conn->in.ahslen);
565 return ISCSI_ERR_AHSLEN;
568 /* calculate read padding */
569 conn->in.padding = conn->in.datalen & (ISCSI_PAD_LEN-1);
570 if (conn->in.padding) {
571 conn->in.padding = ISCSI_PAD_LEN - conn->in.padding;
572 debug_scsi("read padding %d bytes\n", conn->in.padding);
575 if (conn->hdrdgst_en) {
576 struct scatterlist sg;
578 sg_init_one(&sg, (u8 *)hdr,
579 sizeof(struct iscsi_hdr) + conn->in.ahslen);
580 crypto_digest_digest(conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
581 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
583 if (cdgst != rdgst) {
584 printk(KERN_ERR "iscsi_tcp: itt %x: hdrdgst error "
585 "recv 0x%x calc 0x%x\n", conn->in.itt, rdgst,
587 return ISCSI_ERR_HDR_DGST;
591 /* save opcode for later */
592 conn->in.opcode = hdr->opcode & ISCSI_OPCODE_MASK;
594 /* verify itt (itt encoding: age+cid+itt) */
595 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
596 if ((hdr->itt & AGE_MASK) !=
597 (session->age << AGE_SHIFT)) {
598 printk(KERN_ERR "iscsi_tcp: received itt %x expected "
599 "session age (%x)\n", hdr->itt,
600 session->age & AGE_MASK);
601 return ISCSI_ERR_BAD_ITT;
604 if ((hdr->itt & CID_MASK) != (conn->id << CID_SHIFT)) {
605 printk(KERN_ERR "iscsi_tcp: received itt %x, expected "
606 "CID (%x)\n", hdr->itt, conn->id);
607 return ISCSI_ERR_BAD_ITT;
609 conn->in.itt = hdr->itt & ITT_MASK;
611 conn->in.itt = hdr->itt;
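	/*
	 * The tag checked above was packed by iscsi_cmd_init() as
	 *	itt | (conn->id << CID_SHIFT) | (session->age << AGE_SHIFT)
	 * so ITT_MASK, CID_MASK and AGE_MASK recover the command slot,
	 * the connection and the session age from a received tag.
	 */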
613 debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
614 hdr->opcode, conn->in.offset, conn->in.copy,
615 conn->in.ahslen, conn->in.datalen);
617 if (conn->in.itt < session->cmds_max) {
618 ctask = (struct iscsi_cmd_task *)session->cmds[conn->in.itt];
621 printk(KERN_INFO "iscsi_tcp: dropping ctask with "
622 "itt 0x%x\n", ctask->itt);
623 conn->in.datalen = 0; /* force drop */
627 if (ctask->sc->SCp.phase != session->age) {
628 printk(KERN_ERR "iscsi_tcp: ctask's session age %d, "
629 "expected %d\n", ctask->sc->SCp.phase,
631 return ISCSI_ERR_SESSION_FAILED;
634 conn->in.ctask = ctask;
636 debug_scsi("rsp [op 0x%x cid %d sc %lx itt 0x%x len %d]\n",
637 hdr->opcode, conn->id, (long)ctask->sc,
638 ctask->itt, conn->in.datalen);
640 switch(conn->in.opcode) {
641 case ISCSI_OP_SCSI_CMD_RSP:
642 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
643 if (!conn->in.datalen)
644 rc = iscsi_cmd_rsp(conn, ctask);
647 * got sense or response data; copying PDU
648 * Header to the connection's header
651 memcpy(&conn->hdr, hdr,
652 sizeof(struct iscsi_hdr));
654 case ISCSI_OP_SCSI_DATA_IN:
655 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
656 /* save flags for non-exceptional status */
657 conn->in.flags = hdr->flags;
658 /* save cmd_status for sense data */
659 conn->in.cmd_status =
660 ((struct iscsi_data_rsp*)hdr)->cmd_status;
661 rc = iscsi_data_rsp(conn, ctask);
664 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
665 if (ctask->sc->sc_data_direction == DMA_TO_DEVICE)
666 rc = iscsi_r2t_rsp(conn, ctask);
668 rc = ISCSI_ERR_PROTO;
671 rc = ISCSI_ERR_BAD_OPCODE;
674 } else if (conn->in.itt >= ISCSI_MGMT_ITT_OFFSET &&
675 conn->in.itt < ISCSI_MGMT_ITT_OFFSET +
676 session->mgmtpool_max) {
677 struct iscsi_mgmt_task *mtask = (struct iscsi_mgmt_task *)
678 session->mgmt_cmds[conn->in.itt -
679 ISCSI_MGMT_ITT_OFFSET];
681 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
682 conn->in.opcode, conn->id, mtask->itt,
685 switch(conn->in.opcode) {
686 case ISCSI_OP_LOGIN_RSP:
687 case ISCSI_OP_TEXT_RSP:
688 case ISCSI_OP_LOGOUT_RSP:
689 rc = iscsi_check_assign_cmdsn(session,
690 (struct iscsi_nopin*)hdr);
694 if (!conn->in.datalen) {
695 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
697 if (conn->login_mtask != mtask) {
698 spin_lock(&session->lock);
699 __kfifo_put(session->mgmtpool.queue,
700 (void*)&mtask, sizeof(void*));
701 spin_unlock(&session->lock);
705 case ISCSI_OP_SCSI_TMFUNC_RSP:
706 rc = iscsi_check_assign_cmdsn(session,
707 (struct iscsi_nopin*)hdr);
711 if (conn->in.datalen || conn->in.ahslen) {
712 rc = ISCSI_ERR_PROTO;
715 conn->tmfrsp_pdus_cnt++;
716 spin_lock(&session->lock);
717 if (conn->tmabort_state == TMABORT_INITIAL) {
718 __kfifo_put(session->mgmtpool.queue,
719 (void*)&mtask, sizeof(void*));
720 conn->tmabort_state =
721 ((struct iscsi_tm_rsp *)hdr)->
722 response == ISCSI_TMF_RSP_COMPLETE ?
723 TMABORT_SUCCESS:TMABORT_FAILED;
724 /* unblock eh_abort() */
725 wake_up(&conn->ehwait);
727 spin_unlock(&session->lock);
729 case ISCSI_OP_NOOP_IN:
730 if (hdr->ttt != ISCSI_RESERVED_TAG) {
731 rc = ISCSI_ERR_PROTO;
734 rc = iscsi_check_assign_cmdsn(session,
735 (struct iscsi_nopin*)hdr);
738 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
740 if (!conn->in.datalen) {
741 struct iscsi_mgmt_task *mtask;
743 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
745 mtask = (struct iscsi_mgmt_task *)
746 session->mgmt_cmds[conn->in.itt -
747 ISCSI_MGMT_ITT_OFFSET];
748 if (conn->login_mtask != mtask) {
749 spin_lock(&session->lock);
750 __kfifo_put(session->mgmtpool.queue,
751 (void*)&mtask, sizeof(void*));
752 spin_unlock(&session->lock);
757 rc = ISCSI_ERR_BAD_OPCODE;
760 } else if (conn->in.itt == ISCSI_RESERVED_TAG) {
761 switch(conn->in.opcode) {
762 case ISCSI_OP_NOOP_IN:
763 if (!conn->in.datalen) {
764 rc = iscsi_check_assign_cmdsn(session,
765 (struct iscsi_nopin*)hdr);
766 if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
767 rc = iscsi_recv_pdu(iscsi_handle(conn),
770 rc = ISCSI_ERR_PROTO;
772 case ISCSI_OP_REJECT:
773 /* we need something like iscsi_reject_rsp() */
774 case ISCSI_OP_ASYNC_EVENT:
775 /* we need something like iscsi_async_event_rsp() */
776 rc = ISCSI_ERR_BAD_OPCODE;
779 rc = ISCSI_ERR_BAD_OPCODE;
783 rc = ISCSI_ERR_BAD_ITT;
789 * iscsi_ctask_copy - copy skb bits to the destination cmd task
790 * @conn: iscsi connection
791 * @ctask: scsi command task
792 * @buf: buffer to copy to
793 * @buf_size: size of buffer
794 * @offset: offset within the buffer
797 * The function calls skb_copy_bits() and updates per-connection and
798 * per-cmd byte counters.
800 * Read counters (in bytes):
802 * conn->in.offset offset within in progress SKB
803 * conn->in.copy left to copy from in progress SKB
805 * conn->in.copied copied already from in progress SKB
806 * conn->data_copied copied already from in progress buffer
807 * ctask->sent total bytes sent up to the MidLayer
808 * ctask->data_count left to copy from in progress Data-In
809 * buf_left left to copy from in progress buffer
812 iscsi_ctask_copy(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
813 void *buf, int buf_size, int offset)
815 int buf_left = buf_size - (conn->data_copied + offset);
816 int size = min(conn->in.copy, buf_left);
819 size = min(size, ctask->data_count);
821 debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
822 size, conn->in.offset, conn->in.copied);
825 BUG_ON(ctask->sent + size > ctask->total_length);
827 rc = skb_copy_bits(conn->in.skb, conn->in.offset,
828 (char*)buf + (offset + conn->data_copied), size);
829 /* must fit into skb->len */
832 conn->in.offset += size;
833 conn->in.copy -= size;
834 conn->in.copied += size;
835 conn->data_copied += size;
837 ctask->data_count -= size;
839 BUG_ON(conn->in.copy < 0);
840 BUG_ON(ctask->data_count < 0);
842 if (buf_size != (conn->data_copied + offset)) {
843 if (!ctask->data_count) {
844 BUG_ON(buf_size - conn->data_copied < 0);
845 /* done with this PDU */
846 return buf_size - conn->data_copied;
851 /* done with this buffer or with both - PDU and buffer */
852 conn->data_copied = 0;
857 * iscsi_tcp_copy - copy skb bits to the destination buffer
858 * @conn: iscsi connection
859 * @buf: buffer to copy to
860 * @buf_size: number of bytes to copy
863 * The function calls skb_copy_bits() and updates per-connection
867 iscsi_tcp_copy(struct iscsi_conn *conn, void *buf, int buf_size)
869 int buf_left = buf_size - conn->data_copied;
870 int size = min(conn->in.copy, buf_left);
873 debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
874 size, conn->in.offset, conn->data_copied);
877 rc = skb_copy_bits(conn->in.skb, conn->in.offset,
878 (char*)buf + conn->data_copied, size);
881 conn->in.offset += size;
882 conn->in.copy -= size;
883 conn->in.copied += size;
884 conn->data_copied += size;
886 if (buf_size != conn->data_copied)
893 partial_sg_digest_update(struct iscsi_conn *conn, struct scatterlist *sg,
894 int offset, int length)
896 struct scatterlist temp;
898 memcpy(&temp, sg, sizeof(struct scatterlist));
899 temp.offset = offset;
900 temp.length = length;
901 crypto_digest_update(conn->data_rx_tfm, &temp, 1);
905 iscsi_recv_digest_update(struct iscsi_conn *conn, char* buf, int len)
907 struct scatterlist tmp;
909 sg_init_one(&tmp, buf, len);
910 crypto_digest_update(conn->data_rx_tfm, &tmp, 1);
913 static int iscsi_scsi_data_in(struct iscsi_conn *conn)
915 struct iscsi_cmd_task *ctask = conn->in.ctask;
916 struct scsi_cmnd *sc = ctask->sc;
917 struct scatterlist *sg;
918 int i, offset, rc = 0;
920 BUG_ON((void*)ctask != sc->SCp.ptr);
923 * copying Data-In into the Scsi_Cmnd
926 i = ctask->data_count;
927 rc = iscsi_ctask_copy(conn, ctask, sc->request_buffer,
928 sc->request_bufflen, ctask->data_offset);
931 if (conn->datadgst_en)
932 iscsi_recv_digest_update(conn, sc->request_buffer, i);
937 offset = ctask->data_offset;
938 sg = sc->request_buffer;
940 if (ctask->data_offset)
941 for (i = 0; i < ctask->sg_count; i++)
942 offset -= sg[i].length;
943 /* we've passed through the partial sg */
947 for (i = ctask->sg_count; i < sc->use_sg; i++) {
950 dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
951 rc = iscsi_ctask_copy(conn, ctask, dest + sg[i].offset,
952 sg[i].length, offset);
953 kunmap_atomic(dest, KM_SOFTIRQ0);
955 /* continue with the next SKB/PDU */
958 if (conn->datadgst_en) {
960 crypto_digest_update(conn->data_rx_tfm,
963 partial_sg_digest_update(conn, &sg[i],
964 sg[i].offset + offset,
965 sg[i].length - offset);
971 if (!ctask->data_count) {
972 if (rc && conn->datadgst_en)
974 * Data-In is complete, but the buffer is not...
976 partial_sg_digest_update(conn, &sg[i],
977 sg[i].offset, sg[i].length-rc);
985 BUG_ON(ctask->data_count);
988 /* check for non-exceptional status */
989 if (conn->in.flags & ISCSI_FLAG_DATA_STATUS) {
990 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
991 (long)sc, sc->result, ctask->itt);
992 conn->scsirsp_pdus_cnt++;
993 iscsi_ctask_cleanup(conn, ctask);
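/*
 * Illustrative sketch, not part of the driver build: how a Data-In offset is
 * mapped onto the command's scatterlist, as the loop above does with the
 * entries already consumed. With three 4096-byte entries and an offset of
 * 6144, copying starts 2048 bytes into the second entry.
 */
#if 0	/* example only */
static struct scatterlist *iscsi_find_sg_example(struct scatterlist *sg,
						 int nsg, unsigned int *offset)
{
	int i;

	/* skip whole entries until *offset falls inside one of them */
	for (i = 0; i < nsg && *offset >= sg[i].length; i++)
		*offset -= sg[i].length;
	return i < nsg ? &sg[i] : NULL;
}
#endif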
1001 iscsi_data_recv(struct iscsi_conn *conn)
1003 struct iscsi_session *session = conn->session;
1006 switch(conn->in.opcode) {
1007 case ISCSI_OP_SCSI_DATA_IN:
1008 rc = iscsi_scsi_data_in(conn);
1010 case ISCSI_OP_SCSI_CMD_RSP: {
1013 * copying the entire Data Segment.
1015 if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
1023 conn->in.hdr = &conn->hdr;
1024 conn->senselen = (conn->data[0] << 8) | conn->data[1];
1025 rc = iscsi_cmd_rsp(conn, conn->in.ctask);
1026 if (!rc && conn->datadgst_en)
1027 iscsi_recv_digest_update(conn, conn->data,
1031 case ISCSI_OP_TEXT_RSP:
1032 case ISCSI_OP_LOGIN_RSP:
1033 case ISCSI_OP_NOOP_IN: {
1034 struct iscsi_mgmt_task *mtask = NULL;
1036 if (conn->in.itt != ISCSI_RESERVED_TAG)
1037 mtask = (struct iscsi_mgmt_task *)
1038 session->mgmt_cmds[conn->in.itt -
1039 ISCSI_MGMT_ITT_OFFSET];
1042 * Collect data segment to the connection's data
1045 if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
1050 rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr,
1051 conn->data, conn->in.datalen);
1053 if (!rc && conn->datadgst_en &&
1054 conn->in.opcode != ISCSI_OP_LOGIN_RSP)
1055 iscsi_recv_digest_update(conn, conn->data,
1058 if (mtask && conn->login_mtask != mtask) {
1059 spin_lock(&session->lock);
1060 __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
1062 spin_unlock(&session->lock);
1066 case ISCSI_OP_ASYNC_EVENT:
1067 case ISCSI_OP_REJECT:
1076 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
1077 * @rd_desc: read descriptor
1078 * @skb: socket buffer
1079 * @offset: offset in skb
1080 * @len: skb->len - offset
1083 iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
1084 unsigned int offset, size_t len)
1087 struct iscsi_conn *conn = rd_desc->arg.data;
1089 char pad[ISCSI_PAD_LEN];
1090 struct scatterlist sg;
1093 * Save current SKB and its offset in the corresponding
1094 * connection context.
1096 conn->in.copy = skb->len - offset;
1097 conn->in.offset = offset;
1099 conn->in.len = conn->in.copy;
1100 BUG_ON(conn->in.copy <= 0);
1101 debug_tcp("in %d bytes\n", conn->in.copy);
1104 conn->in.copied = 0;
1107 if (unlikely(conn->suspend_rx)) {
1108 debug_tcp("conn %d Rx suspended!\n", conn->id);
1112 if (conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
1113 conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
1114 rc = iscsi_hdr_extract(conn);
1119 iscsi_conn_failure(conn, rc);
1125 * Verify and process incoming PDU header.
1127 rc = iscsi_hdr_recv(conn);
1128 if (!rc && conn->in.datalen) {
1129 if (conn->datadgst_en) {
1130 BUG_ON(!conn->data_rx_tfm);
1131 crypto_digest_init(conn->data_rx_tfm);
1133 conn->in_progress = IN_PROGRESS_DATA_RECV;
1135 iscsi_conn_failure(conn, rc);
1140 if (conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
1141 uint32_t recv_digest;
1142 debug_tcp("extra data_recv offset %d copy %d\n",
1143 conn->in.offset, conn->in.copy);
1144 skb_copy_bits(conn->in.skb, conn->in.offset,
1146 conn->in.offset += 4;
1148 if (recv_digest != conn->in.datadgst) {
1149 debug_tcp("iscsi_tcp: data digest error!"
1150 "0x%x != 0x%x\n", recv_digest,
1152 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
1155 debug_tcp("iscsi_tcp: data digest match!"
1156 "0x%x == 0x%x\n", recv_digest,
1158 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1162 if (conn->in_progress == IN_PROGRESS_DATA_RECV && conn->in.copy) {
1164 debug_tcp("data_recv offset %d copy %d\n",
1165 conn->in.offset, conn->in.copy);
1167 rc = iscsi_data_recv(conn);
1169 if (rc == -EAGAIN) {
1170 rd_desc->count = conn->in.datalen -
1171 conn->in.ctask->data_count;
1174 iscsi_conn_failure(conn, rc);
1177 conn->in.copy -= conn->in.padding;
1178 conn->in.offset += conn->in.padding;
1179 if (conn->datadgst_en) {
1180 if (conn->in.padding) {
1181 debug_tcp("padding -> %d\n", conn->in.padding);
1182 memset(pad, 0, conn->in.padding);
1183 sg_init_one(&sg, pad, conn->in.padding);
1184 crypto_digest_update(conn->data_rx_tfm, &sg, 1);
1186 crypto_digest_final(conn->data_rx_tfm,
1187 (u8 *) & conn->in.datadgst);
1188 debug_tcp("rx digest 0x%x\n", conn->in.datadgst);
1189 conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
1191 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1194 debug_tcp("f, processed %d from out of %d padding %d\n",
1195 conn->in.offset - offset, (int)len, conn->in.padding);
1196 BUG_ON(conn->in.offset - offset > len);
1198 if (conn->in.offset - offset != len) {
1199 debug_tcp("continue to process %d bytes\n",
1200 (int)len - (conn->in.offset - offset));
1205 processed = conn->in.offset - offset;
1206 BUG_ON(processed == 0);
1210 processed = conn->in.offset - offset;
1211 debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
1212 processed, (int)len, (int)rd_desc->count);
1213 BUG_ON(processed == 0);
1214 BUG_ON(processed > len);
1216 conn->rxdata_octets += processed;
1221 iscsi_tcp_data_ready(struct sock *sk, int flag)
1223 struct iscsi_conn *conn = sk->sk_user_data;
1224 read_descriptor_t rd_desc;
1226 read_lock(&sk->sk_callback_lock);
1228 /* use rd_desc to pass 'conn' to iscsi_tcp_data_recv */
1229 rd_desc.arg.data = conn;
1231 tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
1233 read_unlock(&sk->sk_callback_lock);
1237 iscsi_tcp_state_change(struct sock *sk)
1239 struct iscsi_conn *conn;
1240 struct iscsi_session *session;
1241 void (*old_state_change)(struct sock *);
1243 read_lock(&sk->sk_callback_lock);
1245 conn = (struct iscsi_conn*)sk->sk_user_data;
1246 session = conn->session;
1248 if ((sk->sk_state == TCP_CLOSE_WAIT ||
1249 sk->sk_state == TCP_CLOSE) &&
1250 !atomic_read(&sk->sk_rmem_alloc)) {
1251 debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
1252 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1255 old_state_change = conn->old_state_change;
1257 read_unlock(&sk->sk_callback_lock);
1259 old_state_change(sk);
1263 * iscsi_write_space - Called when more output buffer space is available
1264 * @sk: socket space is available for
1267 iscsi_write_space(struct sock *sk)
1269 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
1270 conn->old_write_space(sk);
1271 debug_tcp("iscsi_write_space: cid %d\n", conn->id);
1272 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
1273 schedule_work(&conn->xmitwork);
1277 iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1279 struct sock *sk = conn->sock->sk;
1281 /* assign new callbacks */
1282 write_lock_bh(&sk->sk_callback_lock);
1283 sk->sk_user_data = conn;
1284 conn->old_data_ready = sk->sk_data_ready;
1285 conn->old_state_change = sk->sk_state_change;
1286 conn->old_write_space = sk->sk_write_space;
1287 sk->sk_data_ready = iscsi_tcp_data_ready;
1288 sk->sk_state_change = iscsi_tcp_state_change;
1289 sk->sk_write_space = iscsi_write_space;
1290 write_unlock_bh(&sk->sk_callback_lock);
1294 iscsi_conn_restore_callbacks(struct iscsi_conn *conn)
1296 struct sock *sk = conn->sock->sk;
1298 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
1299 write_lock_bh(&sk->sk_callback_lock);
1300 sk->sk_user_data = NULL;
1301 sk->sk_data_ready = conn->old_data_ready;
1302 sk->sk_state_change = conn->old_state_change;
1303 sk->sk_write_space = conn->old_write_space;
1304 sk->sk_no_check = 0;
1305 write_unlock_bh(&sk->sk_callback_lock);
1309 * iscsi_send - generic send routine
1310 * @sk: kernel's socket
1311 * @buf: buffer to write from
1312 * @size: actual size to write
1313 * @flags: socket's flags
1316 * Depending on the buffer, uses tcp_sendpage() or tcp_sendmsg().
1317 * buf->sg.offset == -1 tells us that the buffer is not backed by an
1318 * S/G page and forces the use of tcp_sendmsg().
1321 iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
1323 struct socket *sk = conn->sock;
1326 if ((int)buf->sg.offset >= 0) {
1327 int offset = buf->sg.offset + buf->sent;
1329 res = conn->sendpage(sk, buf->sg.page, offset, size, flags);
1333 buf->iov.iov_base = iscsi_buf_iov_base(buf);
1334 buf->iov.iov_len = size;
1336 memset(&msg, 0, sizeof(struct msghdr));
1339 res = kernel_sendmsg(sk, &msg, &buf->iov, 1, size);
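/*
 * Illustrative sketch, not part of the driver build: the dispatch above
 * hinges entirely on how the iscsi_buf was initialized.
 * iscsi_buf_init_sg() leaves a real page and offset (zero-copy via
 * conn->sendpage), while iscsi_buf_init_iov() stores the virtual address
 * in sg.page and -1 in sg.offset, forcing kernel_sendmsg().
 */
#if 0	/* example only */
static int iscsi_buf_uses_sendpage_example(struct iscsi_buf *buf)
{
	return (int)buf->sg.offset >= 0;	/* -1 marks a virtual buffer */
}
#endif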
1346 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
1347 * @conn: iscsi connection
1348 * @buf: buffer to write from
1349 * @datalen: length of data to be sent after the header
1355 iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
1357 int flags = 0; /* MSG_DONTWAIT; */
1360 size = buf->sg.length - buf->sent;
1361 BUG_ON(buf->sent + size > buf->sg.length);
1362 if (buf->sent + size != buf->sg.length || datalen)
1365 res = iscsi_send(conn, buf, size, flags);
1366 debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
1368 conn->txdata_octets += res;
1373 } else if (res == -EAGAIN) {
1374 conn->sendpage_failures_cnt++;
1375 set_bit(SUSPEND_BIT, &conn->suspend_tx);
1376 } else if (res == -EPIPE)
1377 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1383 * iscsi_sendpage - send one page of iSCSI Data-Out.
1384 * @conn: iscsi connection
1385 * @buf: buffer to write from
1386 * @count: remaining data
1387 * @sent: number of bytes sent
1393 iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
1394 int *count, int *sent)
1396 int flags = 0; /* MSG_DONTWAIT; */
1399 size = buf->sg.length - buf->sent;
1400 BUG_ON(buf->sent + size > buf->sg.length);
1403 if (buf->sent + size != buf->sg.length || *count != size)
1406 res = iscsi_send(conn, buf, size, flags);
1407 debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
1408 size, buf->sent, *count, *sent, res);
1410 conn->txdata_octets += res;
1417 } else if (res == -EAGAIN) {
1418 conn->sendpage_failures_cnt++;
1419 set_bit(SUSPEND_BIT, &conn->suspend_tx);
1420 } else if (res == -EPIPE)
1421 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1427 iscsi_data_digest_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1429 BUG_ON(!conn->data_tx_tfm);
1430 crypto_digest_init(conn->data_tx_tfm);
1431 ctask->digest_count = 4;
1435 iscsi_buf_data_digest_update(struct iscsi_conn *conn, struct iscsi_buf *buf)
1437 struct scatterlist sg;
1439 if (buf->sg.offset != -1)
1440 crypto_digest_update(conn->data_tx_tfm, &buf->sg, 1);
1442 sg_init_one(&sg, (char *)buf->sg.page, buf->sg.length);
1443 crypto_digest_update(conn->data_tx_tfm, &sg, 1);
1448 iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1449 struct iscsi_buf *buf, uint32_t *digest, int final)
1455 crypto_digest_final(conn->data_tx_tfm, (u8*)digest);
1457 iscsi_buf_init_virt(buf, (char*)digest, 4);
1458 rc = iscsi_sendpage(conn, buf, &ctask->digest_count, &sent);
1460 ctask->datadigest = *digest;
1461 ctask->xmstate |= XMSTATE_DATA_DIGEST;
1463 ctask->digest_count = 4;
1468 * iscsi_solicit_data_cont - initialize next Data-Out
1469 * @conn: iscsi connection
1470 * @ctask: scsi command task
1472 * @left: bytes left to transfer
1475 * Initializes the next Data-Out within this R2T sequence and continues
1476 * with the next scatter-gather element (if any) of this SCSI command.
1478 * Called under connection lock.
1481 iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1482 struct iscsi_r2t_info *r2t, int left)
1484 struct iscsi_data *hdr;
1485 struct iscsi_data_task *dtask;
1486 struct scsi_cmnd *sc = ctask->sc;
1489 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1492 memset(hdr, 0, sizeof(struct iscsi_data));
1493 hdr->ttt = r2t->ttt;
1494 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1495 r2t->solicit_datasn++;
1496 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1497 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1498 hdr->itt = ctask->hdr.itt;
1499 hdr->exp_statsn = r2t->exp_statsn;
1500 new_offset = r2t->data_offset + r2t->sent;
1501 hdr->offset = cpu_to_be32(new_offset);
1502 if (left > conn->max_xmit_dlength) {
1503 hton24(hdr->dlength, conn->max_xmit_dlength);
1504 r2t->data_count = conn->max_xmit_dlength;
1506 hton24(hdr->dlength, left);
1507 r2t->data_count = left;
1508 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1510 conn->dataout_pdus_cnt++;
1512 iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
1513 sizeof(struct iscsi_hdr));
1517 if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) {
1518 BUG_ON(ctask->bad_sg == r2t->sg);
1519 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1522 iscsi_buf_init_iov(&ctask->sendbuf,
1523 (char*)sc->request_buffer + new_offset,
1526 list_add(&dtask->item, &ctask->dataqueue);
1530 iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1532 struct iscsi_data *hdr;
1533 struct iscsi_data_task *dtask;
1535 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1538 memset(hdr, 0, sizeof(struct iscsi_data));
1539 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
1540 hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
1541 ctask->unsol_datasn++;
1542 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1543 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1544 hdr->itt = ctask->hdr.itt;
1545 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
1546 hdr->offset = cpu_to_be32(ctask->total_length -
1547 ctask->r2t_data_count -
1548 ctask->unsol_count);
1549 if (ctask->unsol_count > conn->max_xmit_dlength) {
1550 hton24(hdr->dlength, conn->max_xmit_dlength);
1551 ctask->data_count = conn->max_xmit_dlength;
1554 hton24(hdr->dlength, ctask->unsol_count);
1555 ctask->data_count = ctask->unsol_count;
1556 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1559 iscsi_buf_init_virt(&ctask->headbuf, (char*)hdr,
1560 sizeof(struct iscsi_hdr));
1562 list_add(&dtask->item, &ctask->dataqueue);
1564 ctask->dtask = dtask;
1568 * iscsi_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1569 * @conn: iscsi connection
1570 * @ctask: scsi command task
1574 iscsi_cmd_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1575 struct scsi_cmnd *sc)
1577 struct iscsi_session *session = conn->session;
1579 BUG_ON(__kfifo_len(ctask->r2tqueue));
1583 ctask->hdr.opcode = ISCSI_OP_SCSI_CMD;
1584 ctask->hdr.flags = ISCSI_ATTR_SIMPLE;
1585 int_to_scsilun(sc->device->lun, (struct scsi_lun *)ctask->hdr.lun);
1586 ctask->hdr.itt = ctask->itt | (conn->id << CID_SHIFT) |
1587 (session->age << AGE_SHIFT);
1588 ctask->hdr.data_length = cpu_to_be32(sc->request_bufflen);
1589 ctask->hdr.cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++;
1590 ctask->hdr.exp_statsn = cpu_to_be32(conn->exp_statsn);
1591 memcpy(ctask->hdr.cdb, sc->cmnd, sc->cmd_len);
1592 memset(&ctask->hdr.cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
1594 ctask->mtask = NULL;
1596 ctask->sg_count = 0;
1598 ctask->total_length = sc->request_bufflen;
1600 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1601 ctask->exp_r2tsn = 0;
1602 ctask->hdr.flags |= ISCSI_FLAG_CMD_WRITE;
1603 BUG_ON(ctask->total_length == 0);
1605 struct scatterlist *sg = sc->request_buffer;
1607 iscsi_buf_init_sg(&ctask->sendbuf,
1608 &sg[ctask->sg_count++]);
1610 ctask->bad_sg = sg + sc->use_sg;
1612 iscsi_buf_init_iov(&ctask->sendbuf, sc->request_buffer,
1613 sc->request_bufflen);
1619 * imm_count bytes to be sent right after
1622 * unsol_count bytes (as Data-Out) to be sent
1623 * without R2T ack right after
1626 * r2t_data_count bytes to be sent via R2T acks
1628 * pad_count bytes to be sent as zero-padding
1630 ctask->imm_count = 0;
1631 ctask->unsol_count = 0;
1632 ctask->unsol_datasn = 0;
1633 ctask->xmstate = XMSTATE_W_HDR;
1634 /* calculate write padding */
1635 ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1);
1636 if (ctask->pad_count) {
1637 ctask->pad_count = ISCSI_PAD_LEN - ctask->pad_count;
1638 debug_scsi("write padding %d bytes\n",
1640 ctask->xmstate |= XMSTATE_W_PAD;
1642 if (session->imm_data_en) {
1643 if (ctask->total_length >= session->first_burst)
1644 ctask->imm_count = min(session->first_burst,
1645 conn->max_xmit_dlength);
1647 ctask->imm_count = min(ctask->total_length,
1648 conn->max_xmit_dlength);
1649 hton24(ctask->hdr.dlength, ctask->imm_count);
1650 ctask->xmstate |= XMSTATE_IMM_DATA;
1652 zero_data(ctask->hdr.dlength);
1654 if (!session->initial_r2t_en)
1655 ctask->unsol_count = min(session->first_burst,
1656 ctask->total_length) - ctask->imm_count;
1657 if (!ctask->unsol_count)
1658 /* No unsolicited Data-Outs */
1659 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1661 ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
1663 ctask->r2t_data_count = ctask->total_length -
1667 debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
1669 ctask->itt, ctask->total_length, ctask->imm_count,
1670 ctask->unsol_count, ctask->r2t_data_count);
1672 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1673 if (sc->sc_data_direction == DMA_FROM_DEVICE)
1674 ctask->hdr.flags |= ISCSI_FLAG_CMD_READ;
1676 ctask->xmstate = XMSTATE_R_HDR;
1677 zero_data(ctask->hdr.dlength);
1680 iscsi_buf_init_virt(&ctask->headbuf, (char*)&ctask->hdr,
1681 sizeof(struct iscsi_hdr));
1682 conn->scsicmd_pdus_cnt++;
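/*
 * Illustrative sketch, not part of the driver build: how a WRITE is split,
 * assuming ImmediateData=Yes and InitialR2T=No. A 256 KB command with
 * FirstBurstLength 64 KB and max_xmit_dlength 8 KB ends up with
 * imm_count = 8 KB (inside the command PDU), unsol_count = 56 KB
 * (unsolicited Data-Outs) and r2t_data_count = 192 KB (solicited via R2T).
 */
#if 0	/* example only */
static void iscsi_write_split_example(unsigned int total,
				      unsigned int first_burst,
				      unsigned int max_xmit,
				      unsigned int *imm, unsigned int *unsol,
				      unsigned int *r2t)
{
	*imm = min(min(total, first_burst), max_xmit);
	*unsol = min(total, first_burst) - *imm;
	*r2t = total - *imm - *unsol;
}
#endif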
1686 * iscsi_mtask_xmit - xmit a management (immediate) task
1687 * @conn: iscsi connection
1688 * @mtask: task management task
1691 * The function can return -EAGAIN, in which case the caller must
1692 * call it again later or recover. A '0' return code means successful
1695 * Management xmit state machine consists of two states:
1696 * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
1697 * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
1700 iscsi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1703 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
1704 conn->id, mtask->xmstate, mtask->itt);
1706 if (mtask->xmstate & XMSTATE_IMM_HDR) {
1707 mtask->xmstate &= ~XMSTATE_IMM_HDR;
1708 if (mtask->data_count)
1709 mtask->xmstate |= XMSTATE_IMM_DATA;
1710 if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
1711 conn->stop_stage != STOP_CONN_RECOVER &&
1713 iscsi_hdr_digest(conn, &mtask->headbuf,
1714 (u8*)mtask->hdrext);
1715 if (iscsi_sendhdr(conn, &mtask->headbuf, mtask->data_count)) {
1716 mtask->xmstate |= XMSTATE_IMM_HDR;
1717 if (mtask->data_count)
1718 mtask->xmstate &= ~XMSTATE_IMM_DATA;
1723 if (mtask->xmstate & XMSTATE_IMM_DATA) {
1724 BUG_ON(!mtask->data_count);
1725 mtask->xmstate &= ~XMSTATE_IMM_DATA;
1726 /* FIXME: implement.
1727 * The virtual buffer could be spread across multiple pages...
1730 if (iscsi_sendpage(conn, &mtask->sendbuf,
1731 &mtask->data_count, &mtask->sent)) {
1732 mtask->xmstate |= XMSTATE_IMM_DATA;
1735 } while (mtask->data_count);
1738 BUG_ON(mtask->xmstate != XMSTATE_IDLE);
1743 handle_xmstate_r_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1745 ctask->xmstate &= ~XMSTATE_R_HDR;
1746 if (conn->hdrdgst_en)
1747 iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
1748 if (!iscsi_sendhdr(conn, &ctask->headbuf, 0)) {
1749 BUG_ON(ctask->xmstate != XMSTATE_IDLE);
1750 return 0; /* wait for Data-In */
1752 ctask->xmstate |= XMSTATE_R_HDR;
1757 handle_xmstate_w_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1759 ctask->xmstate &= ~XMSTATE_W_HDR;
1760 if (conn->hdrdgst_en)
1761 iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
1762 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->imm_count)) {
1763 ctask->xmstate |= XMSTATE_W_HDR;
1770 handle_xmstate_data_digest(struct iscsi_conn *conn,
1771 struct iscsi_cmd_task *ctask)
1773 ctask->xmstate &= ~XMSTATE_DATA_DIGEST;
1774 debug_tcp("resent data digest 0x%x\n", ctask->datadigest);
1775 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1776 &ctask->datadigest, 0)) {
1777 ctask->xmstate |= XMSTATE_DATA_DIGEST;
1778 debug_tcp("resent data digest 0x%x fail!\n",
1786 handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1788 BUG_ON(!ctask->imm_count);
1789 ctask->xmstate &= ~XMSTATE_IMM_DATA;
1791 if (conn->datadgst_en) {
1792 iscsi_data_digest_init(conn, ctask);
1793 ctask->immdigest = 0;
1797 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->imm_count,
1799 ctask->xmstate |= XMSTATE_IMM_DATA;
1800 if (conn->datadgst_en) {
1801 crypto_digest_final(conn->data_tx_tfm,
1802 (u8*)&ctask->immdigest);
1803 debug_tcp("tx imm sendpage fail 0x%x\n",
1808 if (conn->datadgst_en)
1809 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
1811 if (!ctask->imm_count)
1813 iscsi_buf_init_sg(&ctask->sendbuf,
1814 &ctask->sg[ctask->sg_count++]);
1817 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1818 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1819 &ctask->immdigest, 1)) {
1820 debug_tcp("sending imm digest 0x%x fail!\n",
1824 debug_tcp("sending imm digest 0x%x\n", ctask->immdigest);
1831 handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1833 struct iscsi_data_task *dtask;
1835 ctask->xmstate |= XMSTATE_UNS_DATA;
1836 if (ctask->xmstate & XMSTATE_UNS_INIT) {
1837 iscsi_unsolicit_data_init(conn, ctask);
1838 BUG_ON(!ctask->dtask);
1839 dtask = ctask->dtask;
1840 if (conn->hdrdgst_en)
1841 iscsi_hdr_digest(conn, &ctask->headbuf,
1842 (u8*)dtask->hdrext);
1843 ctask->xmstate &= ~XMSTATE_UNS_INIT;
1845 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->data_count)) {
1846 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1847 ctask->xmstate |= XMSTATE_UNS_HDR;
1851 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1852 ctask->itt, ctask->unsol_count, ctask->sent);
1857 handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1859 struct iscsi_data_task *dtask = ctask->dtask;
1861 BUG_ON(!ctask->data_count);
1862 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1864 if (conn->datadgst_en) {
1865 iscsi_data_digest_init(conn, ctask);
1870 int start = ctask->sent;
1872 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->data_count,
1874 ctask->unsol_count -= ctask->sent - start;
1875 ctask->xmstate |= XMSTATE_UNS_DATA;
1876 /* will continue with this ctask later.. */
1877 if (conn->datadgst_en) {
1878 crypto_digest_final(conn->data_tx_tfm,
1879 (u8 *)&dtask->digest);
1880 debug_tcp("tx uns data fail 0x%x\n",
1886 BUG_ON(ctask->sent > ctask->total_length);
1887 ctask->unsol_count -= ctask->sent - start;
1890 * XXX: we may get here with an uninitialized sendbuf.
1893 if (conn->datadgst_en && ctask->sent - start > 0)
1894 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
1896 if (!ctask->data_count)
1898 iscsi_buf_init_sg(&ctask->sendbuf,
1899 &ctask->sg[ctask->sg_count++]);
1901 BUG_ON(ctask->unsol_count < 0);
1904 * Done with the Data-Out. Next, check if we need
1905 * to send another unsolicited Data-Out.
1907 if (ctask->unsol_count) {
1908 if (conn->datadgst_en) {
1909 if (iscsi_digest_final_send(conn, ctask,
1911 &dtask->digest, 1)) {
1912 debug_tcp("send uns digest 0x%x fail\n",
1916 debug_tcp("sending uns digest 0x%x, more uns\n",
1919 ctask->xmstate |= XMSTATE_UNS_INIT;
1923 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1924 if (iscsi_digest_final_send(conn, ctask,
1926 &dtask->digest, 1)) {
1927 debug_tcp("send last uns digest 0x%x fail\n",
1931 debug_tcp("sending uns digest 0x%x\n",dtask->digest);
1938 handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1940 struct iscsi_session *session = conn->session;
1941 struct iscsi_r2t_info *r2t = ctask->r2t;
1942 struct iscsi_data_task *dtask = r2t->dtask;
1945 ctask->xmstate &= ~XMSTATE_SOL_DATA;
1946 ctask->dtask = dtask;
1948 if (conn->datadgst_en) {
1949 iscsi_data_digest_init(conn, ctask);
1954 * send Data-Out within this R2T sequence.
1956 if (!r2t->data_count)
1959 if (iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent)) {
1960 ctask->xmstate |= XMSTATE_SOL_DATA;
1961 /* will continue with this ctask later.. */
1962 if (conn->datadgst_en) {
1963 crypto_digest_final(conn->data_tx_tfm,
1964 (u8 *)&dtask->digest);
1965 debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
1970 BUG_ON(r2t->data_count < 0);
1971 if (conn->datadgst_en)
1972 iscsi_buf_data_digest_update(conn, &r2t->sendbuf);
1974 if (r2t->data_count) {
1975 BUG_ON(ctask->sc->use_sg == 0);
1976 if (!iscsi_buf_left(&r2t->sendbuf)) {
1977 BUG_ON(ctask->bad_sg == r2t->sg);
1978 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1986 * Done with this Data-Out. Next, check if we have
1987 * to send another Data-Out for this R2T.
1989 BUG_ON(r2t->data_length - r2t->sent < 0);
1990 left = r2t->data_length - r2t->sent;
1992 if (conn->datadgst_en) {
1993 if (iscsi_digest_final_send(conn, ctask,
1995 &dtask->digest, 1)) {
1996 debug_tcp("send r2t data digest 0x%x"
1997 "fail\n", dtask->digest);
2000 debug_tcp("r2t data send digest 0x%x\n",
2003 iscsi_solicit_data_cont(conn, ctask, r2t, left);
2004 ctask->xmstate |= XMSTATE_SOL_DATA;
2005 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2010 * Done with this R2T. Check if there are more
2011 * outstanding R2Ts ready to be processed.
2013 BUG_ON(ctask->r2t_data_count - r2t->data_length < 0);
2014 if (conn->datadgst_en) {
2015 if (iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
2016 &dtask->digest, 1)) {
2017 debug_tcp("send last r2t data digest 0x%x"
2018 "fail\n", dtask->digest);
2021 debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
2024 ctask->r2t_data_count -= r2t->data_length;
2026 spin_lock_bh(&session->lock);
2027 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
2028 spin_unlock_bh(&session->lock);
2029 if (__kfifo_get(ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
2031 ctask->xmstate |= XMSTATE_SOL_DATA;
2032 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2040 handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2042 struct iscsi_data_task *dtask = ctask->dtask;
2045 ctask->xmstate &= ~XMSTATE_W_PAD;
2046 iscsi_buf_init_virt(&ctask->sendbuf, (char*)&ctask->pad,
2048 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->pad_count, &sent)) {
2049 ctask->xmstate |= XMSTATE_W_PAD;
2053 if (conn->datadgst_en) {
2054 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
2057 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
2058 &ctask->immdigest, 1)) {
2059 debug_tcp("send padding digest 0x%x"
2060 "fail!\n", ctask->immdigest);
2063 debug_tcp("done with padding, digest 0x%x\n",
2066 if (iscsi_digest_final_send(conn, ctask,
2068 &dtask->digest, 1)) {
2069 debug_tcp("send padding digest 0x%x"
2070 "fail\n", dtask->digest);
2073 debug_tcp("done with padding, digest 0x%x\n",
2082 iscsi_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2086 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
2087 conn->id, ctask->xmstate, ctask->itt);
2090 * serialize with TMF AbortTask
2095 if (ctask->xmstate & XMSTATE_R_HDR) {
2096 rc = handle_xmstate_r_hdr(conn, ctask);
2100 if (ctask->xmstate & XMSTATE_W_HDR) {
2101 rc = handle_xmstate_w_hdr(conn, ctask);
2106 /* XXX: for data digest xmit recover */
2107 if (ctask->xmstate & XMSTATE_DATA_DIGEST) {
2108 rc = handle_xmstate_data_digest(conn, ctask);
2113 if (ctask->xmstate & XMSTATE_IMM_DATA) {
2114 rc = handle_xmstate_imm_data(conn, ctask);
2119 if (ctask->xmstate & XMSTATE_UNS_HDR) {
2120 BUG_ON(!ctask->unsol_count);
2121 ctask->xmstate &= ~XMSTATE_UNS_HDR;
2122 unsolicit_head_again:
2123 rc = handle_xmstate_uns_hdr(conn, ctask);
2128 if (ctask->xmstate & XMSTATE_UNS_DATA) {
2129 rc = handle_xmstate_uns_data(conn, ctask);
2131 goto unsolicit_head_again;
2137 if (ctask->xmstate & XMSTATE_SOL_HDR) {
2138 struct iscsi_r2t_info *r2t;
2140 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2141 ctask->xmstate |= XMSTATE_SOL_DATA;
2143 __kfifo_get(ctask->r2tqueue, (void*)&ctask->r2t,
2147 if (conn->hdrdgst_en)
2148 iscsi_hdr_digest(conn, &r2t->headbuf,
2149 (u8*)r2t->dtask->hdrext);
2150 if (iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count)) {
2151 ctask->xmstate &= ~XMSTATE_SOL_DATA;
2152 ctask->xmstate |= XMSTATE_SOL_HDR;
2156 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
2157 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
2161 if (ctask->xmstate & XMSTATE_SOL_DATA) {
2162 rc = handle_xmstate_sol_data(conn, ctask);
2164 goto solicit_head_again;
2171 * Last thing to check is whether we need to send write
2172 * padding. Note that we check for xmstate equality, not just the bit.
2174 if (ctask->xmstate == XMSTATE_W_PAD)
2175 rc = handle_xmstate_w_pad(conn, ctask);
2181 * iscsi_data_xmit - xmit any command into the scheduled connection
2182 * @conn: iscsi connection
2185 * The function can return -EAGAIN in which case the caller must
2186 * re-schedule it later or recover. A '0' return code means
2190 iscsi_data_xmit(struct iscsi_conn *conn)
2192 if (unlikely(conn->suspend_tx)) {
2193 debug_tcp("conn %d Tx suspended!\n", conn->id);
2198 * Transmit in the following order:
2200 * 1) unfinished xmit (ctask or mtask)
2201 * 2) immediate control PDUs
2204 * 5) non-immediate control PDUs
2206 * No need to lock around __kfifo_get as long as
2207 * there's one producer and one consumer.
2210 BUG_ON(conn->ctask && conn->mtask);
2213 if (iscsi_ctask_xmit(conn, conn->ctask))
2215 /* done with this in-progress ctask */
2219 if (iscsi_mtask_xmit(conn, conn->mtask))
2221 /* done with this in-progress mtask */
2225 /* process immediate first */
2226 if (unlikely(__kfifo_len(conn->immqueue))) {
2227 struct iscsi_session *session = conn->session;
2228 while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
2230 if (iscsi_mtask_xmit(conn, conn->mtask))
2233 if (conn->mtask->hdr.itt ==
2234 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2235 spin_lock_bh(&session->lock);
2236 __kfifo_put(session->mgmtpool.queue,
2237 (void*)&conn->mtask, sizeof(void*));
2238 spin_unlock_bh(&session->lock);
2241 /* done with this mtask */
2245 /* process write queue */
2246 while (__kfifo_get(conn->writequeue, (void*)&conn->ctask,
2248 if (iscsi_ctask_xmit(conn, conn->ctask))
2252 /* process command queue */
2253 while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
2255 if (iscsi_ctask_xmit(conn, conn->ctask))
2258 /* done with this ctask */
2261 /* process the rest of the control-plane PDUs, if any */
2262 if (unlikely(__kfifo_len(conn->mgmtqueue))) {
2263 struct iscsi_session *session = conn->session;
2265 while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
2267 if (iscsi_mtask_xmit(conn, conn->mtask))
2270 if (conn->mtask->hdr.itt ==
2271 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2272 spin_lock_bh(&session->lock);
2273 __kfifo_put(session->mgmtpool.queue,
2274 (void*)&conn->mtask,
2276 spin_unlock_bh(&session->lock);
2279 /* done with this mtask */
2286 if (unlikely(conn->suspend_tx))
2293 iscsi_xmitworker(void *data)
2295 struct iscsi_conn *conn = data;
2298 * serialize Xmit worker on a per-connection basis.
2300 mutex_lock(&conn->xmitmutex);
2301 if (iscsi_data_xmit(conn))
2302 schedule_work(&conn->xmitwork);
2303 mutex_unlock(&conn->xmitmutex);
2306 #define FAILURE_BAD_HOST 1
2307 #define FAILURE_SESSION_FAILED 2
2308 #define FAILURE_SESSION_FREED 3
2309 #define FAILURE_WINDOW_CLOSED 4
2310 #define FAILURE_SESSION_TERMINATE 5
2313 iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
2315 struct Scsi_Host *host;
2317 struct iscsi_session *session;
2318 struct iscsi_conn *conn = NULL;
2319 struct iscsi_cmd_task *ctask = NULL;
2321 sc->scsi_done = done;
2324 host = sc->device->host;
2325 session = iscsi_hostdata(host->hostdata);
2326 BUG_ON(host != session->host);
2328 spin_lock(&session->lock);
2330 if (session->state != ISCSI_STATE_LOGGED_IN) {
2331 if (session->state == ISCSI_STATE_FAILED) {
2332 reason = FAILURE_SESSION_FAILED;
2334 } else if (session->state == ISCSI_STATE_TERMINATE) {
2335 reason = FAILURE_SESSION_TERMINATE;
2338 reason = FAILURE_SESSION_FREED;
2343 * Check for iSCSI window and take care of CmdSN wrap-around
2345 if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
2346 reason = FAILURE_WINDOW_CLOSED;
2350 conn = session->leadconn;
2352 __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
2355 sc->SCp.phase = session->age;
2356 sc->SCp.ptr = (char*)ctask;
2357 iscsi_cmd_init(conn, ctask, sc);
2359 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
2361 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
2362 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
2363 conn->id, (long)sc, ctask->itt, sc->request_bufflen,
2364 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
2365 spin_unlock(&session->lock);
2367 if (!in_interrupt() && mutex_trylock(&conn->xmitmutex)) {
2368 spin_unlock_irq(host->host_lock);
2369 if (iscsi_data_xmit(conn))
2370 schedule_work(&conn->xmitwork);
2371 mutex_unlock(&conn->xmitmutex);
2372 spin_lock_irq(host->host_lock);
2374 schedule_work(&conn->xmitwork);
2379 spin_unlock(&session->lock);
2380 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
2381 return SCSI_MLQUEUE_HOST_BUSY;
2384 spin_unlock(&session->lock);
2385 printk(KERN_ERR "iscsi_tcp: cmd 0x%x is not queued (%d)\n",
2386 sc->cmnd[0], reason);
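	/* fixed-format sense (0x70): key NOT READY, ASC/ASCQ 08h/00h
	 * (logical unit communication failure) */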
2387 sc->sense_buffer[0] = 0x70;
2388 sc->sense_buffer[2] = NOT_READY;
2389 sc->sense_buffer[7] = 0x6;
2390 sc->sense_buffer[12] = 0x08;
2391 sc->sense_buffer[13] = 0x00;
2392 sc->result = (DID_NO_CONNECT << 16);
2393 sc->resid = sc->request_bufflen;
2399 iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
2401 if (depth > ISCSI_MAX_CMD_PER_LUN)
2402 depth = ISCSI_MAX_CMD_PER_LUN;
2403 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
2404 return sdev->queue_depth;
static int
iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
{
	int i;

	*items = kmalloc(max * sizeof(void*), GFP_KERNEL);
	if (*items == NULL)
		return -ENOMEM;

	q->max = max;
	q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
	if (q->pool == NULL) {
		kfree(*items);
		return -ENOMEM;
	}

	q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
			      GFP_KERNEL, NULL);
	if (q->queue == ERR_PTR(-ENOMEM)) {
		kfree(q->pool);
		kfree(*items);
		return -ENOMEM;
	}

	for (i = 0; i < max; i++) {
		q->pool[i] = kmalloc(item_size, GFP_KERNEL);
		if (q->pool[i] == NULL) {
			int j;

			for (j = 0; j < i; j++)
				kfree(q->pool[j]);

			kfifo_free(q->queue);
			kfree(q->pool);
			kfree(*items);
			return -ENOMEM;
		}
		memset(q->pool[i], 0, item_size);
		(*items)[i] = q->pool[i];
		__kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
	}

	return 0;
}
static void
iscsi_pool_free(struct iscsi_queue *q, void **items)
{
	int i;

	for (i = 0; i < q->max; i++)
		kfree(items[i]);
	kfree(q->pool);
	kfree(items);
}
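/*
 * iscsi_conn_create - allocate and initialize a connection object.
 *
 * Resources are acquired in a fixed order (xmitqueue, writequeue,
 * immqueue, mgmtqueue, login_mtask, receive staging buffer); the goto
 * labels at the end unwind them in reverse order on failure.
 */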
static iscsi_connh_t
iscsi_conn_create(iscsi_sessionh_t sessionh, uint32_t conn_idx)
{
	struct iscsi_session *session = iscsi_ptr(sessionh);
	struct iscsi_conn *conn = NULL;

	conn = kmalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
	if (conn == NULL)
		goto conn_alloc_fail;
	memset(conn, 0, sizeof(struct iscsi_conn));

	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
	conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	conn->id = conn_idx;
	conn->exp_statsn = 0;
	conn->tmabort_state = TMABORT_INITIAL;

	/* initial operational parameters */
	conn->hdr_size = sizeof(struct iscsi_hdr);
	conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
	conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;

	spin_lock_init(&conn->lock);

	/* initialize general xmit PDU commands queue */
	conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
				      GFP_KERNEL, NULL);
	if (conn->xmitqueue == ERR_PTR(-ENOMEM))
		goto xmitqueue_alloc_fail;

	/* initialize write response PDU commands queue */
	conn->writequeue = kfifo_alloc(session->cmds_max * sizeof(void*),
				       GFP_KERNEL, NULL);
	if (conn->writequeue == ERR_PTR(-ENOMEM))
		goto writequeue_alloc_fail;

	/* initialize general immediate & non-immediate PDU commands queue */
	conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
				     GFP_KERNEL, NULL);
	if (conn->immqueue == ERR_PTR(-ENOMEM))
		goto immqueue_alloc_fail;

	conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
				      GFP_KERNEL, NULL);
	if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
		goto mgmtqueue_alloc_fail;

	INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);

	/* allocate login_mtask used for the login/text sequences */
	spin_lock_bh(&session->lock);
	if (!__kfifo_get(session->mgmtpool.queue,
			 (void*)&conn->login_mtask, sizeof(void*))) {
		spin_unlock_bh(&session->lock);
		goto login_mtask_alloc_fail;
	}
	spin_unlock_bh(&session->lock);

	/* allocate initial PDU receive place holder */
	if (conn->data_size <= PAGE_SIZE)
		conn->data = kmalloc(conn->data_size, GFP_KERNEL);
	else
		conn->data = (void*)__get_free_pages(GFP_KERNEL,
					get_order(conn->data_size));
	if (!conn->data)
		goto max_recv_dlength_alloc_fail;

	init_timer(&conn->tmabort_timer);
	mutex_init(&conn->xmitmutex);
	init_waitqueue_head(&conn->ehwait);

	return iscsi_handle(conn);

max_recv_dlength_alloc_fail:
	spin_lock_bh(&session->lock);
	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
		    sizeof(void*));
	spin_unlock_bh(&session->lock);
login_mtask_alloc_fail:
	kfifo_free(conn->mgmtqueue);
mgmtqueue_alloc_fail:
	kfifo_free(conn->immqueue);
immqueue_alloc_fail:
	kfifo_free(conn->writequeue);
writequeue_alloc_fail:
	kfifo_free(conn->xmitqueue);
xmitqueue_alloc_fail:
	kfree(conn);
conn_alloc_fail:
	return iscsi_handle(NULL);
}
static void
iscsi_conn_destroy(iscsi_connh_t connh)
{
	struct iscsi_conn *conn = iscsi_ptr(connh);
	struct iscsi_session *session = conn->session;
	unsigned long flags;

	mutex_lock(&conn->xmitmutex);
	set_bit(SUSPEND_BIT, &conn->suspend_tx);
	if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE && conn->sock) {
		struct sock *sk = conn->sock->sk;

		/*
		 * conn_start() has never been called!
		 * need to cleanup the socket.
		 */
		write_lock_bh(&sk->sk_callback_lock);
		set_bit(SUSPEND_BIT, &conn->suspend_rx);
		write_unlock_bh(&sk->sk_callback_lock);

		sock_hold(conn->sock->sk);
		iscsi_conn_restore_callbacks(conn);
		sock_put(conn->sock->sk);
		sock_release(conn->sock);
	}

	spin_lock_bh(&session->lock);
	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
	if (session->leadconn == conn) {
		/*
		 * leading connection? then give up on recovery.
		 */
		session->state = ISCSI_STATE_TERMINATE;
		wake_up(&conn->ehwait);
	}
	spin_unlock_bh(&session->lock);

	mutex_unlock(&conn->xmitmutex);

	/*
	 * Block until all in-progress commands for this connection
	 * time out or fail.
	 */
	for (;;) {
		spin_lock_irqsave(session->host->host_lock, flags);
		if (!session->host->host_busy) { /* OK for ERL == 0 */
			spin_unlock_irqrestore(session->host->host_lock, flags);
			break;
		}
		spin_unlock_irqrestore(session->host->host_lock, flags);
		msleep_interruptible(500);
		printk("conn_destroy(): host_busy %d host_failed %d\n",
		       session->host->host_busy, session->host->host_failed);
		/*
		 * force eh_abort() to unblock
		 */
		wake_up(&conn->ehwait);
	}

	/* now free crypto */
	if (conn->hdrdgst_en || conn->datadgst_en) {
		if (conn->tx_tfm)
			crypto_free_tfm(conn->tx_tfm);
		if (conn->rx_tfm)
			crypto_free_tfm(conn->rx_tfm);
		if (conn->data_tx_tfm)
			crypto_free_tfm(conn->data_tx_tfm);
		if (conn->data_rx_tfm)
			crypto_free_tfm(conn->data_rx_tfm);
	}

	/* free conn->data, size = MaxRecvDataSegmentLength */
	if (conn->data_size <= PAGE_SIZE)
		kfree(conn->data);
	else
		free_pages((unsigned long)conn->data,
			   get_order(conn->data_size));

	spin_lock_bh(&session->lock);
	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
		    sizeof(void*));
	list_del(&conn->item);
	if (list_empty(&session->connections))
		session->leadconn = NULL;
	if (session->leadconn && session->leadconn == conn)
		session->leadconn = container_of(session->connections.next,
						 struct iscsi_conn, item);
	if (session->leadconn == NULL)
		/* no connections exist; reset sequencing */
		session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
	spin_unlock_bh(&session->lock);

	kfifo_free(conn->xmitqueue);
	kfifo_free(conn->writequeue);
	kfifo_free(conn->immqueue);
	kfifo_free(conn->mgmtqueue);

	kfree(conn);
}
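/*
 * iscsi_conn_bind - attach a connection to its session and TCP socket.
 *
 * Userspace (iscsid) establishes the TCP connection and hands the socket
 * down as a file descriptor; it is looked up here, its callbacks are
 * intercepted for sendfile-like receive processing, and the connection
 * may become the session's leading connection.
 */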
static int
iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh,
		uint32_t transport_fd, int is_leading)
{
	struct iscsi_session *session = iscsi_ptr(sessionh);
	struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh);
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup(transport_fd, &err);
	if (!sock) {
		printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	/* lookup for existing connection */
	spin_lock_bh(&session->lock);
	list_for_each_entry(tmp, &session->connections, item) {
		if (tmp == conn) {
			if (conn->c_stage != ISCSI_CONN_STOPPED ||
			    conn->stop_stage == STOP_CONN_TERM) {
				printk(KERN_ERR "iscsi_tcp: can't bind "
				       "non-stopped connection (%d:%d)\n",
				       conn->c_stage, conn->stop_stage);
				spin_unlock_bh(&session->lock);
				return -EIO;
			}
			break;
		}
	}
	if (tmp != conn) {
		/* bind new iSCSI connection to session */
		conn->session = session;
		list_add(&conn->item, &session->connections);
	}
	spin_unlock_bh(&session->lock);

	if (conn->stop_stage != STOP_CONN_SUSPEND) {
		/* bind iSCSI connection and socket */
		conn->sock = sock;

		/* setup Socket parameters */
		sk = sock->sk;
		sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
		sk->sk_allocation = GFP_ATOMIC;

		/* FIXME: disable Nagle's algorithm */

		/*
		 * Intercept TCP callbacks for sendfile like receive
		 * processing.
		 */
		iscsi_conn_set_callbacks(conn);

		conn->sendpage = conn->sock->ops->sendpage;

		/*
		 * set receive state machine into initial state
		 */
		conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	}

	if (is_leading)
		session->leadconn = conn;

	/*
	 * Unblock xmitworker(), Login Phase will pass through.
	 */
	clear_bit(SUSPEND_BIT, &conn->suspend_rx);
	clear_bit(SUSPEND_BIT, &conn->suspend_tx);

	return 0;
}
static int
iscsi_conn_start(iscsi_connh_t connh)
{
	struct iscsi_conn *conn = iscsi_ptr(connh);
	struct iscsi_session *session = conn->session;
	struct sock *sk;

	/* FF phase warming up... */

	if (session == NULL) {
		printk(KERN_ERR "iscsi_tcp: can't start unbound connection\n");
		return -EPERM;
	}

	sk = conn->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	spin_lock_bh(&session->lock);
	conn->c_stage = ISCSI_CONN_STARTED;
	session->state = ISCSI_STATE_LOGGED_IN;

	switch(conn->stop_stage) {
	case STOP_CONN_RECOVER:
		/*
		 * unblock eh_abort() if it is blocked. re-try all
		 * commands after successful recovery
		 */
		session->conn_cnt++;
		conn->stop_stage = 0;
		conn->tmabort_state = TMABORT_INITIAL;
		wake_up(&conn->ehwait);
		break;
	case STOP_CONN_TERM:
		session->conn_cnt++;
		conn->stop_stage = 0;
		break;
	case STOP_CONN_SUSPEND:
		conn->stop_stage = 0;
		clear_bit(SUSPEND_BIT, &conn->suspend_rx);
		clear_bit(SUSPEND_BIT, &conn->suspend_tx);
		break;
	default:
		break;
	}
	spin_unlock_bh(&session->lock);
	write_unlock_bh(&sk->sk_callback_lock);

	return 0;
}
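/*
 * iscsi_conn_stop - suspend a connection and flush its queues.
 *
 * RX is suspended first under sk_callback_lock, then TX under the
 * session lock; for TERM/RECOVER the socket callbacks are restored,
 * queued ctasks/mtasks are returned to their pools, and the socket is
 * released only after transmit activity has stopped.
 */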
static void
iscsi_conn_stop(iscsi_connh_t connh, int flag)
{
	struct iscsi_conn *conn = iscsi_ptr(connh);
	struct iscsi_session *session = conn->session;
	struct sock *sk;
	unsigned long flags;

	BUG_ON(!conn->sock);
	sk = conn->sock->sk;
	write_lock_bh(&sk->sk_callback_lock);
	set_bit(SUSPEND_BIT, &conn->suspend_rx);
	write_unlock_bh(&sk->sk_callback_lock);

	mutex_lock(&conn->xmitmutex);

	spin_lock_irqsave(session->host->host_lock, flags);
	spin_lock(&session->lock);
	conn->stop_stage = flag;
	conn->c_stage = ISCSI_CONN_STOPPED;
	set_bit(SUSPEND_BIT, &conn->suspend_tx);

	if (flag != STOP_CONN_SUSPEND)
		session->conn_cnt--;

	if (session->conn_cnt == 0 || session->leadconn == conn)
		session->state = ISCSI_STATE_FAILED;

	spin_unlock(&session->lock);
	spin_unlock_irqrestore(session->host->host_lock, flags);

	if (flag == STOP_CONN_TERM || flag == STOP_CONN_RECOVER) {
		struct iscsi_cmd_task *ctask;
		struct iscsi_mgmt_task *mtask;

		/*
		 * Socket must go now.
		 */
		sock_hold(conn->sock->sk);
		iscsi_conn_restore_callbacks(conn);
		sock_put(conn->sock->sk);

		/*
		 * flush xmit queues.
		 */
		spin_lock_bh(&session->lock);
		while (__kfifo_get(conn->writequeue, (void*)&ctask,
				   sizeof(void*)) ||
		       __kfifo_get(conn->xmitqueue, (void*)&ctask,
				   sizeof(void*))) {
			struct iscsi_r2t_info *r2t;

			/*
			 * flush ctask's r2t queues
			 */
			while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
					   sizeof(void*)))
				__kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
					    sizeof(void*));

			spin_unlock_bh(&session->lock);
			iscsi_ctask_cleanup(conn, ctask);
			spin_lock_bh(&session->lock);
		}

		while (__kfifo_get(conn->immqueue, (void*)&mtask,
				   sizeof(void*)) ||
		       __kfifo_get(conn->mgmtqueue, (void*)&mtask,
				   sizeof(void*))) {
			__kfifo_put(session->mgmtpool.queue,
				    (void*)&mtask, sizeof(void*));
		}
		spin_unlock_bh(&session->lock);

		/*
		 * release socket only after we stopped data_xmit()
		 * activity and flushed all outstandings
		 */
		sock_release(conn->sock);
		conn->sock = NULL;

		/*
		 * for connection level recovery we should not calculate
		 * header digest. conn->hdr_size used for optimization
		 * in hdr_extract() and will be re-negotiated at
		 * set_param() time.
		 */
		if (flag == STOP_CONN_RECOVER) {
			conn->hdr_size = sizeof(struct iscsi_hdr);
			conn->hdrdgst_en = 0;
			conn->datadgst_en = 0;
		}
	}
	mutex_unlock(&conn->xmitmutex);
}
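/*
 * iscsi_conn_send_generic - queue a control PDU (login, text, nop, TMF).
 *
 * The ITT assigned below packs three fields into one 32-bit tag
 * (a sketch; the exact bit positions come from CID_SHIFT/AGE_SHIFT
 * in iscsi_tcp.h):
 *
 *	itt = mtask->itt | (conn->id << CID_SHIFT) | (age << AGE_SHIFT)
 *
 * so the response path can recover the mgmt-task index, the CID and
 * the session age from the tag echoed back by the target.
 */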
static int
iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			char *data, uint32_t data_size)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
	struct iscsi_mgmt_task *mtask;

	spin_lock_bh(&session->lock);
	if (session->state == ISCSI_STATE_TERMINATE) {
		spin_unlock_bh(&session->lock);
		return -EPERM;
	}
	if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
	    hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
		/*
		 * Login and Text are sent serially, in
		 * request-followed-by-response sequence.
		 * Same mtask can be used. Same ITT must be used.
		 * Note that login_mtask is preallocated at conn_create().
		 */
		mtask = conn->login_mtask;
	else {
		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);

		if (!__kfifo_get(session->mgmtpool.queue,
				 (void*)&mtask, sizeof(void*))) {
			spin_unlock_bh(&session->lock);
			return -ENOSPC;
		}
	}

	/*
	 * pre-format CmdSN and ExpStatSN for outgoing PDU.
	 */
	if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
		hdr->itt = mtask->itt | (conn->id << CID_SHIFT) |
			   (session->age << AGE_SHIFT);
		nop->cmdsn = cpu_to_be32(session->cmdsn);
		if (conn->c_stage == ISCSI_CONN_STARTED &&
		    !(hdr->opcode & ISCSI_OP_IMMEDIATE))
			session->cmdsn++;
	} else
		/* do not advance CmdSN */
		nop->cmdsn = cpu_to_be32(session->cmdsn);

	nop->exp_statsn = cpu_to_be32(conn->exp_statsn);

	memcpy(&mtask->hdr, hdr, sizeof(struct iscsi_hdr));

	iscsi_buf_init_virt(&mtask->headbuf, (char*)&mtask->hdr,
			    sizeof(struct iscsi_hdr));

	spin_unlock_bh(&session->lock);

	if (data_size) {
		memcpy(mtask->data, data, data_size);
		mtask->data_count = data_size;
	} else
		mtask->data_count = 0;

	mtask->xmstate = XMSTATE_IMM_HDR;

	if (mtask->data_count) {
		iscsi_buf_init_iov(&mtask->sendbuf, (char*)mtask->data,
				   mtask->data_count);
	}

	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
		   hdr->opcode, hdr->itt, data_size);

	/*
	 * since send_pdu() could be called at least from two contexts,
	 * we need to serialize __kfifo_put, so we don't have to take
	 * additional lock on fast data-path
	 */
	if (hdr->opcode & ISCSI_OP_IMMEDIATE)
		__kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
	else
		__kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));

	schedule_work(&conn->xmitwork);

	return 0;
}
static int
iscsi_eh_host_reset(struct scsi_cmnd *sc)
{
	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
	struct iscsi_conn *conn = ctask->conn;
	struct iscsi_session *session = conn->session;

	spin_lock_bh(&session->lock);
	if (session->state == ISCSI_STATE_TERMINATE) {
		debug_scsi("failing host reset: session terminated "
			   "[CID %d age %d]", conn->id, session->age);
		spin_unlock_bh(&session->lock);
		return FAILED;
	}
	spin_unlock_bh(&session->lock);

	debug_scsi("failing connection CID %d due to SCSI host reset "
		   "[itt 0x%x age %d]", conn->id, ctask->itt,
		   session->age);
	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);

	return SUCCESS;
}
static void
iscsi_tmabort_timedout(unsigned long data)
{
	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
	struct iscsi_conn *conn = ctask->conn;
	struct iscsi_session *session = conn->session;

	spin_lock(&session->lock);
	if (conn->tmabort_state == TMABORT_INITIAL) {
		__kfifo_put(session->mgmtpool.queue,
			    (void*)&ctask->mtask, sizeof(void*));
		conn->tmabort_state = TMABORT_TIMEDOUT;
		debug_scsi("tmabort timedout [sc %lx itt 0x%x]\n",
			   (long)ctask->sc, ctask->itt);
		/* unblock eh_abort() */
		wake_up(&conn->ehwait);
	}
	spin_unlock(&session->lock);
}
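/*
 * iscsi_eh_abort - SCSI EH abort handler (ERL=0).
 *
 * If the session is still logged in, an immediate TMF ABORT TASK PDU is
 * sent for the timed-out command and a 3 second timer
 * (iscsi_tmabort_timedout) is armed; the handler then sleeps on
 * conn->ehwait until the TMF response arrives, the timer fires, or the
 * session is re-opened or terminated.
 */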
static int
iscsi_eh_abort(struct scsi_cmnd *sc)
{
	int rc;
	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
	struct iscsi_conn *conn = ctask->conn;
	struct iscsi_session *session = conn->session;

	conn->eh_abort_cnt++;
	debug_scsi("aborting [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);

	/*
	 * two cases for ERL=0 here:
	 *
	 * 1) connection-level failure;
	 * 2) recovery due to protocol error;
	 */
	mutex_lock(&conn->xmitmutex);
	spin_lock_bh(&session->lock);
	if (session->state != ISCSI_STATE_LOGGED_IN) {
		if (session->state == ISCSI_STATE_TERMINATE) {
			spin_unlock_bh(&session->lock);
			mutex_unlock(&conn->xmitmutex);
			goto failed;
		}
		spin_unlock_bh(&session->lock);
	} else {
		struct iscsi_tm *hdr = &conn->tmhdr;

		/*
		 * Still LOGGED_IN...
		 */

		if (!ctask->sc || sc->SCp.phase != session->age) {
			/*
			 * 1) ctask completed before time out. But session
			 *    is still ok => Happy Retry.
			 * 2) session was re-opened during time out of ctask.
			 */
			spin_unlock_bh(&session->lock);
			mutex_unlock(&conn->xmitmutex);
			goto success;
		}
		conn->tmabort_state = TMABORT_INITIAL;
		spin_unlock_bh(&session->lock);

		/*
		 * ctask timed out but session is OK
		 * ERL=0 requires task mgmt abort to be issued on each
		 * failed command. requests must be serialized.
		 */
		memset(hdr, 0, sizeof(struct iscsi_tm));
		hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
		hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
		memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
		hdr->rtt = ctask->hdr.itt;
		hdr->refcmdsn = ctask->hdr.cmdsn;

		rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
					     NULL, 0);
		if (rc) {
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			debug_scsi("abort sent failure [itt 0x%x]", ctask->itt);
		} else {
			struct iscsi_r2t_info *r2t;

			/*
			 * TMF abort vs. TMF response race logic
			 */
			spin_lock_bh(&session->lock);
			ctask->mtask = (struct iscsi_mgmt_task *)
				session->mgmt_cmds[(hdr->itt & ITT_MASK) -
						   ISCSI_MGMT_ITT_OFFSET];
			/*
			 * have to flush r2tqueue to avoid r2t leaks
			 */
			while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
					   sizeof(void*)))
				__kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
					    sizeof(void*));

			if (conn->tmabort_state == TMABORT_INITIAL) {
				conn->tmfcmd_pdus_cnt++;
				conn->tmabort_timer.expires = 3*HZ + jiffies;
				conn->tmabort_timer.function =
						iscsi_tmabort_timedout;
				conn->tmabort_timer.data = (unsigned long)ctask;
				add_timer(&conn->tmabort_timer);
				debug_scsi("abort sent [itt 0x%x]", ctask->itt);
			} else {
				if (!ctask->sc ||
				    conn->tmabort_state == TMABORT_SUCCESS) {
					conn->tmabort_state = TMABORT_INITIAL;
					spin_unlock_bh(&session->lock);
					mutex_unlock(&conn->xmitmutex);
					goto success;
				}
				conn->tmabort_state = TMABORT_INITIAL;
				iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			}
			spin_unlock_bh(&session->lock);
		}
	}
	mutex_unlock(&conn->xmitmutex);

	/*
	 * block eh thread until:
	 *
	 * 1) abort response;
	 * 2) abort timeout;
	 * 3) session re-opened;
	 * 4) session terminated;
	 */
	for (;;) {
		int p_state = session->state;

		rc = wait_event_interruptible(conn->ehwait,
			(p_state == ISCSI_STATE_LOGGED_IN ?
			 (session->state == ISCSI_STATE_TERMINATE ||
			  conn->tmabort_state != TMABORT_INITIAL) :
			 (session->state == ISCSI_STATE_TERMINATE ||
			  session->state == ISCSI_STATE_LOGGED_IN)));
		if (rc) {
			/* shutting down... */
			session->state = ISCSI_STATE_TERMINATE;
			goto failed;
		}

		if (signal_pending(current))
			flush_signals(current);

		if (session->state == ISCSI_STATE_TERMINATE)
			goto failed;

		spin_lock_bh(&session->lock);
		if (sc->SCp.phase == session->age &&
		   (conn->tmabort_state == TMABORT_TIMEDOUT ||
		    conn->tmabort_state == TMABORT_FAILED)) {
			conn->tmabort_state = TMABORT_INITIAL;
			if (!ctask->sc) {
				/*
				 * ctask completed before tmf abort response or
				 * timed out.
				 * But session is still ok => Happy Retry.
				 */
				spin_unlock_bh(&session->lock);
				goto success;
			}
			spin_unlock_bh(&session->lock);
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			continue;
		}
		spin_unlock_bh(&session->lock);
		break;
	}

success:
	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
	rc = SUCCESS;
	goto exit;

failed:
	debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
	rc = FAILED;

exit:
	del_timer_sync(&conn->tmabort_timer);

	mutex_lock(&conn->xmitmutex);
	if (conn->sock) {
		struct sock *sk = conn->sock->sk;

		write_lock_bh(&sk->sk_callback_lock);
		iscsi_ctask_cleanup(conn, ctask);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	mutex_unlock(&conn->xmitmutex);
	return rc;
}
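/*
 * iscsi_r2tpool_alloc - per-command R2T bookkeeping.
 *
 * Each command task gets a pool of R2T descriptors (4 * max_r2t, see
 * the comment below), a kfifo used as the R2T xmit queue, and a
 * mempool for Data-Out task descriptors.
 */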
static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
	int i;
	int cmd_i;

	/*
	 * initialize per-task: R2T pool and xmit queue
	 */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];

		/*
		 * pre-allocate 4x as many R2Ts to handle the race when
		 * the target acks DataOut faster than data_xmit() can
		 * replenish the r2tqueue.
		 */

		/* R2T pool */
		if (iscsi_pool_init(&ctask->r2tpool, session->max_r2t * 4,
		    (void***)&ctask->r2ts, sizeof(struct iscsi_r2t_info))) {
			goto r2t_alloc_fail;
		}

		/* R2T xmit queue */
		ctask->r2tqueue = kfifo_alloc(
		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
		if (ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
			iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
			goto r2t_alloc_fail;
		}

		/*
		 * the number of Data-Out PDUs within an R2T sequence can be
		 * quite big; use a mempool
		 */
		ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX,
			 mempool_alloc_slab, mempool_free_slab, taskcache);
		if (ctask->datapool == NULL) {
			kfifo_free(ctask->r2tqueue);
			iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
			goto r2t_alloc_fail;
		}
		INIT_LIST_HEAD(&ctask->dataqueue);
	}

	return 0;

r2t_alloc_fail:
	for (i = 0; i < cmd_i; i++) {
		mempool_destroy(session->cmds[i]->datapool);
		kfifo_free(session->cmds[i]->r2tqueue);
		iscsi_pool_free(&session->cmds[i]->r2tpool,
				(void**)session->cmds[i]->r2ts);
	}
	return -ENOMEM;
}
static void
iscsi_r2tpool_free(struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		mempool_destroy(session->cmds[i]->datapool);
		kfifo_free(session->cmds[i]->r2tqueue);
		iscsi_pool_free(&session->cmds[i]->r2tpool,
				(void**)session->cmds[i]->r2ts);
	}
}
static struct scsi_host_template iscsi_sht = {
	.name			= "iSCSI Initiator over TCP/IP, v."
				  "0:4.445",
	.queuecommand		= iscsi_queuecommand,
	.change_queue_depth	= iscsi_change_queue_depth,
	.can_queue		= ISCSI_XMIT_CMDS_MAX - 1,
	.sg_tablesize		= ISCSI_SG_TABLESIZE,
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_host_reset_handler	= iscsi_eh_host_reset,
	.use_clustering		= DISABLE_CLUSTERING,
	.proc_name		= "iscsi_tcp",
};
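/*
 * iscsi_session_create - allocate per-session command pools.
 *
 * The CmdSN window starts out as [initial_cmdsn, initial_cmdsn + 1]
 * (exp_cmdsn and max_cmdsn are both set to initial_cmdsn + 1) and is
 * then advanced from the ExpCmdSN/MaxCmdSN fields of target PDUs.
 */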
static iscsi_sessionh_t
iscsi_session_create(uint32_t initial_cmdsn, struct Scsi_Host *host)
{
	int cmd_i;
	struct iscsi_session *session;

	session = iscsi_hostdata(host->hostdata);
	memset(session, 0, sizeof(struct iscsi_session));

	session->host = host;
	session->id = host->host_no;
	session->state = ISCSI_STATE_LOGGED_IN;
	session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
	session->cmds_max = ISCSI_XMIT_CMDS_MAX;
	session->cmdsn = initial_cmdsn;
	session->exp_cmdsn = initial_cmdsn + 1;
	session->max_cmdsn = initial_cmdsn + 1;
	session->max_r2t = 1;

	/* initialize SCSI PDU commands pool */
	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
	    (void***)&session->cmds, sizeof(struct iscsi_cmd_task)))
		goto cmdpool_alloc_fail;

	/* pre-format cmds pool with ITT */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++)
		session->cmds[cmd_i]->itt = cmd_i;

	spin_lock_init(&session->lock);
	INIT_LIST_HEAD(&session->connections);

	/* initialize immediate command pool */
	if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
	    (void***)&session->mgmt_cmds, sizeof(struct iscsi_mgmt_task)))
		goto mgmtpool_alloc_fail;

	/* pre-format immediate cmds pool with ITT */
	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
		session->mgmt_cmds[cmd_i]->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
		session->mgmt_cmds[cmd_i]->data = kmalloc(
			DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
		if (!session->mgmt_cmds[cmd_i]->data) {
			int j;

			for (j = 0; j < cmd_i; j++)
				kfree(session->mgmt_cmds[j]->data);
			goto immdata_alloc_fail;
		}
	}

	if (iscsi_r2tpool_alloc(session))
		goto r2tpool_alloc_fail;

	return iscsi_handle(session);

r2tpool_alloc_fail:
	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
		kfree(session->mgmt_cmds[cmd_i]->data);
	iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
immdata_alloc_fail:
mgmtpool_alloc_fail:
	iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
cmdpool_alloc_fail:
	return iscsi_handle(NULL);
}
static void
iscsi_session_destroy(iscsi_sessionh_t sessionh)
{
	int cmd_i;
	struct iscsi_data_task *dtask, *n;
	struct iscsi_session *session = iscsi_ptr(sessionh);

	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
			list_del(&dtask->item);
			mempool_free(dtask, ctask->datapool);
		}
	}

	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
		kfree(session->mgmt_cmds[cmd_i]->data);

	iscsi_r2tpool_free(session);
	iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
	iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
}
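/*
 * iscsi_conn_set_param - apply a negotiated operational parameter.
 *
 * Parameters may only change on a freshly created connection or during
 * recovery.  Enabling header/data digests allocates crc32c crypto
 * transforms; raising MaxRecvDataSegmentLength re-allocates the receive
 * staging buffer before the old one is freed.
 */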
static int
iscsi_conn_set_param(iscsi_connh_t connh, enum iscsi_param param,
		     uint32_t value)
{
	struct iscsi_conn *conn = iscsi_ptr(connh);
	struct iscsi_session *session = conn->session;

	spin_lock_bh(&session->lock);
	if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
	    conn->stop_stage != STOP_CONN_RECOVER) {
		printk(KERN_ERR "iscsi_tcp: can not change parameter [%d]\n",
		       param);
		spin_unlock_bh(&session->lock);
		return 0;
	}
	spin_unlock_bh(&session->lock);

	switch(param) {
	case ISCSI_PARAM_MAX_RECV_DLENGTH: {
		char *saveptr = conn->data;
		gfp_t flags = GFP_KERNEL;

		if (conn->data_size >= value) {
			conn->max_recv_dlength = value;
			break;
		}

		spin_lock_bh(&session->lock);
		if (conn->stop_stage == STOP_CONN_RECOVER)
			flags = GFP_ATOMIC;
		spin_unlock_bh(&session->lock);

		if (value <= PAGE_SIZE)
			conn->data = kmalloc(value, flags);
		else
			conn->data = (void*)__get_free_pages(flags,
							     get_order(value));
		if (conn->data == NULL) {
			conn->data = saveptr;
			return -ENOMEM;
		}
		if (conn->data_size <= PAGE_SIZE)
			kfree(saveptr);
		else
			free_pages((unsigned long)saveptr,
				   get_order(conn->data_size));
		conn->max_recv_dlength = value;
		conn->data_size = value;
		break;
	}
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		conn->max_xmit_dlength = value;
		break;
	case ISCSI_PARAM_HDRDGST_EN:
		conn->hdrdgst_en = value;
		conn->hdr_size = sizeof(struct iscsi_hdr);
		if (conn->hdrdgst_en) {
			conn->hdr_size += sizeof(__u32);
			if (!conn->tx_tfm)
				conn->tx_tfm = crypto_alloc_tfm("crc32c", 0);
			if (!conn->tx_tfm)
				return -ENOMEM;
			if (!conn->rx_tfm)
				conn->rx_tfm = crypto_alloc_tfm("crc32c", 0);
			if (!conn->rx_tfm) {
				crypto_free_tfm(conn->tx_tfm);
				return -ENOMEM;
			}
		} else {
			if (conn->tx_tfm)
				crypto_free_tfm(conn->tx_tfm);
			if (conn->rx_tfm)
				crypto_free_tfm(conn->rx_tfm);
		}
		break;
	case ISCSI_PARAM_DATADGST_EN:
		conn->datadgst_en = value;
		if (conn->datadgst_en) {
			if (!conn->data_tx_tfm)
				conn->data_tx_tfm =
					crypto_alloc_tfm("crc32c", 0);
			if (!conn->data_tx_tfm)
				return -ENOMEM;
			if (!conn->data_rx_tfm)
				conn->data_rx_tfm =
					crypto_alloc_tfm("crc32c", 0);
			if (!conn->data_rx_tfm) {
				crypto_free_tfm(conn->data_tx_tfm);
				return -ENOMEM;
			}
		} else {
			if (conn->data_tx_tfm)
				crypto_free_tfm(conn->data_tx_tfm);
			if (conn->data_rx_tfm)
				crypto_free_tfm(conn->data_rx_tfm);
		}
		conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_INITIAL_R2T_EN:
		session->initial_r2t_en = value;
		break;
	case ISCSI_PARAM_MAX_R2T:
		if (session->max_r2t == roundup_pow_of_two(value))
			break;
		iscsi_r2tpool_free(session);
		session->max_r2t = value;
		if (session->max_r2t & (session->max_r2t - 1))
			session->max_r2t = roundup_pow_of_two(session->max_r2t);
		if (iscsi_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	case ISCSI_PARAM_IMM_DATA_EN:
		session->imm_data_en = value;
		break;
	case ISCSI_PARAM_FIRST_BURST:
		session->first_burst = value;
		break;
	case ISCSI_PARAM_MAX_BURST:
		session->max_burst = value;
		break;
	case ISCSI_PARAM_PDU_INORDER_EN:
		session->pdu_inorder_en = value;
		break;
	case ISCSI_PARAM_DATASEQ_INORDER_EN:
		session->dataseq_inorder_en = value;
		break;
	case ISCSI_PARAM_ERL:
		session->erl = value;
		break;
	case ISCSI_PARAM_IFMARKER_EN:
		session->ifmarker_en = value;
		break;
	case ISCSI_PARAM_OFMARKER_EN:
		session->ofmarker_en = value;
		break;
	default:
		break;
	}

	return 0;
}
static int
iscsi_conn_get_param(iscsi_connh_t connh, enum iscsi_param param,
		     uint32_t *value)
{
	struct iscsi_conn *conn = iscsi_ptr(connh);
	struct iscsi_session *session = conn->session;

	switch(param) {
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		*value = conn->max_recv_dlength;
		break;
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		*value = conn->max_xmit_dlength;
		break;
	case ISCSI_PARAM_HDRDGST_EN:
		*value = conn->hdrdgst_en;
		break;
	case ISCSI_PARAM_DATADGST_EN:
		*value = conn->datadgst_en;
		break;
	case ISCSI_PARAM_INITIAL_R2T_EN:
		*value = session->initial_r2t_en;
		break;
	case ISCSI_PARAM_MAX_R2T:
		*value = session->max_r2t;
		break;
	case ISCSI_PARAM_IMM_DATA_EN:
		*value = session->imm_data_en;
		break;
	case ISCSI_PARAM_FIRST_BURST:
		*value = session->first_burst;
		break;
	case ISCSI_PARAM_MAX_BURST:
		*value = session->max_burst;
		break;
	case ISCSI_PARAM_PDU_INORDER_EN:
		*value = session->pdu_inorder_en;
		break;
	case ISCSI_PARAM_DATASEQ_INORDER_EN:
		*value = session->dataseq_inorder_en;
		break;
	case ISCSI_PARAM_ERL:
		*value = session->erl;
		break;
	case ISCSI_PARAM_IFMARKER_EN:
		*value = session->ifmarker_en;
		break;
	case ISCSI_PARAM_OFMARKER_EN:
		*value = session->ofmarker_en;
		break;
	default:
		return ISCSI_ERR_PARAM_NOT_FOUND;
	}

	return 0;
}
static void
iscsi_conn_get_stats(iscsi_connh_t connh, struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = iscsi_ptr(connh);

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;
}
static int
iscsi_conn_send_pdu(iscsi_connh_t connh, struct iscsi_hdr *hdr, char *data,
		    uint32_t data_size)
{
	struct iscsi_conn *conn = iscsi_ptr(connh);
	int rc;

	mutex_lock(&conn->xmitmutex);
	rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
	mutex_unlock(&conn->xmitmutex);

	return rc;
}

static struct iscsi_transport iscsi_tcp_transport = {
	.owner			= THIS_MODULE,
	.name			= "tcp",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
	.host_template		= &iscsi_sht,
	.hostdata_size		= sizeof(struct iscsi_session),
	.max_cmd_len		= ISCSI_TCP_MAX_CMD_LEN,
	.create_session		= iscsi_session_create,
	.destroy_session	= iscsi_session_destroy,
	.create_conn		= iscsi_conn_create,
	.bind_conn		= iscsi_conn_bind,
	.destroy_conn		= iscsi_conn_destroy,
	.set_param		= iscsi_conn_set_param,
	.get_param		= iscsi_conn_get_param,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_conn_get_stats,
};
static int __init
iscsi_tcp_init(void)
{
	int error;

	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
		return -EINVAL;
	}
	iscsi_tcp_transport.max_lun = iscsi_max_lun;

	taskcache = kmem_cache_create("iscsi_taskcache",
			sizeof(struct iscsi_data_task), 0,
			SLAB_HWCACHE_ALIGN | SLAB_NO_REAP, NULL, NULL);
	if (!taskcache)
		return -ENOMEM;

	error = iscsi_register_transport(&iscsi_tcp_transport);
	if (error)
		kmem_cache_destroy(taskcache);

	return error;
}

static void __exit
iscsi_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_tcp_transport);
	kmem_cache_destroy(taskcache);
}

module_init(iscsi_tcp_init);
module_exit(iscsi_tcp_exit);