X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=net%2Fsctp%2Foutqueue.c;h=fd4deefab3cfe5ece6dec9c05e85d5fb8bdc69d6;hb=ee6b967301b4aa5d4a4b61e2f682f086266db9fb;hp=992f361084b741115f3e14006bb00bb0772a4fcf;hpb=c58b8e4a25a1ba347a0e5d21984c97bd296f1691;p=linux-2.6

diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 992f361084..fd4deefab3 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1,21 +1,21 @@
-/* SCTP kernel reference Implementation
+/* SCTP kernel implementation
  * (C) Copyright IBM Corp. 2001, 2004
  * Copyright (c) 1999-2000 Cisco, Inc.
  * Copyright (c) 1999-2001 Motorola, Inc.
  * Copyright (c) 2001-2003 Intel Corp.
  *
- * This file is part of the SCTP kernel reference Implementation
+ * This file is part of the SCTP kernel implementation
  *
  * These functions implement the sctp_outq class.  The outqueue handles
  * bundling and queueing of outgoing SCTP chunks.
  *
- * The SCTP reference implementation is free software;
+ * This SCTP implementation is free software;
  * you can redistribute it and/or modify it under the terms of
  * the GNU General Public License as published by
  * the Free Software Foundation; either version 2, or (at your option)
  * any later version.
  *
- * The SCTP reference implementation is distributed in the hope that it
+ * This SCTP implementation is distributed in the hope that it
  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  *                 ************************
  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -382,7 +382,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new)
 /* Mark all the eligible packets on a transport for retransmission. */
 void sctp_retransmit_mark(struct sctp_outq *q,
                           struct sctp_transport *transport,
-                          __u8 fast_retransmit)
+                          __u8 reason)
 {
         struct list_head *lchunk, *ltemp;
         struct sctp_chunk *chunk;
@@ -412,15 +412,22 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                         continue;
                 }
 
-                /* If we are doing retransmission due to a fast retransmit,
-                 * only the chunk's that are marked for fast retransmit
-                 * should be added to the retransmit queue.  If we are doing
-                 * retransmission due to a timeout or pmtu discovery, only the
-                 * chunks that are not yet acked should be added to the
-                 * retransmit queue.
+                /* If we are doing retransmission due to a timeout or pmtu
+                 * discovery, only the chunks that are not yet acked should
+                 * be added to the retransmit queue.
                  */
-                if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
-                    (!fast_retransmit && !chunk->tsn_gap_acked)) {
+                if ((reason == SCTP_RTXR_FAST_RTX &&
+                     (chunk->fast_retransmit > 0)) ||
+                    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
+                        /* If this chunk was sent less then 1 rto ago, do not
+                         * retransmit this chunk, but give the peer time
+                         * to acknowlege it.  Do this only when
+                         * retransmitting due to T3 timeout.
+                         */
+                        if (reason == SCTP_RTXR_T3_RTX &&
+                            (jiffies - chunk->sent_at) < transport->last_rto)
+                                continue;
+
                         /* RFC 2960 6.2.1 Processing a Received SACK
                          *
                          * C) Any time a DATA chunk is marked for
@@ -460,10 +467,10 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                 }
         }
 
-        SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, "
+        SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
                           "cwnd: %d, ssthresh: %d, flight_size: %d, "
                           "pba: %d\n", __FUNCTION__,
-                          transport, fast_retransmit,
+                          transport, reason,
                           transport->cwnd, transport->ssthresh,
                           transport->flight_size,
                           transport->partial_bytes_acked);
@@ -477,7 +484,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                      sctp_retransmit_reason_t reason)
 {
         int error = 0;
-        __u8 fast_retransmit = 0;
 
         switch(reason) {
         case SCTP_RTXR_T3_RTX:
@@ -488,20 +494,25 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                  */
                 if (transport == transport->asoc->peer.retran_path)
                         sctp_assoc_update_retran_path(transport->asoc);
+                transport->asoc->rtx_data_chunks +=
+                        transport->asoc->unack_data;
                 break;
         case SCTP_RTXR_FAST_RTX:
                 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
                 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
-                fast_retransmit = 1;
                 break;
         case SCTP_RTXR_PMTUD:
                 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
                 break;
+        case SCTP_RTXR_T1_RTX:
+                SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
+                transport->asoc->init_retries++;
+                break;
         default:
                 BUG();
         }
 
-        sctp_retransmit_mark(q, transport, fast_retransmit);
+        sctp_retransmit_mark(q, transport, reason);
 
         /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
          * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
@@ -634,7 +645,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 
         /* If we are here due to a retransmit timeout or a fast
          * retransmit and if there are any chunks left in the retransmit
-         * queue that could not fit in the PMTU sized packet, they need         * to be marked as ineligible for a subsequent fast retransmit.
+         * queue that could not fit in the PMTU sized packet, they need
+         * to be marked as ineligible for a subsequent fast retransmit.
          */
         if (rtx_timeout && !lchunk) {
                 list_for_each(lchunk1, lqueue) {
@@ -653,10 +665,9 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 int sctp_outq_uncork(struct sctp_outq *q)
 {
         int error = 0;
-        if (q->cork) {
+        if (q->cork)
                 q->cork = 0;
-                error = sctp_outq_flush(q, 0);
-        }
+        error = sctp_outq_flush(q, 0);
         return error;
 }
 
@@ -708,7 +719,29 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                 new_transport = chunk->transport;
 
                 if (!new_transport) {
-                        new_transport = asoc->peer.active_path;
+                        /*
+                         * If we have a prior transport pointer, see if
+                         * the destination address of the chunk
+                         * matches the destination address of the
+                         * current transport.  If not a match, then
+                         * try to look up the transport with a given
+                         * destination address.  We do this because
+                         * after processing ASCONFs, we may have new
+                         * transports created.
+                         */
+                        if (transport &&
+                            sctp_cmp_addr_exact(&chunk->dest,
+                                                &transport->ipaddr))
+                                new_transport = transport;
+                        else
+                                new_transport = sctp_assoc_lookup_paddr(asoc,
+                                                                &chunk->dest);
+
+                        /* if we still don't have a new transport, then
+                         * use the current active path.
+                         */
+                        if (!new_transport)
+                                new_transport = asoc->peer.active_path;
                 } else if ((new_transport->state == SCTP_INACTIVE) ||
                            (new_transport->state == SCTP_UNCONFIRMED)) {
                         /* If the chunk is Heartbeat or Heartbeat Ack,
@@ -721,9 +754,12 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                          * address of the IP datagram containing the
                          * HEARTBEAT chunk to which this ack is responding.
                          * ...
+                         *
+                         * ASCONF_ACKs also must be sent to the source.
                          */
                         if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
-                            chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK)
+                            chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
+                            chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
                                 new_transport = asoc->peer.active_path;
                 }
 
@@ -1146,8 +1182,10 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
                 tchunk = list_entry(lchunk, struct sctp_chunk,
                                     transmitted_list);
                 tsn = ntohl(tchunk->subh.data_hdr->tsn);
-                if (TSN_lte(tsn, ctsn))
+                if (TSN_lte(tsn, ctsn)) {
+                        list_del_init(&tchunk->transmitted_list);
                         sctp_chunk_free(tchunk);
+                }
         }
 
         /* ii) Set rwnd equal to the newly received a_rwnd minus the
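
The behavioural core of the patch above is the reason-aware eligibility test in sctp_retransmit_mark(): fast retransmit only picks up chunks already marked for it, a timeout or PMTU retransmission considers the chunks that are not yet acked, and the new guard skips chunks sent less than one RTO ago when the trigger is a T3 expiry. The following is a minimal standalone sketch of that test in plain C; it is not the kernel code, and the enum, struct and function names are simplified stand-ins (chosen here only for illustration) for sctp_retransmit_reason_t, struct sctp_chunk and struct sctp_transport.

#include <stdbool.h>

/* Simplified stand-ins for the kernel types used in the hunk at line 412. */
enum rtx_reason { RTX_T3_RTX, RTX_FAST_RTX, RTX_PMTUD, RTX_T1_RTX };

struct data_chunk {
        unsigned long sent_at;      /* when the chunk was last transmitted */
        bool tsn_gap_acked;         /* already acked via a SACK gap block  */
        int fast_retransmit;        /* > 0: chunk is marked for fast rtx   */
};

/* Return true if the chunk should be moved to the retransmit queue. */
bool should_mark_for_rtx(const struct data_chunk *c, enum rtx_reason reason,
                         unsigned long now, unsigned long last_rto)
{
        /* Fast retransmit only takes chunks explicitly marked for it. */
        if (reason == RTX_FAST_RTX)
                return c->fast_retransmit > 0;

        /* Timeout, PMTU discovery and T1 retransmission skip acked chunks. */
        if (c->tsn_gap_acked)
                return false;

        /* On a T3 timeout, leave chunks sent less than one RTO ago alone,
         * giving the peer time to acknowledge them.
         */
        if (reason == RTX_T3_RTX && now - c->sent_at < last_rto)
                return false;

        return true;
}

With reason == RTX_T3_RTX this mirrors the new continue statement in the hunk at line 412: recently sent chunks are skipped instead of being queued for immediate retransmission.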