static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
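+/* Declared ahead of use: sctp_outq_flush() becomes static below. */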
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
+
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
struct sctp_chunk *ch)
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
+ q->fast_rtx = 0;
q->outstanding_bytes = 0;
q->empty = 1;
q->cork = 0;
void sctp_outq_teardown(struct sctp_outq *q)
{
struct sctp_transport *transport;
- struct list_head *lchunk, *pos, *temp;
+ struct list_head *lchunk, *temp;
struct sctp_chunk *chunk, *tmp;
/* Throw away unacknowledged chunks. */
- list_for_each(pos, &q->asoc->peer.transport_addr_list) {
- transport = list_entry(pos, struct sctp_transport, transports);
+ list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
+ transports) {
while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
"cwnd: %d, ssthresh: %d, flight_size: %d, "
- "pba: %d\n", __FUNCTION__,
+ "pba: %d\n", __func__,
transport, reason,
transport->cwnd, transport->ssthresh,
transport->flight_size,
case SCTP_RTXR_FAST_RTX:
SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
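+ /* Hint to the flush path that only chunks marked for fast
+ * retransmit should be sent on this retransmission.
+ */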
+ q->fast_rtx = 1;
break;
case SCTP_RTXR_PMTUD:
SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
* the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
* following the procedures outlined in C1 - C5.
*/
- sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+ if (reason == SCTP_RTXR_T3_RTX)
+ sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
- error = sctp_outq_flush(q, /* rtx_timeout */ 1);
+ /* Flush the queues only on timeout, since fast_rtx is only
+ * triggered during sack processing and the queue
+ * will be flushed at the end.
+ */
+ if (reason != SCTP_RTXR_FAST_RTX)
+ error = sctp_outq_flush(q, /* rtx_timeout */ 1);
if (error)
q->asoc->base.sk->sk_err = -error;
int rtx_timeout, int *start_timer)
{
struct list_head *lqueue;
- struct list_head *lchunk, *lchunk1;
struct sctp_transport *transport = pkt->transport;
sctp_xmit_t status;
struct sctp_chunk *chunk, *chunk1;
struct sctp_association *asoc;
+ int fast_rtx;
int error = 0;
+ int timer = 0;
+ int done = 0;
asoc = q->asoc;
lqueue = &q->retransmit;
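+ /* Latch the fast retransmit hint for this pass; it is
+ * cleared below once consumed.
+ */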
+ fast_rtx = q->fast_rtx;
- /* RFC 2960 6.3.3 Handle T3-rtx Expiration
+ /* This loop handles time-out retransmissions, fast retransmissions,
+ * and retransmissions due to the window opening.
+ *
+ * RFC 2960 6.3.3 Handle T3-rtx Expiration
*
* E3) Determine how many of the earliest (i.e., lowest TSN)
* outstanding DATA chunks for the address for which the
* [Just to be painfully clear, if we are retransmitting
* because a timeout just happened, we should send only ONE
* packet of retransmitted data.]
+ *
+ * For fast retransmissions we also send only ONE packet. However,
+ * if we are just flushing the queue due to an open window, we'll
+ * try to send as much as possible.
*/
- lchunk = sctp_list_dequeue(lqueue);
-
- while (lchunk) {
- chunk = list_entry(lchunk, struct sctp_chunk,
- transmitted_list);
+ list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
/* Make sure that Gap Acked TSNs are not retransmitted. A
* simple approach is just to move such TSNs out of the
* next chunk.
*/
if (chunk->tsn_gap_acked) {
- list_add_tail(lchunk, &transport->transmitted);
- lchunk = sctp_list_dequeue(lqueue);
+ list_del(&chunk->transmitted_list);
+ list_add_tail(&chunk->transmitted_list,
+ &transport->transmitted);
continue;
}
+ /* If we are doing fast retransmit, ignore non-fast_retransmit
+ * chunks
+ */
+ if (fast_rtx && !chunk->fast_retransmit)
+ continue;
+
/* Attempt to append this chunk to the packet. */
status = sctp_packet_append_chunk(pkt, chunk);
switch (status) {
case SCTP_XMIT_PMTU_FULL:
/* Send this packet. */
- if ((error = sctp_packet_transmit(pkt)) == 0)
- *start_timer = 1;
+ error = sctp_packet_transmit(pkt);
/* If we are retransmitting, we should only
* send a single packet.
*/
- if (rtx_timeout) {
- list_add(lchunk, lqueue);
- lchunk = NULL;
- }
+ if (rtx_timeout || fast_rtx)
+ done = 1;
- /* Bundle lchunk in the next round. */
+ /* Bundle next chunk in the next round. */
break;
case SCTP_XMIT_RWND_FULL:
/* Send this packet. */
- if ((error = sctp_packet_transmit(pkt)) == 0)
- *start_timer = 1;
+ error = sctp_packet_transmit(pkt);
/* Stop sending DATA as there is no more room
* at the receiver.
*/
- list_add(lchunk, lqueue);
- lchunk = NULL;
+ done = 1;
break;
case SCTP_XMIT_NAGLE_DELAY:
/* Send this packet. */
- if ((error = sctp_packet_transmit(pkt)) == 0)
- *start_timer = 1;
+ error = sctp_packet_transmit(pkt);
/* Stop sending DATA because of nagle delay. */
- list_add(lchunk, lqueue);
- lchunk = NULL;
+ done = 1;
break;
default:
/* The append was successful, so add this chunk to
* the transmitted list.
*/
- list_add_tail(lchunk, &transport->transmitted);
+ list_del(&chunk->transmitted_list);
+ list_add_tail(&chunk->transmitted_list,
+ &transport->transmitted);
/* Mark the chunk as ineligible for fast retransmit
* after it is retransmitted.
if (chunk->fast_retransmit > 0)
chunk->fast_retransmit = -1;
- *start_timer = 1;
- q->empty = 0;
+ /* Force start T3-rtx timer when fast retransmitting
+ * the earliest outstanding TSN
+ */
+ if (!timer && fast_rtx &&
+ ntohl(chunk->subh.data_hdr->tsn) ==
+ asoc->ctsn_ack_point + 1)
+ timer = 2;
- /* Retrieve a new chunk to bundle. */
- lchunk = sctp_list_dequeue(lqueue);
+ q->empty = 0;
break;
}
- /* If we are here due to a retransmit timeout or a fast
- * retransmit and if there are any chunks left in the retransmit
- * queue that could not fit in the PMTU sized packet, they need
- * to be marked as ineligible for a subsequent fast retransmit.
- */
- if (rtx_timeout && !lchunk) {
- list_for_each(lchunk1, lqueue) {
- chunk1 = list_entry(lchunk1, struct sctp_chunk,
- transmitted_list);
- if (chunk1->fast_retransmit > 0)
- chunk1->fast_retransmit = -1;
- }
+ /* Set the timer if there were no errors */
+ if (!error && !timer)
+ timer = 1;
+
+ if (done)
+ break;
+ }
+
+ /* If we are here due to a retransmit timeout or a fast
+ * retransmit and if there are any chunks left in the retransmit
+ * queue that could not fit in the PMTU sized packet, they need
+ * to be marked as ineligible for a subsequent fast retransmit.
+ */
+ if (rtx_timeout || fast_rtx) {
+ list_for_each_entry(chunk1, lqueue, transmitted_list) {
+ if (chunk1->fast_retransmit > 0)
+ chunk1->fast_retransmit = -1;
}
}
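+ /* timer: 0 = leave the timer alone, 1 = start T3-rtx,
+ * 2 = force a restart (the caller passes timer - 1 to
+ * sctp_transport_reset_timers()).
+ */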
+ *start_timer = timer;
+
+ /* Clear fast retransmit hint */
+ if (fast_rtx)
+ q->fast_rtx = 0;
+
return error;
}
return error;
}
+
/*
* Try to flush an outqueue.
*
* locking concerns must be made. Today we use the sock lock to protect
* this function.
*/
-int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
struct sctp_packet *packet;
struct sctp_packet singleton;
sctp_xmit_t status;
int error = 0;
int start_timer = 0;
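+ /* Set for response chunks that may only go out in one packet. */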
+ int one_packet = 0;
/* These transports have chunks to send. */
struct list_head transport_list;
break;
case SCTP_CID_ABORT:
- case SCTP_CID_SACK:
- case SCTP_CID_HEARTBEAT:
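+ /* An ABORT with the T bit set is sent with our own
+ * verification tag.
+ */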
+ if (sctp_test_T_bit(chunk)) {
+ packet->vtag = asoc->c.my_vtag;
+ }
+ /* The following chunks are "response" chunks, i.e.
+ * they are generated in response to something we
+ * received. If we are sending these, then we can
+ * send only 1 packet containing these chunks.
+ */
case SCTP_CID_HEARTBEAT_ACK:
- case SCTP_CID_SHUTDOWN:
case SCTP_CID_SHUTDOWN_ACK:
- case SCTP_CID_ERROR:
- case SCTP_CID_COOKIE_ECHO:
case SCTP_CID_COOKIE_ACK:
- case SCTP_CID_ECN_ECNE:
+ case SCTP_CID_COOKIE_ECHO:
+ case SCTP_CID_ERROR:
case SCTP_CID_ECN_CWR:
- case SCTP_CID_ASCONF:
case SCTP_CID_ASCONF_ACK:
+ one_packet = 1;
+ /* Fall through */
+
+ case SCTP_CID_SACK:
+ case SCTP_CID_HEARTBEAT:
+ case SCTP_CID_SHUTDOWN:
+ case SCTP_CID_ECN_ECNE:
+ case SCTP_CID_ASCONF:
case SCTP_CID_FWD_TSN:
- sctp_packet_transmit_chunk(packet, chunk);
+ status = sctp_packet_transmit_chunk(packet, chunk,
+ one_packet);
+ if (status != SCTP_XMIT_OK) {
+ /* Put the chunk back; it is retried on the next flush. */
+ list_add(&chunk->list, &q->control_chunk_list);
+ }
break;
default:
rtx_timeout, &start_timer);
if (start_timer)
- sctp_transport_reset_timers(transport);
+ sctp_transport_reset_timers(transport,
+ start_timer - 1);
/* This can happen on COOKIE-ECHO resend. Only
* one chunk can get bundled with a COOKIE-ECHO.
atomic_read(&chunk->skb->users) : -1);
/* Add the chunk to the packet. */
- status = sctp_packet_transmit_chunk(packet, chunk);
+ status = sctp_packet_transmit_chunk(packet, chunk, 0);
switch (status) {
case SCTP_XMIT_PMTU_FULL:
list_add_tail(&chunk->transmitted_list,
&transport->transmitted);
- sctp_transport_reset_timers(transport);
+ sctp_transport_reset_timers(transport, start_timer - 1);
q->empty = 0;
static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
struct sctp_association *asoc)
{
- struct list_head *ltransport, *lchunk;
struct sctp_transport *transport;
struct sctp_chunk *chunk;
__u32 highest_new_tsn, tsn;
highest_new_tsn = ntohl(sack->cum_tsn_ack);
- list_for_each(ltransport, transport_list) {
- transport = list_entry(ltransport, struct sctp_transport,
- transports);
- list_for_each(lchunk, &transport->transmitted) {
- chunk = list_entry(lchunk, struct sctp_chunk,
- transmitted_list);
+ list_for_each_entry(transport, transport_list, transports) {
+ list_for_each_entry(chunk, &transport->transmitted,
+ transmitted_list) {
tsn = ntohl(chunk->subh.data_hdr->tsn);
if (!chunk->tsn_gap_acked &&
struct sctp_association *asoc = q->asoc;
struct sctp_transport *transport;
struct sctp_chunk *tchunk = NULL;
- struct list_head *lchunk, *transport_list, *pos, *temp;
+ struct list_head *lchunk, *transport_list, *temp;
sctp_sack_variable_t *frags = sack->variable;
__u32 sack_ctsn, ctsn, tsn;
__u32 highest_tsn, highest_new_tsn;
*/
if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
primary->cacc.changeover_active = 0;
- list_for_each(pos, transport_list) {
- transport = list_entry(pos, struct sctp_transport,
- transports);
+ list_for_each_entry(transport, transport_list,
+ transports) {
transport->cacc.cycling_changeover = 0;
}
}
*/
if (sack->num_gap_ack_blocks &&
primary->cacc.changeover_active) {
- list_for_each(pos, transport_list) {
- transport = list_entry(pos, struct sctp_transport,
- transports);
+ list_for_each_entry(transport, transport_list, transports) {
transport->cacc.cacc_saw_newack = 0;
}
}
*
* This is a MASSIVE candidate for optimization.
*/
- list_for_each(pos, transport_list) {
- transport = list_entry(pos, struct sctp_transport,
- transports);
+ list_for_each_entry(transport, transport_list, transports) {
sctp_check_transmitted(q, &transport->transmitted,
transport, sack, highest_new_tsn);
/*
count_of_newacks ++;
}
- list_for_each(pos, transport_list) {
- transport = list_entry(pos, struct sctp_transport,
- transports);
+ list_for_each_entry(transport, transport_list, transports) {
sctp_mark_missing(q, &transport->transmitted, transport,
highest_new_tsn, count_of_newacks);
}
sctp_generate_fwdtsn(q, sack_ctsn);
SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
- __FUNCTION__, sack_ctsn);
+ __func__, sack_ctsn);
SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
"%p is 0x%x. Adv peer ack point: 0x%x\n",
- __FUNCTION__, asoc, ctsn, asoc->adv_peer_ack_point);
+ __func__, asoc, ctsn, asoc->adv_peer_ack_point);
/* See if all chunks are acked.
* Make sure the empty queue handler will get run later.
*/
q->empty = (list_empty(&q->out_chunk_list) &&
- list_empty(&q->control_chunk_list) &&
list_empty(&q->retransmit));
if (!q->empty)
goto finish;
- list_for_each(pos, transport_list) {
- transport = list_entry(pos, struct sctp_transport,
- transports);
+ list_for_each_entry(transport, transport_list, transports) {
q->empty = q->empty && list_empty(&transport->transmitted);
if (!q->empty)
goto finish;
if (tchunk->tsn_gap_acked) {
SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
"data TSN: 0x%x\n",
- __FUNCTION__,
+ __func__,
tsn);
tchunk->tsn_gap_acked = 0;
bytes_acked);
transport->flight_size -= bytes_acked;
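+ /* Per RFC 2960 7.2.2, partial_bytes_acked is reset once
+ * all outstanding data has been acked.
+ */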
+ if (transport->flight_size == 0)
+ transport->partial_bytes_acked = 0;
q->outstanding_bytes -= bytes_acked;
} else {
/* RFC 2960 6.1, sctpimpguide-06 2.15.2
(sack_ctsn+2 == q->asoc->next_tsn)) {
SCTP_DEBUG_PRINTK("%s: SACK received for zero "
"window probe: %u\n",
- __FUNCTION__, sack_ctsn);
+ __func__, sack_ctsn);
q->asoc->overall_error_count = 0;
transport->error_count = 0;
}
int count_of_newacks)
{
struct sctp_chunk *chunk;
- struct list_head *pos;
__u32 tsn;
char do_fast_retransmit = 0;
struct sctp_transport *primary = q->asoc->peer.primary_path;
- list_for_each(pos, transmitted_queue) {
- chunk = list_entry(pos, struct sctp_chunk, transmitted_list);
+ list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
tsn = ntohl(chunk->subh.data_hdr->tsn);
/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
SCTP_DEBUG_PRINTK(
"%s: TSN 0x%x missing counter: %d\n",
- __FUNCTION__, tsn,
+ __func__, tsn,
chunk->tsn_missing_report);
}
}
SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
"ssthresh: %d, flight_size: %d, pba: %d\n",
- __FUNCTION__, transport, transport->cwnd,
+ __func__, transport, transport->cwnd,
transport->ssthresh, transport->flight_size,
transport->partial_bytes_acked);
}