/*
 *  net/dccp/ccids/ccid3.c
 *
 *  Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
 *  Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
 *
 *  An implementation of the DCCP protocol
 *
 *  This code has been developed by the University of Waikato WAND
 *  research group. For further information please see http://www.wand.net.nz/
 *
 *  This code also uses code from Lulea University, rereleased as GPL by its
 *  authors:
 *  Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
 *
 *  Changes to meet Linux coding standards, to make it meet latest ccid3 draft
 *  and to make it work as a loadable module in the DCCP stack written by
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
 *
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include "../ccid.h"
#include "../dccp.h"
#include "lib/packet_history.h"
#include "lib/loss_interval.h"
#include "lib/tfrc.h"
#include "ccid3.h"
45 * Reason for maths here is to avoid 32 bit overflow when a is big.
46 * With this we get close to the limit.
48 static u32 usecs_div(const u32 a, const u32 b)
50 const u32 div = a < (UINT_MAX / (USEC_PER_SEC / 10)) ? 10 :
51 a < (UINT_MAX / (USEC_PER_SEC / 50)) ? 50 :
52 a < (UINT_MAX / (USEC_PER_SEC / 100)) ? 100 :
53 a < (UINT_MAX / (USEC_PER_SEC / 500)) ? 500 :
54 a < (UINT_MAX / (USEC_PER_SEC / 1000)) ? 1000 :
55 a < (UINT_MAX / (USEC_PER_SEC / 5000)) ? 5000 :
56 a < (UINT_MAX / (USEC_PER_SEC / 10000)) ? 10000 :
57 a < (UINT_MAX / (USEC_PER_SEC / 50000)) ? 50000 :
59 const u32 tmp = a * (USEC_PER_SEC / div);
60 return (b >= 2 * div) ? tmp / (b / div) : tmp;
/* Module-wide debug switch, exported as the ccid3_debug module parameter. */
static int ccid3_debug;

#ifdef CCID3_DEBUG
#define ccid3_pr_debug(format, a...) \
	do { if (ccid3_debug) \
		printk(KERN_DEBUG "%s: " format, __FUNCTION__, ##a); \
	} while (0)
#else
/* Compiles away entirely when debugging is not configured in. */
#define ccid3_pr_debug(format, a...)
#endif

/* Shared packet/loss-interval history caches for all CCID3 sockets. */
static struct dccp_tx_hist *ccid3_tx_hist;
static struct dccp_rx_hist *ccid3_rx_hist;
static struct dccp_li_hist *ccid3_li_hist;
/* TFRC sender states (RFC 3448, section 4) */
enum ccid3_hc_tx_states {
	TFRC_SSTATE_NO_SENT = 1,	/* nothing sent yet */
	TFRC_SSTATE_NO_FBACK,		/* sent, no feedback received yet */
	TFRC_SSTATE_FBACK,		/* feedback-driven operation */
	TFRC_SSTATE_TERM,		/* half-connection being torn down */
};
87 static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
89 static char *ccid3_state_names[] = {
90 [TFRC_SSTATE_NO_SENT] = "NO_SENT",
91 [TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
92 [TFRC_SSTATE_FBACK] = "FBACK",
93 [TFRC_SSTATE_TERM] = "TERM",
96 return ccid3_state_names[state];
100 static void ccid3_hc_tx_set_state(struct sock *sk,
101 enum ccid3_hc_tx_states state)
103 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
104 enum ccid3_hc_tx_states oldstate = hctx->ccid3hctx_state;
106 ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
107 dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
108 ccid3_tx_state_name(state));
109 WARN_ON(state == oldstate);
110 hctx->ccid3hctx_state = state;
113 /* Calculate new t_ipi (inter packet interval) by t_ipi = s / X_inst */
114 static inline void ccid3_calc_new_t_ipi(struct ccid3_hc_tx_sock *hctx)
117 * If no feedback spec says t_ipi is 1 second (set elsewhere and then
118 * doubles after every no feedback timer (separate function)
120 if (hctx->ccid3hctx_state != TFRC_SSTATE_NO_FBACK)
121 hctx->ccid3hctx_t_ipi = usecs_div(hctx->ccid3hctx_s,
125 /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
126 static inline void ccid3_calc_new_delta(struct ccid3_hc_tx_sock *hctx)
128 hctx->ccid3hctx_delta = min_t(u32, hctx->ccid3hctx_t_ipi / 2,
129 TFRC_OPSYS_HALF_TIME_GRAN);
135 * x_calc = calcX(s, R, p);
136 * X = max(min(X_calc, 2 * X_recv), s / t_mbi);
138 * If (now - tld >= R)
139 * X = max(min(2 * X, 2 * X_recv), s / R);
142 static void ccid3_hc_tx_update_x(struct sock *sk)
144 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
146 /* To avoid large error in calcX */
147 if (hctx->ccid3hctx_p >= TFRC_SMALLEST_P) {
148 hctx->ccid3hctx_x_calc = tfrc_calc_x(hctx->ccid3hctx_s,
151 hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_calc,
152 2 * hctx->ccid3hctx_x_recv),
154 TFRC_MAX_BACK_OFF_TIME));
158 dccp_timestamp(sk, &now);
159 if (timeval_delta(&now, &hctx->ccid3hctx_t_ld) >=
160 hctx->ccid3hctx_rtt) {
161 hctx->ccid3hctx_x = max_t(u32, min_t(u32, hctx->ccid3hctx_x_recv,
162 hctx->ccid3hctx_x) * 2,
163 usecs_div(hctx->ccid3hctx_s,
164 hctx->ccid3hctx_rtt));
165 hctx->ccid3hctx_t_ld = now;
170 static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
172 struct sock *sk = (struct sock *)data;
173 unsigned long next_tmout = 0;
174 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
177 if (sock_owned_by_user(sk)) {
178 /* Try again later. */
179 /* XXX: set some sensible MIB */
180 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
185 ccid3_pr_debug("%s, sk=%p, state=%s\n", dccp_role(sk), sk,
186 ccid3_tx_state_name(hctx->ccid3hctx_state));
188 switch (hctx->ccid3hctx_state) {
189 case TFRC_SSTATE_TERM:
191 case TFRC_SSTATE_NO_FBACK:
192 /* Halve send rate */
193 hctx->ccid3hctx_x /= 2;
194 if (hctx->ccid3hctx_x < (hctx->ccid3hctx_s /
195 TFRC_MAX_BACK_OFF_TIME))
196 hctx->ccid3hctx_x = (hctx->ccid3hctx_s /
197 TFRC_MAX_BACK_OFF_TIME);
199 ccid3_pr_debug("%s, sk=%p, state=%s, updated tx rate to %d "
202 ccid3_tx_state_name(hctx->ccid3hctx_state),
204 next_tmout = max_t(u32, 2 * usecs_div(hctx->ccid3hctx_s,
206 TFRC_INITIAL_TIMEOUT);
208 * FIXME - not sure above calculation is correct. See section
209 * 5 of CCID3 11 should adjust tx_t_ipi and double that to
213 case TFRC_SSTATE_FBACK:
215 * Check if IDLE since last timeout and recv rate is less than
218 if (!hctx->ccid3hctx_idle ||
219 (hctx->ccid3hctx_x_recv >=
220 4 * usecs_div(hctx->ccid3hctx_s, hctx->ccid3hctx_rtt))) {
221 ccid3_pr_debug("%s, sk=%p, state=%s, not idle\n",
223 ccid3_tx_state_name(hctx->ccid3hctx_state));
224 /* Halve sending rate */
226 /* If (X_calc > 2 * X_recv)
227 * X_recv = max(X_recv / 2, s / (2 * t_mbi));
229 * X_recv = X_calc / 4;
231 BUG_ON(hctx->ccid3hctx_p >= TFRC_SMALLEST_P &&
232 hctx->ccid3hctx_x_calc == 0);
234 /* check also if p is zero -> x_calc is infinity? */
235 if (hctx->ccid3hctx_p < TFRC_SMALLEST_P ||
236 hctx->ccid3hctx_x_calc > 2 * hctx->ccid3hctx_x_recv)
237 hctx->ccid3hctx_x_recv = max_t(u32, hctx->ccid3hctx_x_recv / 2,
238 hctx->ccid3hctx_s / (2 * TFRC_MAX_BACK_OFF_TIME));
240 hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc / 4;
242 /* Update sending rate */
243 ccid3_hc_tx_update_x(sk);
246 * Schedule no feedback timer to expire in
247 * max(4 * R, 2 * s / X)
249 next_tmout = max_t(u32, hctx->ccid3hctx_t_rto,
250 2 * usecs_div(hctx->ccid3hctx_s,
254 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
255 __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
260 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
261 jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout)));
262 hctx->ccid3hctx_idle = 1;
268 static int ccid3_hc_tx_send_packet(struct sock *sk,
269 struct sk_buff *skb, int len)
271 struct dccp_sock *dp = dccp_sk(sk);
272 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
273 struct dccp_tx_hist_entry *new_packet;
278 BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);
280 /* Check if pure ACK or Terminating*/
282 * XXX: We only call this function for DATA and DATAACK, on, these
283 * packets can have zero length, but why the comment about "pure ACK"?
285 if (unlikely(len == 0))
288 /* See if last packet allocated was not sent */
289 new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
290 if (new_packet == NULL || new_packet->dccphtx_sent) {
291 new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
295 if (unlikely(new_packet == NULL)) {
296 LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, not enough "
297 "mem to add to history, send refused\n",
298 __FUNCTION__, dccp_role(sk), sk);
302 dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, new_packet);
305 dccp_timestamp(sk, &now);
307 switch (hctx->ccid3hctx_state) {
308 case TFRC_SSTATE_NO_SENT:
309 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
310 jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT));
311 hctx->ccid3hctx_last_win_count = 0;
312 hctx->ccid3hctx_t_last_win_count = now;
313 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
314 hctx->ccid3hctx_t_ipi = TFRC_INITIAL_IPI;
316 /* Set nominal send time for initial packet */
317 hctx->ccid3hctx_t_nom = now;
318 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
319 hctx->ccid3hctx_t_ipi);
320 ccid3_calc_new_delta(hctx);
323 case TFRC_SSTATE_NO_FBACK:
324 case TFRC_SSTATE_FBACK:
325 delay = (timeval_delta(&now, &hctx->ccid3hctx_t_nom) -
326 hctx->ccid3hctx_delta);
328 /* divide by -1000 is to convert to ms and get sign right */
329 rc = delay > 0 ? delay : 0;
332 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
333 __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
339 /* Can we send? if so add options and add to packet history */
341 dp->dccps_hc_tx_insert_options = 1;
342 new_packet->dccphtx_ccval =
343 DCCP_SKB_CB(skb)->dccpd_ccval =
344 hctx->ccid3hctx_last_win_count;
345 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
346 hctx->ccid3hctx_t_ipi);
352 static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, int len)
354 const struct dccp_sock *dp = dccp_sk(sk);
355 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
358 BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);
360 dccp_timestamp(sk, &now);
362 /* check if we have sent a data packet */
364 unsigned long quarter_rtt;
365 struct dccp_tx_hist_entry *packet;
367 packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
368 if (unlikely(packet == NULL)) {
369 LIMIT_NETDEBUG(KERN_WARNING "%s: packet doesn't "
370 "exists in history!\n", __FUNCTION__);
373 if (unlikely(packet->dccphtx_sent)) {
374 LIMIT_NETDEBUG(KERN_WARNING "%s: no unsent packet in "
375 "history!\n", __FUNCTION__);
378 packet->dccphtx_tstamp = now;
379 packet->dccphtx_seqno = dp->dccps_gss;
381 * Check if win_count have changed
382 * Algorithm in "8.1. Window Counter Value" in RFC 4342.
384 quarter_rtt = timeval_delta(&now, &hctx->ccid3hctx_t_last_win_count);
385 if (likely(hctx->ccid3hctx_rtt > 8))
386 quarter_rtt /= hctx->ccid3hctx_rtt / 4;
388 if (quarter_rtt > 0) {
389 hctx->ccid3hctx_t_last_win_count = now;
390 hctx->ccid3hctx_last_win_count = (hctx->ccid3hctx_last_win_count +
391 min_t(unsigned long, quarter_rtt, 5)) % 16;
392 ccid3_pr_debug("%s, sk=%p, window changed from "
395 packet->dccphtx_ccval,
396 hctx->ccid3hctx_last_win_count);
399 hctx->ccid3hctx_idle = 0;
400 packet->dccphtx_rtt = hctx->ccid3hctx_rtt;
401 packet->dccphtx_sent = 1;
403 ccid3_pr_debug("%s, sk=%p, seqno=%llu NOT inserted!\n",
404 dccp_role(sk), sk, dp->dccps_gss);
406 switch (hctx->ccid3hctx_state) {
407 case TFRC_SSTATE_NO_SENT:
408 /* if first wasn't pure ack */
410 printk(KERN_CRIT "%s: %s, First packet sent is noted "
411 "as a data packet\n",
412 __FUNCTION__, dccp_role(sk));
414 case TFRC_SSTATE_NO_FBACK:
415 case TFRC_SSTATE_FBACK:
417 timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
418 hctx->ccid3hctx_t_ipi);
419 ccid3_calc_new_t_ipi(hctx);
420 ccid3_calc_new_delta(hctx);
421 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
422 hctx->ccid3hctx_t_ipi);
426 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
427 __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
433 static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
435 const struct dccp_sock *dp = dccp_sk(sk);
436 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
437 struct ccid3_options_received *opt_recv;
438 struct dccp_tx_hist_entry *packet;
440 unsigned long next_tmout;
446 BUG_ON(hctx == NULL || hctx->ccid3hctx_state == TFRC_SSTATE_TERM);
448 /* we are only interested in ACKs */
449 if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
450 DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
453 opt_recv = &hctx->ccid3hctx_options_received;
455 t_elapsed = dp->dccps_options_received.dccpor_elapsed_time * 10;
456 x_recv = opt_recv->ccid3or_receive_rate;
457 pinv = opt_recv->ccid3or_loss_event_rate;
459 switch (hctx->ccid3hctx_state) {
460 case TFRC_SSTATE_NO_SENT:
461 /* FIXME: what to do here? */
463 case TFRC_SSTATE_NO_FBACK:
464 case TFRC_SSTATE_FBACK:
465 /* Calculate new round trip sample by
466 * R_sample = (now - t_recvdata) - t_delay */
467 /* get t_recvdata from history */
468 packet = dccp_tx_hist_find_entry(&hctx->ccid3hctx_hist,
469 DCCP_SKB_CB(skb)->dccpd_ack_seq);
470 if (unlikely(packet == NULL)) {
471 LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, seqno "
472 "%llu(%s) does't exist in history!\n",
473 __FUNCTION__, dccp_role(sk), sk,
474 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
475 dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type));
480 dccp_timestamp(sk, &now);
481 r_sample = timeval_delta(&now, &packet->dccphtx_tstamp);
482 if (unlikely(r_sample <= t_elapsed))
483 LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, "
485 __FUNCTION__, r_sample, t_elapsed);
487 r_sample -= t_elapsed;
489 /* Update RTT estimate by
490 * If (No feedback recv)
493 * R = q * R + (1 - q) * R_sample;
495 * q is a constant, RFC 3448 recomments 0.9
497 if (hctx->ccid3hctx_state == TFRC_SSTATE_NO_FBACK) {
498 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
499 hctx->ccid3hctx_rtt = r_sample;
501 hctx->ccid3hctx_rtt = (hctx->ccid3hctx_rtt * 9) / 10 +
504 ccid3_pr_debug("%s, sk=%p, New RTT estimate=%uus, "
505 "r_sample=%us\n", dccp_role(sk), sk,
506 hctx->ccid3hctx_rtt, r_sample);
508 /* Update timeout interval */
509 hctx->ccid3hctx_t_rto = max_t(u32, 4 * hctx->ccid3hctx_rtt,
512 /* Update receive rate */
513 hctx->ccid3hctx_x_recv = x_recv;/* X_recv in bytes per sec */
515 /* Update loss event rate */
516 if (pinv == ~0 || pinv == 0)
517 hctx->ccid3hctx_p = 0;
519 hctx->ccid3hctx_p = 1000000 / pinv;
521 if (hctx->ccid3hctx_p < TFRC_SMALLEST_P) {
522 hctx->ccid3hctx_p = TFRC_SMALLEST_P;
523 ccid3_pr_debug("%s, sk=%p, Smallest p used!\n",
528 /* unschedule no feedback timer */
529 sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);
531 /* Update sending rate */
532 ccid3_hc_tx_update_x(sk);
534 /* Update next send time */
535 timeval_sub_usecs(&hctx->ccid3hctx_t_nom,
536 hctx->ccid3hctx_t_ipi);
537 ccid3_calc_new_t_ipi(hctx);
538 timeval_add_usecs(&hctx->ccid3hctx_t_nom,
539 hctx->ccid3hctx_t_ipi);
540 ccid3_calc_new_delta(hctx);
542 /* remove all packets older than the one acked from history */
543 dccp_tx_hist_purge_older(ccid3_tx_hist,
544 &hctx->ccid3hctx_hist, packet);
546 * As we have calculated new ipi, delta, t_nom it is possible that
547 * we now can send a packet, so wake up dccp_wait_for_ccids.
549 sk->sk_write_space(sk);
552 * Schedule no feedback timer to expire in
553 * max(4 * R, 2 * s / X)
555 next_tmout = max(hctx->ccid3hctx_t_rto,
556 2 * usecs_div(hctx->ccid3hctx_s,
559 ccid3_pr_debug("%s, sk=%p, Scheduled no feedback timer to "
560 "expire in %lu jiffies (%luus)\n",
562 usecs_to_jiffies(next_tmout), next_tmout);
564 sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
565 jiffies + max_t(u32, 1, usecs_to_jiffies(next_tmout)));
568 hctx->ccid3hctx_idle = 1;
571 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
572 __FUNCTION__, dccp_role(sk), sk, hctx->ccid3hctx_state);
578 static int ccid3_hc_tx_insert_options(struct sock *sk, struct sk_buff *skb)
580 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
582 BUG_ON(hctx == NULL);
584 if (sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)
585 DCCP_SKB_CB(skb)->dccpd_ccval = hctx->ccid3hctx_last_win_count;
589 static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
590 unsigned char len, u16 idx,
591 unsigned char *value)
594 const struct dccp_sock *dp = dccp_sk(sk);
595 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
596 struct ccid3_options_received *opt_recv;
598 BUG_ON(hctx == NULL);
600 opt_recv = &hctx->ccid3hctx_options_received;
602 if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
603 opt_recv->ccid3or_seqno = dp->dccps_gsr;
604 opt_recv->ccid3or_loss_event_rate = ~0;
605 opt_recv->ccid3or_loss_intervals_idx = 0;
606 opt_recv->ccid3or_loss_intervals_len = 0;
607 opt_recv->ccid3or_receive_rate = 0;
611 case TFRC_OPT_LOSS_EVENT_RATE:
612 if (unlikely(len != 4)) {
613 LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid "
614 "len for TFRC_OPT_LOSS_EVENT_RATE\n",
615 __FUNCTION__, dccp_role(sk), sk);
618 opt_recv->ccid3or_loss_event_rate = ntohl(*(__be32 *)value);
619 ccid3_pr_debug("%s, sk=%p, LOSS_EVENT_RATE=%u\n",
621 opt_recv->ccid3or_loss_event_rate);
624 case TFRC_OPT_LOSS_INTERVALS:
625 opt_recv->ccid3or_loss_intervals_idx = idx;
626 opt_recv->ccid3or_loss_intervals_len = len;
627 ccid3_pr_debug("%s, sk=%p, LOSS_INTERVALS=(%u, %u)\n",
629 opt_recv->ccid3or_loss_intervals_idx,
630 opt_recv->ccid3or_loss_intervals_len);
632 case TFRC_OPT_RECEIVE_RATE:
633 if (unlikely(len != 4)) {
634 LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, invalid "
635 "len for TFRC_OPT_RECEIVE_RATE\n",
636 __FUNCTION__, dccp_role(sk), sk);
639 opt_recv->ccid3or_receive_rate = ntohl(*(__be32 *)value);
640 ccid3_pr_debug("%s, sk=%p, RECEIVE_RATE=%u\n",
642 opt_recv->ccid3or_receive_rate);
650 static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
652 struct dccp_sock *dp = dccp_sk(sk);
653 struct ccid3_hc_tx_sock *hctx = ccid_priv(ccid);
655 if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
656 dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
657 hctx->ccid3hctx_s = dp->dccps_packet_size;
659 hctx->ccid3hctx_s = TFRC_STD_PACKET_SIZE;
661 /* Set transmission rate to 1 packet per second */
662 hctx->ccid3hctx_x = hctx->ccid3hctx_s;
663 hctx->ccid3hctx_t_rto = USEC_PER_SEC;
664 hctx->ccid3hctx_state = TFRC_SSTATE_NO_SENT;
665 INIT_LIST_HEAD(&hctx->ccid3hctx_hist);
667 hctx->ccid3hctx_no_feedback_timer.function = ccid3_hc_tx_no_feedback_timer;
668 hctx->ccid3hctx_no_feedback_timer.data = (unsigned long)sk;
669 init_timer(&hctx->ccid3hctx_no_feedback_timer);
674 static void ccid3_hc_tx_exit(struct sock *sk)
676 struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
678 BUG_ON(hctx == NULL);
680 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
681 sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer);
683 /* Empty packet history */
684 dccp_tx_hist_purge(ccid3_tx_hist, &hctx->ccid3hctx_hist);
/*
 * RX Half Connection methods
 */

/* TFRC receiver states */
enum ccid3_hc_rx_states {
	TFRC_RSTATE_NO_DATA = 1,	/* no data packet seen yet */
	TFRC_RSTATE_DATA,		/* receiving data, sending feedback */
	TFRC_RSTATE_TERM    = 127,	/* half-connection being torn down */
};
699 static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
701 static char *ccid3_rx_state_names[] = {
702 [TFRC_RSTATE_NO_DATA] = "NO_DATA",
703 [TFRC_RSTATE_DATA] = "DATA",
704 [TFRC_RSTATE_TERM] = "TERM",
707 return ccid3_rx_state_names[state];
711 static void ccid3_hc_rx_set_state(struct sock *sk,
712 enum ccid3_hc_rx_states state)
714 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
715 enum ccid3_hc_rx_states oldstate = hcrx->ccid3hcrx_state;
717 ccid3_pr_debug("%s(%p) %-8.8s -> %s\n",
718 dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
719 ccid3_rx_state_name(state));
720 WARN_ON(state == oldstate);
721 hcrx->ccid3hcrx_state = state;
724 static void ccid3_hc_rx_send_feedback(struct sock *sk)
726 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
727 struct dccp_sock *dp = dccp_sk(sk);
728 struct dccp_rx_hist_entry *packet;
731 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
733 dccp_timestamp(sk, &now);
735 switch (hcrx->ccid3hcrx_state) {
736 case TFRC_RSTATE_NO_DATA:
737 hcrx->ccid3hcrx_x_recv = 0;
739 case TFRC_RSTATE_DATA: {
740 const u32 delta = timeval_delta(&now,
741 &hcrx->ccid3hcrx_tstamp_last_feedback);
742 hcrx->ccid3hcrx_x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv,
747 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
748 __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state);
753 packet = dccp_rx_hist_find_data_packet(&hcrx->ccid3hcrx_hist);
754 if (unlikely(packet == NULL)) {
755 LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, no data packet "
757 __FUNCTION__, dccp_role(sk), sk);
761 hcrx->ccid3hcrx_tstamp_last_feedback = now;
762 hcrx->ccid3hcrx_ccval_last_counter = packet->dccphrx_ccval;
763 hcrx->ccid3hcrx_bytes_recv = 0;
765 /* Convert to multiples of 10us */
766 hcrx->ccid3hcrx_elapsed_time =
767 timeval_delta(&now, &packet->dccphrx_tstamp) / 10;
768 if (hcrx->ccid3hcrx_p == 0)
769 hcrx->ccid3hcrx_pinv = ~0;
771 hcrx->ccid3hcrx_pinv = 1000000 / hcrx->ccid3hcrx_p;
772 dp->dccps_hc_rx_insert_options = 1;
776 static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
778 const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
781 BUG_ON(hcrx == NULL);
783 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
786 DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter;
788 if (dccp_packet_without_ack(skb))
791 x_recv = htonl(hcrx->ccid3hcrx_x_recv);
792 pinv = htonl(hcrx->ccid3hcrx_pinv);
794 if ((hcrx->ccid3hcrx_elapsed_time != 0 &&
795 dccp_insert_option_elapsed_time(sk, skb,
796 hcrx->ccid3hcrx_elapsed_time)) ||
797 dccp_insert_option_timestamp(sk, skb) ||
798 dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE,
799 &pinv, sizeof(pinv)) ||
800 dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE,
801 &x_recv, sizeof(x_recv)))
807 /* calculate first loss interval
809 * returns estimated loss interval in usecs */
811 static u32 ccid3_hc_rx_calc_first_li(struct sock *sk)
813 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
814 struct dccp_rx_hist_entry *entry, *next, *tail = NULL;
815 u32 rtt, delta, x_recv, fval, p, tmp2;
816 struct timeval tstamp = { 0, };
822 list_for_each_entry_safe(entry, next, &hcrx->ccid3hcrx_hist,
824 if (dccp_rx_hist_entry_data_packet(entry)) {
829 tstamp = entry->dccphrx_tstamp;
830 win_count = entry->dccphrx_ccval;
834 interval = win_count - entry->dccphrx_ccval;
836 interval += TFRC_WIN_COUNT_LIMIT;
844 if (unlikely(step == 0)) {
845 LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, packet history "
846 "contains no data packets!\n",
847 __FUNCTION__, dccp_role(sk), sk);
851 if (unlikely(interval == 0)) {
852 LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Could not find a "
853 "win_count interval > 0. Defaulting to 1\n",
854 __FUNCTION__, dccp_role(sk), sk);
859 LIMIT_NETDEBUG(KERN_WARNING "%s: tail is null\n",
863 rtt = timeval_delta(&tstamp, &tail->dccphrx_tstamp) * 4 / interval;
864 ccid3_pr_debug("%s, sk=%p, approximated RTT to %uus\n",
865 dccp_role(sk), sk, rtt);
869 dccp_timestamp(sk, &tstamp);
870 delta = timeval_delta(&tstamp, &hcrx->ccid3hcrx_tstamp_last_feedback);
871 x_recv = usecs_div(hcrx->ccid3hcrx_bytes_recv, delta);
874 x_recv = hcrx->ccid3hcrx_x_recv;
876 tmp1 = (u64)x_recv * (u64)rtt;
877 do_div(tmp1,10000000);
881 LIMIT_NETDEBUG(KERN_WARNING "tmp2 = 0 "
882 "%s: x_recv = %u, rtt =%u\n",
883 __FUNCTION__, x_recv, rtt);
887 fval = (hcrx->ccid3hcrx_s * 100000) / tmp2;
888 /* do not alter order above or you will get overflow on 32 bit */
889 p = tfrc_calc_x_reverse_lookup(fval);
890 ccid3_pr_debug("%s, sk=%p, receive rate=%u bytes/s, implied "
891 "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
899 static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
901 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
902 struct dccp_li_hist_entry *head;
905 if (list_empty(&hcrx->ccid3hcrx_li_hist)) {
906 if (!dccp_li_hist_interval_new(ccid3_li_hist,
907 &hcrx->ccid3hcrx_li_hist, seq_loss, win_loss))
910 head = list_entry(hcrx->ccid3hcrx_li_hist.next,
911 struct dccp_li_hist_entry, dccplih_node);
912 head->dccplih_interval = ccid3_hc_rx_calc_first_li(sk);
914 struct dccp_li_hist_entry *entry;
915 struct list_head *tail;
917 head = list_entry(hcrx->ccid3hcrx_li_hist.next,
918 struct dccp_li_hist_entry, dccplih_node);
919 /* FIXME win count check removed as was wrong */
920 /* should make this check with receive history */
921 /* and compare there as per section 10.2 of RFC4342 */
923 /* new loss event detected */
924 /* calculate last interval length */
925 seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
926 entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
929 printk(KERN_CRIT "%s: out of memory\n",__FUNCTION__);
934 list_add(&entry->dccplih_node, &hcrx->ccid3hcrx_li_hist);
936 tail = hcrx->ccid3hcrx_li_hist.prev;
938 kmem_cache_free(ccid3_li_hist->dccplih_slab, tail);
940 /* Create the newest interval */
941 entry->dccplih_seqno = seq_loss;
942 entry->dccplih_interval = seq_temp;
943 entry->dccplih_win_count = win_loss;
947 static int ccid3_hc_rx_detect_loss(struct sock *sk,
948 struct dccp_rx_hist_entry *packet)
950 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
951 struct dccp_rx_hist_entry *rx_hist = dccp_rx_hist_head(&hcrx->ccid3hcrx_hist);
952 u64 seqno = packet->dccphrx_seqno;
958 tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
961 follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
962 hcrx->ccid3hcrx_seqno_nonloss = seqno;
963 hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
968 while (dccp_delta_seqno(hcrx->ccid3hcrx_seqno_nonloss, seqno)
969 > TFRC_RECV_NUM_LATE_LOSS) {
971 ccid3_hc_rx_update_li(sk, hcrx->ccid3hcrx_seqno_nonloss,
972 hcrx->ccid3hcrx_ccval_nonloss);
973 tmp_seqno = hcrx->ccid3hcrx_seqno_nonloss;
974 dccp_inc_seqno(&tmp_seqno);
975 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
976 dccp_inc_seqno(&tmp_seqno);
977 while (dccp_rx_hist_find_entry(&hcrx->ccid3hcrx_hist,
978 tmp_seqno, &ccval)) {
979 hcrx->ccid3hcrx_seqno_nonloss = tmp_seqno;
980 hcrx->ccid3hcrx_ccval_nonloss = ccval;
981 dccp_inc_seqno(&tmp_seqno);
985 /* FIXME - this code could be simplified with above while */
986 /* but works at moment */
987 if (follows48(packet->dccphrx_seqno, hcrx->ccid3hcrx_seqno_nonloss)) {
988 hcrx->ccid3hcrx_seqno_nonloss = seqno;
989 hcrx->ccid3hcrx_ccval_nonloss = packet->dccphrx_ccval;
993 dccp_rx_hist_add_packet(ccid3_rx_hist, &hcrx->ccid3hcrx_hist,
994 &hcrx->ccid3hcrx_li_hist, packet,
995 hcrx->ccid3hcrx_seqno_nonloss);
999 static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
1001 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
1002 const struct dccp_options_received *opt_recv;
1003 struct dccp_rx_hist_entry *packet;
1006 u32 p_prev, rtt_prev, r_sample, t_elapsed;
1009 BUG_ON(hcrx == NULL ||
1010 !(hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA ||
1011 hcrx->ccid3hcrx_state == TFRC_RSTATE_DATA));
1013 opt_recv = &dccp_sk(sk)->dccps_options_received;
1015 switch (DCCP_SKB_CB(skb)->dccpd_type) {
1017 if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
1019 case DCCP_PKT_DATAACK:
1020 if (opt_recv->dccpor_timestamp_echo == 0)
1022 rtt_prev = hcrx->ccid3hcrx_rtt;
1023 dccp_timestamp(sk, &now);
1024 timeval_sub_usecs(&now, opt_recv->dccpor_timestamp_echo * 10);
1025 r_sample = timeval_usecs(&now);
1026 t_elapsed = opt_recv->dccpor_elapsed_time * 10;
1028 if (unlikely(r_sample <= t_elapsed))
1029 LIMIT_NETDEBUG(KERN_WARNING "%s: r_sample=%uus, "
1031 __FUNCTION__, r_sample, t_elapsed);
1033 r_sample -= t_elapsed;
1035 if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA)
1036 hcrx->ccid3hcrx_rtt = r_sample;
1038 hcrx->ccid3hcrx_rtt = (hcrx->ccid3hcrx_rtt * 9) / 10 +
1041 if (rtt_prev != hcrx->ccid3hcrx_rtt)
1042 ccid3_pr_debug("%s, New RTT=%uus, elapsed time=%u\n",
1043 dccp_role(sk), hcrx->ccid3hcrx_rtt,
1044 opt_recv->dccpor_elapsed_time);
1048 default: /* We're not interested in other packet types, move along */
1052 packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
1054 if (unlikely(packet == NULL)) {
1055 LIMIT_NETDEBUG(KERN_WARNING "%s: %s, sk=%p, Not enough mem to "
1056 "add rx packet to history, consider it lost!\n",
1057 __FUNCTION__, dccp_role(sk), sk);
1061 win_count = packet->dccphrx_ccval;
1063 loss = ccid3_hc_rx_detect_loss(sk, packet);
1065 if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK)
1068 switch (hcrx->ccid3hcrx_state) {
1069 case TFRC_RSTATE_NO_DATA:
1070 ccid3_pr_debug("%s, sk=%p(%s), skb=%p, sending initial "
1073 dccp_state_name(sk->sk_state), skb);
1074 ccid3_hc_rx_send_feedback(sk);
1075 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
1077 case TFRC_RSTATE_DATA:
1078 hcrx->ccid3hcrx_bytes_recv += skb->len -
1079 dccp_hdr(skb)->dccph_doff * 4;
1083 dccp_timestamp(sk, &now);
1084 if (timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) >=
1085 hcrx->ccid3hcrx_rtt) {
1086 hcrx->ccid3hcrx_tstamp_last_ack = now;
1087 ccid3_hc_rx_send_feedback(sk);
1091 printk(KERN_CRIT "%s: %s, sk=%p, Illegal state (%d)!\n",
1092 __FUNCTION__, dccp_role(sk), sk, hcrx->ccid3hcrx_state);
1097 /* Dealing with packet loss */
1098 ccid3_pr_debug("%s, sk=%p(%s), data loss! Reacting...\n",
1099 dccp_role(sk), sk, dccp_state_name(sk->sk_state));
1101 p_prev = hcrx->ccid3hcrx_p;
1103 /* Calculate loss event rate */
1104 if (!list_empty(&hcrx->ccid3hcrx_li_hist)) {
1105 u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);
1107 /* Scaling up by 1000000 as fixed decimal */
1109 hcrx->ccid3hcrx_p = 1000000 / i_mean;
1111 printk(KERN_CRIT "%s: empty loss hist\n",__FUNCTION__);
1115 if (hcrx->ccid3hcrx_p > p_prev) {
1116 ccid3_hc_rx_send_feedback(sk);
1121 static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
1123 struct dccp_sock *dp = dccp_sk(sk);
1124 struct ccid3_hc_rx_sock *hcrx = ccid_priv(ccid);
1126 ccid3_pr_debug("%s, sk=%p\n", dccp_role(sk), sk);
1128 if (dp->dccps_packet_size >= TFRC_MIN_PACKET_SIZE &&
1129 dp->dccps_packet_size <= TFRC_MAX_PACKET_SIZE)
1130 hcrx->ccid3hcrx_s = dp->dccps_packet_size;
1132 hcrx->ccid3hcrx_s = TFRC_STD_PACKET_SIZE;
1134 hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA;
1135 INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist);
1136 INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist);
1137 dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack);
1138 hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack;
1139 hcrx->ccid3hcrx_rtt = 5000; /* XXX 5ms for now... */
1143 static void ccid3_hc_rx_exit(struct sock *sk)
1145 struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
1147 BUG_ON(hcrx == NULL);
1149 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);
1151 /* Empty packet history */
1152 dccp_rx_hist_purge(ccid3_rx_hist, &hcrx->ccid3hcrx_hist);
1154 /* Empty loss interval history */
1155 dccp_li_hist_purge(ccid3_li_hist, &hcrx->ccid3hcrx_li_hist);
1158 static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
1160 const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
1162 /* Listen socks doesn't have a private CCID block */
1163 if (sk->sk_state == DCCP_LISTEN)
1166 BUG_ON(hcrx == NULL);
1168 info->tcpi_ca_state = hcrx->ccid3hcrx_state;
1169 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
1170 info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt;
1173 static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
1175 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
1177 /* Listen socks doesn't have a private CCID block */
1178 if (sk->sk_state == DCCP_LISTEN)
1181 BUG_ON(hctx == NULL);
1183 info->tcpi_rto = hctx->ccid3hctx_t_rto;
1184 info->tcpi_rtt = hctx->ccid3hctx_rtt;
1187 static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
1188 u32 __user *optval, int __user *optlen)
1190 const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
1193 /* Listen socks doesn't have a private CCID block */
1194 if (sk->sk_state == DCCP_LISTEN)
1198 case DCCP_SOCKOPT_CCID_RX_INFO:
1199 if (len < sizeof(hcrx->ccid3hcrx_tfrc))
1201 len = sizeof(hcrx->ccid3hcrx_tfrc);
1202 val = &hcrx->ccid3hcrx_tfrc;
1205 return -ENOPROTOOPT;
1208 if (put_user(len, optlen) || copy_to_user(optval, val, len))
1214 static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
1215 u32 __user *optval, int __user *optlen)
1217 const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
1220 /* Listen socks doesn't have a private CCID block */
1221 if (sk->sk_state == DCCP_LISTEN)
1225 case DCCP_SOCKOPT_CCID_TX_INFO:
1226 if (len < sizeof(hctx->ccid3hctx_tfrc))
1228 len = sizeof(hctx->ccid3hctx_tfrc);
1229 val = &hctx->ccid3hctx_tfrc;
1232 return -ENOPROTOOPT;
1235 if (put_user(len, optlen) || copy_to_user(optval, val, len))
1241 static struct ccid_operations ccid3 = {
1242 .ccid_id = DCCPC_CCID3,
1243 .ccid_name = "ccid3",
1244 .ccid_owner = THIS_MODULE,
1245 .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock),
1246 .ccid_hc_tx_init = ccid3_hc_tx_init,
1247 .ccid_hc_tx_exit = ccid3_hc_tx_exit,
1248 .ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet,
1249 .ccid_hc_tx_packet_sent = ccid3_hc_tx_packet_sent,
1250 .ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv,
1251 .ccid_hc_tx_insert_options = ccid3_hc_tx_insert_options,
1252 .ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options,
1253 .ccid_hc_rx_obj_size = sizeof(struct ccid3_hc_rx_sock),
1254 .ccid_hc_rx_init = ccid3_hc_rx_init,
1255 .ccid_hc_rx_exit = ccid3_hc_rx_exit,
1256 .ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options,
1257 .ccid_hc_rx_packet_recv = ccid3_hc_rx_packet_recv,
1258 .ccid_hc_rx_get_info = ccid3_hc_rx_get_info,
1259 .ccid_hc_tx_get_info = ccid3_hc_tx_get_info,
1260 .ccid_hc_rx_getsockopt = ccid3_hc_rx_getsockopt,
1261 .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt,
1264 module_param(ccid3_debug, int, 0444);
1265 MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
1267 static __init int ccid3_module_init(void)
1271 ccid3_rx_hist = dccp_rx_hist_new("ccid3");
1272 if (ccid3_rx_hist == NULL)
1275 ccid3_tx_hist = dccp_tx_hist_new("ccid3");
1276 if (ccid3_tx_hist == NULL)
1279 ccid3_li_hist = dccp_li_hist_new("ccid3");
1280 if (ccid3_li_hist == NULL)
1283 rc = ccid_register(&ccid3);
1285 goto out_free_loss_interval_history;
1289 out_free_loss_interval_history:
1290 dccp_li_hist_delete(ccid3_li_hist);
1291 ccid3_li_hist = NULL;
1293 dccp_tx_hist_delete(ccid3_tx_hist);
1294 ccid3_tx_hist = NULL;
1296 dccp_rx_hist_delete(ccid3_rx_hist);
1297 ccid3_rx_hist = NULL;
module_init(ccid3_module_init);
1302 static __exit void ccid3_module_exit(void)
1304 ccid_unregister(&ccid3);
1306 if (ccid3_tx_hist != NULL) {
1307 dccp_tx_hist_delete(ccid3_tx_hist);
1308 ccid3_tx_hist = NULL;
1310 if (ccid3_rx_hist != NULL) {
1311 dccp_rx_hist_delete(ccid3_rx_hist);
1312 ccid3_rx_hist = NULL;
1314 if (ccid3_li_hist != NULL) {
1315 dccp_li_hist_delete(ccid3_li_hist);
1316 ccid3_li_hist = NULL;
1319 module_exit(ccid3_module_exit);
1321 MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>, "
1322 "Arnaldo Carvalho de Melo <acme@ghostprotocols.net>");
1323 MODULE_DESCRIPTION("DCCP TFRC CCID3 CCID");
1324 MODULE_LICENSE("GPL");
1325 MODULE_ALIAS("net-dccp-ccid-3");