/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
					      struct sctp_ulpevent *);

/* 1st Level Abstractions */
/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;
	ulpq->malloced = 0;

	return ulpq;
}
/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}
/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}
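
/* Illustrative walk-through (hypothetical values): a DATA chunk with
 * TSN 7 on stream 0, SSN 3, arriving as SCTP_DATA_NOT_FRAG, is wrapped
 * in an event, marked MSG_EOR by sctp_ulpq_reasm(), and, if SSN 3 is
 * the next expected SSN on stream 0, returned by sctp_ulpq_order()
 * together with any lobby events it unblocked (chained on 'temp').
 * sctp_ulpq_tail_event() then posts the whole run to the socket.
 */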
/* Clear the partial delivery mode for this socket.   Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}
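
/* Illustrative note: sp->pd_mode counts associations in partial
 * delivery.  With two associations in PD, the first one to leave takes
 * the else branch above and extracts only its own lobby events; when
 * the second leaves, atomic_dec_and_test() fires and the entire lobby
 * is spliced onto sk_receive_queue in one shot.
 */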
/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
/* Add a new event for propagation to the ULP.
 * If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the one causing the partial delivery.
	 */
	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/* If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
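
/* Usage sketch (mirrors sctp_ulpq_tail_data() above): callers that
 * harvest several events chain them on a private list first, which is
 * what makes the skb->prev cast at the top of this function work --
 * the first skb on an sk_buff_head has prev pointing back at the head:
 *
 *	skb_queue_head_init(&temp);
 *	__skb_queue_tail(&temp, sctp_event2skb(event));
 *	...
 *	sctp_ulpq_tail_event(ulpq, event);	// skb_list == &temp
 */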
/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}
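
/* Worked example (hypothetical TSNs): with events 100, 101 and 104 on
 * ulpq->reasm, an arriving event with TSN 103 fails the tail test
 * (TSN_lt(104, 103) is false), so the walk stops at the 104 entry and
 * the event is inserted before it, leaving 100, 101, 103, 104.
 */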
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, then free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = NULL;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}
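
/* Illustrative example: for fragments FIRST(10), MIDDLE(11), LAST(12)
 * sitting on 'queue', f_frag is the TSN 10 skb and l_frag the TSN 12
 * skb.  TSNs 11 and 12 are unlinked and chained onto TSN 10's
 * frag_list, its len/data_len grow by their lengths, and the event
 * embedded in the first skb is returned as one reassembled message.
 */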
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			first_frag = pos;
			next_tsn = ctsn + 1;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn))
				next_tsn++;
			else
				first_frag = NULL;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}
done:
	return retval;

found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
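
/* Illustrative example: a queue holding FIRST(100), MIDDLE(101),
 * MIDDLE(102), LAST(103) completes the walk: next_tsn advances through
 * 101..103, the LAST case matches, and all four fragments reach
 * sctp_make_reassembled_event().  If 102 were missing, the LAST case
 * would see ctsn != next_tsn, reset first_frag, and deliver nothing
 * until the gap fills.
 */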
/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}
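
/* Illustrative example: in partial delivery with MIDDLE(20),
 * MIDDLE(21), LAST(22) queued, the walk tracks the run from TSN 20,
 * reaches the LAST case with ctsn == next_tsn, and the reassembled
 * event carries MSG_EOR.  Only is_last sets MSG_EOR here; a run that
 * ends before the LAST fragment is handed up with the message still
 * open.
 */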
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}
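
/* Illustrative example: with FIRST(50) and MIDDLE(51) queued but the
 * rest of the message outstanding, the walk collects 50..51 and
 * returns them as a single event without MSG_EOR -- the leading piece
 * that partial delivery pushes up while the remainder arrives.
 */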
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid, cssn;

	sid = event->stream;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}
/* Helper function to store chunks needing ordering.  */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}
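
/* Worked example (hypothetical stream/SSN pairs): with the lobby
 * holding (sid 1, ssn 2) and (sid 2, ssn 0), an event for
 * (sid 1, ssn 4) fails the tail tests, so the walk runs and stops at
 * (sid 2, ssn 0); the insert leaves the lobby as (1,2), (1,4), (2,0).
 */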
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
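
/* Illustrative example: if stream 3 expects SSN 5 and events with
 * SSNs 6 and 7 are parked in the lobby, the arrival of SSN 5 matches
 * sctp_ssn_peek(), the expectation advances, and
 * sctp_ulpq_retrieve_ordered() chains 6 and 7 behind the SSN 5 event
 * so that all three are delivered together.
 */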
/* Helper function to gather skbs that have possibly become
 * deliverable because a FORWARD-TSN skipped their dependencies.
 */
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (cssn != sctp_ssn_peek(in, csid))
			continue;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, csid);

		__skb_unlink(pos, &ulpq->lobby);
		if (!event) {
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		} else {
			/* Attach all gathered skbs to the event.  */
			__skb_queue_tail(&temp, pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);
}
/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq);
}
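
/* Illustrative example: a FORWARD-TSN that skips up to (sid 0, ssn 9)
 * while stream 0 still expects SSN 7 advances the expectation past 9;
 * lobby events with SSN 10, 11, ... become deliverable and are reaped
 * by sctp_ulpq_reap_ordered() above.
 */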
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}
/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	/* Walk backwards through the list, reneging the newest tsns. */
	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}
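
/* Illustrative arithmetic: if an incoming chunk needs 1000 bytes and
 * the two newest lobby skbs hold 600 and 300 bytes of headlen,
 * sctp_ulpq_renege_order() frees 900 and the caller asks this routine
 * for the remaining 100, which the newest reasm fragment covers.  Each
 * reneged TSN is also cleared in the peer's tsn_map so the peer will
 * retransmit it.
 */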
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.   */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			atomic_inc(&sp->pd_mode);
			ulpq->pd_mode = 1;
			return;
		}
	}
}
/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}
}
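
/* Illustrative flow: for a 1452-byte DATA chunk, needed is the chunk
 * length minus sizeof(sctp_data_chunk_t).  If reneging the ordering
 * and reassembly queues frees at least that much, the chunk's TSN is
 * marked, the data is pushed through sctp_ulpq_tail_data(), and
 * partial delivery is attempted to keep draining under rwnd pressure.
 */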
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}