X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=net%2Fsctp%2Fsocket.c;h=d47d5787e2e5b8eba49dcb177c0d8b6549d993f2;hb=b46ae36de451212d253f31112338517753739191;hp=9f5d7930f54d7187835a1565862ffb441772d5f0;hpb=f26f7c480555812ca7c4037e0a50fa54afe2cb4a;p=linux-2.6

diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9f5d7930f5..d47d5787e2 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1,4 +1,4 @@
-/* SCTP kernel reference Implementation
+/* SCTP kernel implementation
  * (C) Copyright IBM Corp. 2001, 2004
  * Copyright (c) 1999-2000 Cisco, Inc.
  * Copyright (c) 1999-2001 Motorola, Inc.
@@ -6,7 +6,7 @@
  * Copyright (c) 2001-2002 Nokia, Inc.
  * Copyright (c) 2001 La Monte H.P. Yarroll
  *
- * This file is part of the SCTP kernel reference Implementation
+ * This file is part of the SCTP kernel implementation
  *
  * These functions interface with the sockets layer to implement the
  * SCTP Extensions for the Sockets API.
@@ -15,13 +15,13 @@
  * functions--this file is the functions which populate the struct proto
  * for SCTP which is the BOTTOM of the sockets interface.
  *
- * The SCTP reference implementation is free software;
+ * This SCTP implementation is free software;
  * you can redistribute it and/or modify it under the terms of
  * the GNU General Public License as published by
  * the Free Software Foundation; either version 2, or (at your option)
  * any later version.
  *
- * The SCTP reference implementation is distributed in the hope that it
+ * This SCTP implementation is distributed in the hope that it
  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  *                 ************************
  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -174,7 +174,8 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
 				sizeof(struct sctp_chunk);
 
 	atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
-	sk_charge_skb(sk, chunk->skb);
+	sk->sk_wmem_queued += chunk->skb->truesize;
+	sk_mem_charge(sk, chunk->skb->truesize);
 }
 
 /* Verify that this is a valid address.  */
@@ -390,7 +391,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 	/* Add the address to the bind address list.
 	 * Use GFP_ATOMIC since BHs will be disabled.
 	 */
-	ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
+	ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);
 
 	/* Copy back into socket for getsockname() use. */
 	if (!ret) {
@@ -585,8 +586,8 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
 			addr = (union sctp_addr *)addr_buf;
 			af = sctp_get_af_specific(addr->v4.sin_family);
 			memcpy(&saveaddr, addr, af->sockaddr_len);
-			retval = sctp_add_bind_addr(bp, &saveaddr, 0,
-						    GFP_ATOMIC);
+			retval = sctp_add_bind_addr(bp, &saveaddr,
+						    SCTP_ADDR_NEW, GFP_ATOMIC);
 			addr_buf += af->sockaddr_len;
 		}
 	}
@@ -777,7 +778,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
 		af = sctp_get_af_specific(laddr->v4.sin_family);
 		list_for_each_entry(saddr, &bp->address_list, list) {
 			if (sctp_cmp_addr_exact(&saddr->a, laddr))
-				saddr->use_as_src = 0;
+				saddr->state = SCTP_ADDR_DEL;
 		}
 		addr_buf += af->sockaddr_len;
 	}
@@ -1910,7 +1911,8 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
 		 * rwnd by that amount. If all the data in the skb is read,
 		 * rwnd is updated when the event is freed.
 		 */
-		sctp_assoc_rwnd_increase(event->asoc, copied);
+		if (!sctp_ulpevent_is_notification(event))
+			sctp_assoc_rwnd_increase(event->asoc, copied);
 		goto out;
 	} else if ((event->msg_flags & MSG_NOTIFICATION) ||
 		   (event->msg_flags & MSG_EOR))
@@ -4313,6 +4315,9 @@ static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
 		    (AF_INET6 == addr->a.sa.sa_family))
 			continue;
 		memcpy(&temp, &addr->a, sizeof(temp));
+		if (!temp.v4.sin_port)
+			temp.v4.sin_port = htons(port);
+
 		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
 								&temp);
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
@@ -4345,6 +4350,9 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
 		    (AF_INET6 == addr->a.sa.sa_family))
 			continue;
 		memcpy(&temp, &addr->a, sizeof(temp));
+		if (!temp.v4.sin_port)
+			temp.v4.sin_port = htons(port);
+
 		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
 								&temp);
 		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
@@ -6008,7 +6016,8 @@ static void __sctp_write_space(struct sctp_association *asoc)
 			 */
 			if (sock->fasync_list &&
 			    !(sk->sk_shutdown & SEND_SHUTDOWN))
-				sock_wake_async(sock, 2, POLL_OUT);
+				sock_wake_async(sock,
+						SOCK_WAKE_SPACE, POLL_OUT);
 		}
 	}
 }
@@ -6034,10 +6043,10 @@ static void sctp_wfree(struct sk_buff *skb)
 	atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
 
 	/*
-	 * This undoes what is done via sk_charge_skb
+	 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
 	 */
 	sk->sk_wmem_queued   -= skb->truesize;
-	sk->sk_forward_alloc += skb->truesize;
+	sk_mem_uncharge(sk, skb->truesize);
 
 	sock_wfree(skb);
 	__sctp_write_space(asoc);
@@ -6058,9 +6067,9 @@ void sctp_sock_rfree(struct sk_buff *skb)
 	atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
 
 	/*
-	 * Mimic the behavior of sk_stream_rfree
+	 * Mimic the behavior of sock_rfree
 	 */
-	sk->sk_forward_alloc += event->rmem_len;
+	sk_mem_uncharge(sk, event->rmem_len);
 }
 
 
@@ -6326,7 +6335,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	struct sk_buff *skb, *tmp;
 	struct sctp_ulpevent *event;
 	struct sctp_bind_hashbucket *head;
-	int flags = 0;
 
 	/* Migrate socket buffer sizes and all the socket level options to the
 	 * new socket.
@@ -6356,15 +6364,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	/* Copy the bind_addr list from the original endpoint to the new
 	 * endpoint so that we can handle restarts properly
 	 */
-	if (PF_INET6 == assoc->base.sk->sk_family)
-		flags = SCTP_ADDR6_ALLOWED;
-	if (assoc->peer.ipv4_address)
-		flags |= SCTP_ADDR4_PEERSUPP;
-	if (assoc->peer.ipv6_address)
-		flags |= SCTP_ADDR6_PEERSUPP;
-	sctp_bind_addr_copy(&newsp->ep->base.bind_addr,
-			    &oldsp->ep->base.bind_addr,
-			    SCTP_SCOPE_GLOBAL, GFP_KERNEL, flags);
+	sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
+			   &oldsp->ep->base.bind_addr, GFP_KERNEL);
 
 	/* Move any messages in the old socket's receive queue that are for the
 	 * peeled off association to the new socket's receive queue.