X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=net%2Fiucv%2Faf_iucv.c;h=aef6645803559c0e0695574a731dad158fa8a091;hb=907135aaa0cc120a347222c8f274ecc5ca0db641;hp=d9e9ddb8eac5e68a22365e7e1cc1519a8649df04;hpb=ba7cc09c9c9e29a57045dc5bbf843ac1cfad3283;p=linux-2.6 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index d9e9ddb8ea..aef6645803 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -41,6 +41,9 @@ static struct proto iucv_proto = { .obj_size = sizeof(struct iucv_sock), }; +static void iucv_sock_kill(struct sock *sk); +static void iucv_sock_close(struct sock *sk); + /* Call Back functions */ static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *); @@ -213,13 +216,16 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct sock *sk; - sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1); + sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&iucv_sk(sk)->accept_q); + spin_lock_init(&iucv_sk(sk)->accept_q_lock); skb_queue_head_init(&iucv_sk(sk)->send_skb_q); + INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list); + spin_lock_init(&iucv_sk(sk)->message_q.lock); skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); iucv_sk(sk)->send_tag = 0; @@ -239,7 +245,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) } /* Create an IUCV socket */ -static int iucv_sock_create(struct socket *sock, int protocol) +static int iucv_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; @@ -274,15 +280,25 @@ void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) void iucv_accept_enqueue(struct sock *parent, struct sock *sk) { + unsigned long flags; + struct iucv_sock *par = iucv_sk(parent); + sock_hold(sk); - list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q); + spin_lock_irqsave(&par->accept_q_lock, flags); + list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q); + spin_unlock_irqrestore(&par->accept_q_lock, flags); iucv_sk(sk)->parent = parent; parent->sk_ack_backlog++; } void iucv_accept_unlink(struct sock *sk) { + unsigned long flags; + struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent); + + spin_lock_irqsave(&par->accept_q_lock, flags); list_del_init(&iucv_sk(sk)->accept_q); + spin_unlock_irqrestore(&par->accept_q_lock, flags); iucv_sk(sk)->parent->sk_ack_backlog--; iucv_sk(sk)->parent = NULL; sock_put(sk); @@ -298,8 +314,8 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock) lock_sock(sk); if (sk->sk_state == IUCV_CLOSED) { - release_sock(sk); iucv_accept_unlink(sk); + release_sock(sk); continue; } @@ -659,6 +675,90 @@ out: return err; } +static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) +{ + int dataleft, size, copied = 0; + struct sk_buff *nskb; + + dataleft = len; + while (dataleft) { + if (dataleft >= sk->sk_rcvbuf / 4) + size = sk->sk_rcvbuf / 4; + else + size = dataleft; + + nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA); + if (!nskb) + return -ENOMEM; + + memcpy(nskb->data, skb->data + copied, size); + copied += size; + dataleft -= size; + + skb_reset_transport_header(nskb); + skb_reset_network_header(nskb); + nskb->len = size; + + skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); + } + + return 0; +} + +static void iucv_process_message(struct sock *sk, struct sk_buff *skb, + struct iucv_path *path, + struct iucv_message *msg) +{ + int rc; + + if (msg->flags & 
IPRMDATA) { + skb->data = NULL; + skb->len = 0; + } else { + rc = iucv_message_receive(path, msg, 0, skb->data, + msg->length, NULL); + if (rc) { + kfree_skb(skb); + return; + } + if (skb->truesize >= sk->sk_rcvbuf / 4) { + rc = iucv_fragment_skb(sk, skb, msg->length); + kfree_skb(skb); + skb = NULL; + if (rc) { + iucv_path_sever(path, NULL); + return; + } + skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); + } else { + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + skb->len = msg->length; + } + } + + if (sock_queue_rcv_skb(sk, skb)) + skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); +} + +static void iucv_process_message_q(struct sock *sk) +{ + struct iucv_sock *iucv = iucv_sk(sk); + struct sk_buff *skb; + struct sock_msg_q *p, *n; + + list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { + skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA); + if (!skb) + break; + iucv_process_message(sk, skb, p->path, &p->msg); + list_del(&p->list); + kfree(p); + if (!skb_queue_empty(&iucv->backlog_skb_q)) + break; + } +} + static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { @@ -670,8 +770,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, int err = 0; if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) && - skb_queue_empty(&iucv->backlog_skb_q) && - skb_queue_empty(&sk->sk_receive_queue)) + skb_queue_empty(&iucv->backlog_skb_q) && + skb_queue_empty(&sk->sk_receive_queue) && + list_empty(&iucv->message_q.list)) return 0; if (flags & (MSG_OOB)) @@ -710,16 +811,23 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, kfree_skb(skb); /* Queue backlog skbs */ - rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); + rskb = skb_dequeue(&iucv->backlog_skb_q); while (rskb) { if (sock_queue_rcv_skb(sk, rskb)) { - skb_queue_head(&iucv_sk(sk)->backlog_skb_q, + skb_queue_head(&iucv->backlog_skb_q, rskb); break; } else { - rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); + rskb = skb_dequeue(&iucv->backlog_skb_q); } } + if (skb_queue_empty(&iucv->backlog_skb_q)) { + spin_lock_bh(&iucv->message_q.lock); + if (!list_empty(&iucv->message_q.list)) + iucv_process_message_q(sk); + spin_unlock_bh(&iucv->message_q.lock); + } + } else skb_queue_head(&sk->sk_receive_queue, skb); @@ -879,6 +987,7 @@ static int iucv_callback_connreq(struct iucv_path *path, /* Find out if this path belongs to af_iucv. 
*/ read_lock(&iucv_sk_list.lock); iucv = NULL; + sk = NULL; sk_for_each(sk, node, &iucv_sk_list.head) if (sk->sk_state == IUCV_LISTEN && !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { @@ -960,99 +1069,44 @@ static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) sk->sk_state_change(sk); } -static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len, - struct sk_buff_head *fragmented_skb_q) -{ - int dataleft, size, copied = 0; - struct sk_buff *nskb; - - dataleft = len; - while (dataleft) { - if (dataleft >= sk->sk_rcvbuf / 4) - size = sk->sk_rcvbuf / 4; - else - size = dataleft; - - nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA); - if (!nskb) - return -ENOMEM; - - memcpy(nskb->data, skb->data + copied, size); - copied += size; - dataleft -= size; - - skb_reset_transport_header(nskb); - skb_reset_network_header(nskb); - nskb->len = size; - - skb_queue_tail(fragmented_skb_q, nskb); - } - - return 0; -} - static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) { struct sock *sk = path->private; struct iucv_sock *iucv = iucv_sk(sk); - struct sk_buff *skb, *fskb; - struct sk_buff_head fragmented_skb_q; - int rc; - - skb_queue_head_init(&fragmented_skb_q); + struct sk_buff *skb; + struct sock_msg_q *save_msg; + int len; if (sk->sk_shutdown & RCV_SHUTDOWN) return; + if (!list_empty(&iucv->message_q.list) || + !skb_queue_empty(&iucv->backlog_skb_q)) + goto save_message; + + len = atomic_read(&sk->sk_rmem_alloc); + len += msg->length + sizeof(struct sk_buff); + if (len > sk->sk_rcvbuf) + goto save_message; + skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA); - if (!skb) { - iucv_path_sever(path, NULL); - return; - } + if (!skb) + goto save_message; - if (msg->flags & IPRMDATA) { - skb->data = NULL; - skb->len = 0; - } else { - rc = iucv_message_receive(path, msg, 0, skb->data, - msg->length, NULL); - if (rc) { - kfree_skb(skb); - return; - } - if (skb->truesize >= sk->sk_rcvbuf / 4) { - rc = iucv_fragment_skb(sk, skb, msg->length, - &fragmented_skb_q); - kfree_skb(skb); - skb = NULL; - if (rc) { - iucv_path_sever(path, NULL); - return; - } - } else { - skb_reset_transport_header(skb); - skb_reset_network_header(skb); - skb->len = msg->length; - } - } - /* Queue the fragmented skb */ - fskb = skb_dequeue(&fragmented_skb_q); - while (fskb) { - if (!skb_queue_empty(&iucv->backlog_skb_q)) - skb_queue_tail(&iucv->backlog_skb_q, fskb); - else if (sock_queue_rcv_skb(sk, fskb)) - skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb); - fskb = skb_dequeue(&fragmented_skb_q); - } + spin_lock(&iucv->message_q.lock); + iucv_process_message(sk, skb, path, msg); + spin_unlock(&iucv->message_q.lock); - /* Queue the original skb if it exists (was not fragmented) */ - if (skb) { - if (!skb_queue_empty(&iucv->backlog_skb_q)) - skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); - else if (sock_queue_rcv_skb(sk, skb)) - skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); - } + return; + +save_message: + save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); + save_msg->path = path; + save_msg->msg = *msg; + spin_lock(&iucv->message_q.lock); + list_add_tail(&save_msg->list, &iucv->message_q.list); + spin_unlock(&iucv->message_q.lock); } static void iucv_callback_txdone(struct iucv_path *path,
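
The hunks above rework iucv_callback_rx() so that an incoming IUCV message is received and queued immediately only when no earlier messages are still parked (both iucv->message_q and the backlog queue empty) and the socket's receive buffer has room (sk_rmem_alloc plus the message length and skb overhead stays within sk_rcvbuf); otherwise the path/message pair is saved on iucv->message_q and drained later from iucv_sock_recvmsg() through iucv_process_message_q(). The user-space sketch below models only that decision flow under those assumptions; the struct msg type, the RCVBUF constant, the helper names and the demo main() are illustrative, not code from the patch.

/*
 * User-space model of the deferred-receive logic introduced above.
 * "deliver" stands in for iucv_process_message()/sock_queue_rcv_skb(),
 * "defer" for the save_message: path onto message_q, and "drain" for
 * iucv_process_message_q() as invoked from the recvmsg() path.
 */
#include <stdio.h>
#include <stdlib.h>

#define RCVBUF 64            /* stand-in for sk->sk_rcvbuf            */

struct msg {
	size_t len;          /* stand-in for iucv_message.length      */
	struct msg *next;
};

static struct msg *deferred_head, **deferred_tail = &deferred_head;
static size_t rmem_alloc;    /* stand-in for sk->sk_rmem_alloc        */

/* Hand the message to the receiver; rough analogue of queueing the skb. */
static void deliver(struct msg *m)
{
	rmem_alloc += m->len;
	printf("delivered %zu bytes (rmem now %zu)\n", m->len, rmem_alloc);
	free(m);
}

/* Park the message, as the save_message: label does with message_q. */
static void defer(struct msg *m)
{
	m->next = NULL;
	*deferred_tail = m;
	deferred_tail = &m->next;
	printf("deferred %zu bytes\n", m->len);
}

/* Rough analogue of the iucv_callback_rx() decision: keep ordering by
 * always deferring while the deferred list is non-empty, and defer when
 * the receive buffer cannot take the message right now. */
static void on_incoming(size_t len)
{
	struct msg *m = malloc(sizeof(*m));

	if (!m)
		return;
	m->len = len;
	if (deferred_head || rmem_alloc + len > RCVBUF)
		defer(m);
	else
		deliver(m);
}

/* Rough analogue of iucv_process_message_q(): run once the reader has
 * made room, i.e. from the recvmsg() path in the patch. */
static void drain_deferred(void)
{
	while (deferred_head && rmem_alloc + deferred_head->len <= RCVBUF) {
		struct msg *m = deferred_head;

		deferred_head = m->next;
		if (!deferred_head)
			deferred_tail = &deferred_head;
		deliver(m);
	}
}

int main(void)
{
	on_incoming(40);     /* fits, delivered immediately            */
	on_incoming(40);     /* would overflow RCVBUF, deferred        */
	on_incoming(10);     /* deferred too, to preserve ordering     */

	rmem_alloc = 0;      /* reader consumed everything             */
	drain_deferred();    /* both deferred messages go through now  */
	return 0;
}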
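
iucv_fragment_skb(), which this patch moves ahead of the callbacks, handles a message whose skb would occupy a quarter or more of the receive buffer by copying it out into chunks of at most sk_rcvbuf / 4 bytes and appending them to the backlog queue. A minimal stand-alone sketch of that splitting loop follows; the quarter-of-rcvbuf split factor comes from the patch, while the function names, the printf-based queueing and main() are assumptions for illustration only.

/*
 * Model of the splitting loop in iucv_fragment_skb(): copy the payload
 * out in chunks no larger than a quarter of the receive buffer.
 */
#include <stdio.h>

static void queue_chunk(const char *data, size_t len)
{
	/* Stand-in for allocating an skb and queueing it on the backlog. */
	printf("queued chunk of %zu bytes: %.*s\n", len, (int)len, data);
}

static void fragment(const char *data, size_t len, size_t rcvbuf)
{
	size_t copied = 0;

	while (copied < len) {
		size_t size = len - copied;

		if (size >= rcvbuf / 4)
			size = rcvbuf / 4;
		queue_chunk(data + copied, size);
		copied += size;
	}
}

int main(void)
{
	const char payload[] = "abcdefghijklmnopqrstuvwxyz";

	fragment(payload, sizeof(payload) - 1, 32); /* chunks of 8 bytes */
	return 0;
}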