#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/bug.h>
#include <net/sock.h>
{
struct request_sock *req = queue->rskq_accept_head;
- BUG_TRAP(req != NULL);
+ WARN_ON(req == NULL);
queue->rskq_accept_head = req->dl_next;
if (queue->rskq_accept_head == NULL)
struct request_sock *req = reqsk_queue_remove(queue);
struct sock *child = req->sk;
- BUG_TRAP(child != NULL);
+ WARN_ON(child == NULL);
sk_acceptq_removed(parent);
__reqsk_free(req);
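
The rule applied throughout this patch is mechanical: BUG_TRAP() warned when its assertion was false, while WARN_ON() warns (and dumps a backtrace) when its condition is true, so every conversion negates the expression, simplifying compound assertions via De Morgan's laws. A hedged sketch of the 2.6-era macro being removed (the exact definition in <linux/rtnetlink.h> may differ slightly):

/*
 * BUG_TRAP(x): despite the name, never a BUG() -- just a printk when
 * the asserted condition fails.
 */
#define BUG_TRAP(x) do { \
	if (unlikely(!(x))) \
		printk(KERN_ERR "KERNEL: assertion (%s) failed at %s (%d)\n", \
		       #x, __FILE__, __LINE__); \
} while (0)

/* Hence the conversion rule, with De Morgan for compound conditions: */
/*   BUG_TRAP(p != NULL)  ->  WARN_ON(p == NULL) */
/*   BUG_TRAP(a && b)     ->  WARN_ON(!a || !b)  */
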
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
struct sk_buff *skb = clist;
clist = clist->next;
- BUG_TRAP(!atomic_read(&skb->users));
+ WARN_ON(atomic_read(&skb->users));
__kfree_skb(skb);
}
}
dev->uninit(dev);
/* Notifier chain MUST detach us from master device. */
- BUG_TRAP(!dev->master);
+ WARN_ON(dev->master);
/* Remove entries from kobject tree */
netdev_unregister_kobject(dev);
/* paranoia */
BUG_ON(atomic_read(&dev->refcnt));
- BUG_TRAP(!dev->ip_ptr);
- BUG_TRAP(!dev->ip6_ptr);
- BUG_TRAP(!dev->dn_ptr);
+ WARN_ON(dev->ip_ptr);
+ WARN_ON(dev->ip6_ptr);
+ WARN_ON(dev->dn_ptr);
if (dev->destructor)
dev->destructor(dev);
}
}
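
Note what is not converted above: the refcount check stays a BUG_ON(), since freeing a device that still has references would corrupt memory, while the stale-pointer checks become recoverable warnings. Roughly (my gloss, not part of the patch):

/* BUG_ON(cond)  - cond true => oops; use when continuing is unsafe.    */
/* WARN_ON(cond) - cond true => warning + backtrace; execution goes on. */
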
- BUG_TRAP(lopt->qlen == 0);
+ WARN_ON(lopt->qlen != 0);
if (lopt_size > PAGE_SIZE)
vfree(lopt);
else
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + frag->size;
if ((copy = end - offset) > 0) {
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
__wsum csum2;
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
__skb_queue_purge(&sk->sk_error_queue);
/* Next, the write queue. */
- BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
+ WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
/* Account for returned memory. */
sk_mem_reclaim(sk);
- BUG_TRAP(!sk->sk_wmem_queued);
- BUG_TRAP(!sk->sk_forward_alloc);
+ WARN_ON(sk->sk_wmem_queued);
+ WARN_ON(sk->sk_forward_alloc);
/* It is _impossible_ for the backlog to contain anything
* when we get here. All user references to this socket
#include <linux/dmaengine.h>
#include <linux/socket.h>
-#include <linux/rtnetlink.h> /* for BUG_TRAP */
#include <net/tcp.h>
#include <net/netdma.h>
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
copy = end - offset;
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
copy = end - offset;
{
s64 delta = dccp_delta_seqno(s1, s2);
- BUG_TRAP(delta >= 0);
+ WARN_ON(delta < 0);
return (u64)delta <= ndp + 1;
}
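
The WARN_ON here documents an invariant rather than guarding it: if delta is ever negative, the (u64) cast turns it into a huge value and the comparison fails closed. A user-space sketch of the same trick (follows48() is a hypothetical stand-in, not the kernel function):

#include <stdio.h>
#include <stdint.h>

/* Returns 1 iff s2 is within ndp+1 seqnos after s1, given their delta. */
static int follows48(int64_t delta, uint64_t ndp)
{
	/* A negative delta wraps to a huge unsigned value, so the
	 * comparison fails closed without any extra branch. */
	return (uint64_t)delta <= ndp + 1;
}

int main(void)
{
	printf("%d\n", follows48(1, 0));	/* 1: in sequence */
	printf("%d\n", follows48(-1, 0));	/* 0: fails closed */
	return 0;
}
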
/* Stop the REQUEST timer */
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
- BUG_TRAP(sk->sk_send_head != NULL);
+ WARN_ON(sk->sk_send_head == NULL);
__kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
* ICMPs are not backlogged, hence we cannot get an established
* socket here.
*/
- BUG_TRAP(!req->sk);
+ WARN_ON(req->sk);
if (seq != dccp_rsk(req)->dreq_iss) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
* ICMPs are not backlogged, hence we cannot get an established
* socket here.
*/
- BUG_TRAP(req->sk == NULL);
+ WARN_ON(req->sk != NULL);
if (seq != dccp_rsk(req)->dreq_iss) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
inet_csk_delack_init(sk);
__sk_dst_reset(sk);
- BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
+ WARN_ON(inet->num && !icsk->icsk_bind_hash);
sk->sk_error_report(sk);
return err;
*/
local_bh_disable();
bh_lock_sock(sk);
- BUG_TRAP(!sock_owned_by_user(sk));
+ WARN_ON(sock_owned_by_user(sk));
/* Have we already been destroyed by a softirq or backlog? */
if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
* -- Acks in client-PARTOPEN state (sec. 8.1.5)
* -- CloseReq in server-CLOSEREQ state (sec. 8.3)
* -- Close in node-CLOSING state (sec. 8.3) */
- BUG_TRAP(sk->sk_send_head != NULL);
+ WARN_ON(sk->sk_send_head == NULL);
/*
* More than 4MSL (8 minutes) has passed, a RESET(aborted) was
return;
}
- BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
- BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
- BUG_TRAP(!sk->sk_wmem_queued);
- BUG_TRAP(!sk->sk_forward_alloc);
+ WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(sk->sk_wmem_queued);
+ WARN_ON(sk->sk_forward_alloc);
kfree(inet->opt);
dst_release(sk->sk_dst_cache);
answer_flags = answer->flags;
rcu_read_unlock();
- BUG_TRAP(answer_prot->slab != NULL);
+ WARN_ON(answer_prot->slab == NULL);
err = -ENOBUFS;
sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
lock_sock(sk2);
- BUG_TRAP((1 << sk2->sk_state) &
- (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE));
+ WARN_ON(!((1 << sk2->sk_state) &
+ (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
sock_graft(sk2, newsock);
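
The state assertion above relies on the TCPF_* flags being one-hot encodings of the TCP_* state numbers (TCPF_FOO == 1 << TCP_FOO), so set membership is a single shift-and-mask. A minimal user-space sketch, assuming the 2.6-era state numbering from <linux/tcp.h>:

#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_CLOSE_WAIT = 8 };
#define TCPF(state) (1 << (state))

int main(void)
{
	int sk_state = TCP_CLOSE_WAIT;

	/* One AND tests membership in the whole acceptable-state set. */
	int ok = (1 << sk_state) & (TCPF(TCP_ESTABLISHED) |
				    TCPF(TCP_CLOSE_WAIT) |
				    TCPF(TCP_CLOSE));
	printf("acceptable: %d\n", !!ok);	/* prints 1 */
	return 0;
}
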
{
struct net_device *dev = idev->dev;
- BUG_TRAP(!idev->ifa_list);
- BUG_TRAP(!idev->mc_list);
+ WARN_ON(idev->ifa_list);
+ WARN_ON(idev->mc_list);
#ifdef NET_REFCNT_DEBUG
printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
idev, dev ? dev->name : "NIL");
}
ipv4_devconf_setall(in_dev);
if (ifa->ifa_dev != in_dev) {
- BUG_TRAP(!ifa->ifa_dev);
+ WARN_ON(ifa->ifa_dev);
in_dev_hold(in_dev);
ifa->ifa_dev = in_dev;
}
success:
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, snum);
- BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
+ WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
ret = 0;
fail_unlock:
}
newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
- BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
+ WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
release_sock(sk);
return newsk;
ireq->rmt_addr == raddr &&
ireq->loc_addr == laddr &&
AF_INET_FAMILY(req->rsk_ops->family)) {
- BUG_TRAP(!req->sk);
+ WARN_ON(req->sk);
*prevp = prev;
break;
}
*/
void inet_csk_destroy_sock(struct sock *sk)
{
- BUG_TRAP(sk->sk_state == TCP_CLOSE);
- BUG_TRAP(sock_flag(sk, SOCK_DEAD));
+ WARN_ON(sk->sk_state != TCP_CLOSE);
+ WARN_ON(!sock_flag(sk, SOCK_DEAD));
/* It cannot be in hash table! */
- BUG_TRAP(sk_unhashed(sk));
+ WARN_ON(!sk_unhashed(sk));
/* If inet_sk(sk)->num is nonzero, the socket must be bound */
- BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);
+ WARN_ON(inet_sk(sk)->num && !inet_csk(sk)->icsk_bind_hash);
sk->sk_prot->destroy(sk);
local_bh_disable();
bh_lock_sock(child);
- BUG_TRAP(!sock_owned_by_user(child));
+ WARN_ON(sock_owned_by_user(child));
sock_hold(child);
sk->sk_prot->disconnect(child, O_NONBLOCK);
sk_acceptq_removed(sk);
__reqsk_free(req);
}
- BUG_TRAP(!sk->sk_ack_backlog);
+ WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
struct sk_buff *fp;
struct netns_frags *nf;
- BUG_TRAP(q->last_in & INET_FRAG_COMPLETE);
- BUG_TRAP(del_timer(&q->timer) == 0);
+ WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
+ WARN_ON(del_timer(&q->timer) != 0);
/* Release all fragment data. */
fp = q->fragments;
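
One subtlety in the hunk above: del_timer() returns nonzero iff it deactivated a still-pending timer, and the call itself is a side effect that must survive the conversion, so only the sense of the comparison flips:

/* Before: BUG_TRAP(del_timer(&q->timer) == 0);
 *	warn if the timer was still pending when the queue was destroyed.
 * After:  WARN_ON(del_timer(&q->timer) != 0);
 *	identical side effect (the timer is removed either way); only
 *	the test is inverted to match WARN_ON's warn-when-true sense.
 */
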
inet->num = lport;
inet->sport = htons(lport);
sk->sk_hash = hash;
- BUG_TRAP(sk_unhashed(sk));
+ WARN_ON(!sk_unhashed(sk));
__sk_add_node(sk, &head->chain);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock(lock);
rwlock_t *lock;
struct inet_ehash_bucket *head;
- BUG_TRAP(sk_unhashed(sk));
+ WARN_ON(!sk_unhashed(sk));
sk->sk_hash = inet_sk_ehashfn(sk);
head = inet_ehash_bucket(hashinfo, sk->sk_hash);
return;
}
- BUG_TRAP(sk_unhashed(sk));
+ WARN_ON(!sk_unhashed(sk));
list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
lock = &hashinfo->lhash_lock;
*/
inet_bind_bucket_for_each(tb, node, &head->chain) {
if (tb->ib_net == net && tb->port == port) {
- BUG_TRAP(!hlist_empty(&tb->owners));
+ WARN_ON(hlist_empty(&tb->owners));
if (tb->fastreuse >= 0)
goto next_port;
if (!check_established(death_row, sk,
hashinfo->bhash_size)];
spin_lock(&bhead->lock);
tw->tw_tb = icsk->icsk_bind_hash;
- BUG_TRAP(icsk->icsk_bind_hash);
+ WARN_ON(!icsk->icsk_bind_hash);
inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
spin_unlock(&bhead->lock);
qp->q.fragments = head;
}
- BUG_TRAP(head != NULL);
- BUG_TRAP(FRAG_CB(head)->offset == 0);
+ WARN_ON(head == NULL);
+ WARN_ON(FRAG_CB(head)->offset != 0);
/* Allocate a new buffer for the datagram. */
ihlen = ip_hdrlen(head);
__skb_pull(newskb, skb_network_offset(newskb));
newskb->pkt_type = PACKET_LOOPBACK;
newskb->ip_summed = CHECKSUM_UNNECESSARY;
- BUG_TRAP(newskb->dst);
+ WARN_ON(!newskb->dst);
netif_rx(newskb);
return 0;
}
#if TCP_DEBUG
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
- BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+ WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif
if (inet_csk_ack_scheduled(sk)) {
goto found_ok_skb;
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
- BUG_TRAP(flags & MSG_PEEK);
+ WARN_ON(!(flags & MSG_PEEK));
skb = skb->next;
} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
tp->ucopy.len = len;
- BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
- (flags & (MSG_PEEK | MSG_TRUNC)));
+ WARN_ON(tp->copied_seq != tp->rcv_nxt &&
+ !(flags & (MSG_PEEK | MSG_TRUNC)));
/* Ugly... If prequeue is not empty, we have to
* process it before releasing socket, otherwise
*/
local_bh_disable();
bh_lock_sock(sk);
- BUG_TRAP(!sock_owned_by_user(sk));
+ WARN_ON(sock_owned_by_user(sk));
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
- BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
+ WARN_ON(inet->num && !icsk->icsk_bind_hash);
sk->sk_error_report(sk);
return err;
out:
#if FASTRETRANS_DEBUG > 0
- BUG_TRAP((int)tp->sacked_out >= 0);
- BUG_TRAP((int)tp->lost_out >= 0);
- BUG_TRAP((int)tp->retrans_out >= 0);
- BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
+ WARN_ON((int)tp->sacked_out < 0);
+ WARN_ON((int)tp->lost_out < 0);
+ WARN_ON((int)tp->retrans_out < 0);
+ WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
return flag;
}
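
These debug checks cast unsigned counters to int: an underflow of a u32 counter shows up as a huge unsigned value, and the cast makes it negative and therefore visible to the comparison. A runnable sketch (two's-complement targets, as the kernel assumes):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sacked_out = 0;

	sacked_out -= 1;	/* underflow: wraps to 0xffffffff */

	/* The cast exposes the wrap as a negative value. */
	printf("underflowed: %d\n", (int32_t)sacked_out < 0);	/* 1 */
	return 0;
}
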
int err;
unsigned int mss;
- BUG_TRAP(packets <= tp->packets_out);
+ WARN_ON(packets > tp->packets_out);
if (tp->lost_skb_hint) {
skb = tp->lost_skb_hint;
cnt = tp->lost_cnt_hint;
/* E. Check state exit conditions. State can be terminated
* when high_seq is ACKed. */
if (icsk->icsk_ca_state == TCP_CA_Open) {
- BUG_TRAP(tp->retrans_out == 0);
+ WARN_ON(tp->retrans_out != 0);
tp->retrans_stamp = 0;
} else if (!before(tp->snd_una, tp->high_seq)) {
switch (icsk->icsk_ca_state) {
}
#if FASTRETRANS_DEBUG > 0
- BUG_TRAP((int)tp->sacked_out >= 0);
- BUG_TRAP((int)tp->lost_out >= 0);
- BUG_TRAP((int)tp->retrans_out >= 0);
+ WARN_ON((int)tp->sacked_out < 0);
+ WARN_ON((int)tp->lost_out < 0);
+ WARN_ON((int)tp->retrans_out < 0);
if (!tp->packets_out && tcp_is_sack(tp)) {
icsk = inet_csk(sk);
if (tp->lost_out) {
int i;
/* RCV.NXT must cover all the block! */
- BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));
+ WARN_ON(before(tp->rcv_nxt, sp->end_seq));
/* Zap this SACK, by moving forward any other SACKS. */
for (i=this_sack+1; i < num_sacks; i++)
/* ICMPs are not backlogged, hence we cannot get
an established socket here.
*/
- BUG_TRAP(!req->sk);
+ WARN_ON(req->sk);
if (seq != tcp_rsk(req)->snt_isn) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
if (!tp->packets_out)
goto out;
- BUG_TRAP(!tcp_write_queue_empty(sk));
+ WARN_ON(tcp_write_queue_empty(sk));
if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
!((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
void in6_dev_finish_destroy(struct inet6_dev *idev)
{
struct net_device *dev = idev->dev;
- BUG_TRAP(idev->addr_list==NULL);
- BUG_TRAP(idev->mc_list==NULL);
+
+ WARN_ON(idev->addr_list != NULL);
+ WARN_ON(idev->mc_list != NULL);
+
#ifdef NET_REFCNT_DEBUG
printk(KERN_DEBUG "in6_dev_finish_destroy: %s\n", dev ? dev->name : "NIL");
#endif
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
{
- BUG_TRAP(ifp->if_next==NULL);
- BUG_TRAP(ifp->lst_next==NULL);
+ WARN_ON(ifp->if_next != NULL);
+ WARN_ON(ifp->lst_next != NULL);
+
#ifdef NET_REFCNT_DEBUG
printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
#endif
answer_flags = answer->flags;
rcu_read_unlock();
- BUG_TRAP(answer_prot->slab != NULL);
+ WARN_ON(answer_prot->slab == NULL);
err = -ENOBUFS;
sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
ipv6_addr_equal(&treq->rmt_addr, raddr) &&
ipv6_addr_equal(&treq->loc_addr, laddr) &&
(!treq->iif || treq->iif == iif)) {
- BUG_TRAP(req->sk == NULL);
+ WARN_ON(req->sk != NULL);
*prevp = prev;
return req;
}
struct hlist_head *list;
rwlock_t *lock;
- BUG_TRAP(sk_unhashed(sk));
+ WARN_ON(!sk_unhashed(sk));
if (sk->sk_state == TCP_LISTEN) {
list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
* in hash table socket with a funny identity. */
inet->num = lport;
inet->sport = htons(lport);
- BUG_TRAP(sk_unhashed(sk));
+ WARN_ON(!sk_unhashed(sk));
__sk_add_node(sk, &head->chain);
sk->sk_hash = hash;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
w->leaf = rt;
return 1;
}
- BUG_TRAP(res!=0);
+ WARN_ON(res == 0);
}
w->leaf = NULL;
return 0;
pn->leaf = fib6_find_prefix(info->nl_net, pn);
#if RT6_DEBUG >= 2
if (!pn->leaf) {
- BUG_TRAP(pn->leaf != NULL);
+ WARN_ON(pn->leaf == NULL);
pn->leaf = info->nl_net->ipv6.ip6_null_entry;
}
#endif
#ifdef CONFIG_IPV6_SUBTREES
if (src_len) {
- BUG_TRAP(saddr!=NULL);
+ WARN_ON(saddr == NULL);
if (fn && fn->subtree)
fn = fib6_locate_1(fn->subtree, saddr, src_len,
offsetof(struct rt6_info, rt6i_src));
RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
iter++;
- BUG_TRAP(!(fn->fn_flags&RTN_RTINFO));
- BUG_TRAP(!(fn->fn_flags&RTN_TL_ROOT));
- BUG_TRAP(fn->leaf==NULL);
+ WARN_ON(fn->fn_flags & RTN_RTINFO);
+ WARN_ON(fn->fn_flags & RTN_TL_ROOT);
+ WARN_ON(fn->leaf != NULL);
children = 0;
child = NULL;
fn->leaf = fib6_find_prefix(net, fn);
#if RT6_DEBUG >= 2
if (fn->leaf==NULL) {
- BUG_TRAP(fn->leaf);
+ WARN_ON(!fn->leaf);
fn->leaf = net->ipv6.ip6_null_entry;
}
#endif
pn = fn->parent;
#ifdef CONFIG_IPV6_SUBTREES
if (FIB6_SUBTREE(pn) == fn) {
- BUG_TRAP(fn->fn_flags&RTN_ROOT);
+ WARN_ON(!(fn->fn_flags & RTN_ROOT));
FIB6_SUBTREE(pn) = NULL;
nstate = FWS_L;
} else {
- BUG_TRAP(!(fn->fn_flags&RTN_ROOT));
+ WARN_ON(fn->fn_flags & RTN_ROOT);
#endif
if (pn->right == fn) pn->right = child;
else if (pn->left == fn) pn->left = child;
#if RT6_DEBUG >= 2
- else BUG_TRAP(0);
+ else
+ WARN_ON(1);
#endif
if (child)
child->parent = pn;
#if RT6_DEBUG >= 2
if (rt->u.dst.obsolete>0) {
- BUG_TRAP(fn==NULL);
+ WARN_ON(fn != NULL);
return -ENOENT;
}
#endif
if (fn == NULL || rt == net->ipv6.ip6_null_entry)
return -ENOENT;
- BUG_TRAP(fn->fn_flags&RTN_RTINFO);
+ WARN_ON(!(fn->fn_flags & RTN_RTINFO));
if (!(rt->rt6i_flags&RTF_CACHE)) {
struct fib6_node *pn = fn;
w->node = pn;
#ifdef CONFIG_IPV6_SUBTREES
if (FIB6_SUBTREE(pn) == fn) {
- BUG_TRAP(fn->fn_flags&RTN_ROOT);
+ WARN_ON(!(fn->fn_flags & RTN_ROOT));
w->state = FWS_L;
continue;
}
continue;
}
#if RT6_DEBUG >= 2
- BUG_TRAP(0);
+ WARN_ON(1);
#endif
}
}
}
return 0;
}
- BUG_TRAP(res==0);
+ WARN_ON(res != 0);
}
w->leaf = rt;
return 0;
__skb_pull(newskb, skb_network_offset(newskb));
newskb->pkt_type = PACKET_LOOPBACK;
newskb->ip_summed = CHECKSUM_UNNECESSARY;
- BUG_TRAP(newskb->dst);
+ WARN_ON(!newskb->dst);
netif_rx(newskb);
return 0;
calc_padlen(sizeof(*dstopt), 6));
hao->type = IPV6_TLV_HAO;
+ BUILD_BUG_ON(sizeof(*hao) != 18);
hao->length = sizeof(*hao) - 2;
- BUG_TRAP(hao->length == 16);
len = ((char *)hao - (char *)dstopt) + sizeof(*hao);
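
The HAO hunk above is the one non-mechanical conversion in this patch: a runtime check of a structure-layout invariant (hao->length == 16, i.e. sizeof(*hao) == 18) becomes a compile-time BUILD_BUG_ON. The classic definition, approximately as in the era's <linux/kernel.h>:

/* Compile error if condition is true: a true condition yields a
 * negative array size, which no compiler will accept. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))
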
memcpy(&iph->saddr, x->coaddr, sizeof(iph->saddr));
spin_unlock_bh(&x->lock);
- BUG_TRAP(len == x->props.header_len);
+ WARN_ON(len != x->props.header_len);
dstopt->hdrlen = (x->props.header_len >> 3) - 1;
return 0;
x->props.header_len = sizeof(struct ipv6_destopt_hdr) +
calc_padlen(sizeof(struct ipv6_destopt_hdr), 6) +
sizeof(struct ipv6_destopt_hao);
- BUG_TRAP(x->props.header_len == 24);
+ WARN_ON(x->props.header_len != 24);
return 0;
}
rt2->rt_hdr.segments_left = 1;
memset(&rt2->reserved, 0, sizeof(rt2->reserved));
- BUG_TRAP(rt2->rt_hdr.hdrlen == 2);
+ WARN_ON(rt2->rt_hdr.hdrlen != 2);
memcpy(&rt2->addr, &iph->daddr, sizeof(rt2->addr));
spin_lock_bh(&x->lock);
fq_kill(fq);
- BUG_TRAP(head != NULL);
- BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0);
+ WARN_ON(head == NULL);
+ WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
/* Unfragmented part is taken from the first segment. */
payload_len = ((head->data - skb_network_header(head)) -
fq->q.fragments = head;
}
- BUG_TRAP(head != NULL);
- BUG_TRAP(FRAG6_CB(head)->offset == 0);
+ WARN_ON(head == NULL);
+ WARN_ON(FRAG6_CB(head)->offset != 0);
/* Unfragmented part is taken from the first segment. */
payload_len = ((head->data - skb_network_header(head)) -
/* ICMPs are not backlogged, hence we cannot get
* an established socket here.
*/
- BUG_TRAP(req->sk == NULL);
+ WARN_ON(req->sk != NULL);
if (seq != tcp_rsk(req)->snt_isn) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
return;
}
- BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
- BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
atomic_dec(&pfkey_socks_nr);
}
printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
return;
}
- BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
- BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
- BUG_TRAP(!nlk_sk(sk)->groups);
+
+ WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(nlk_sk(sk)->groups);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
static void packet_sock_destruct(struct sock *sk)
{
- BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
- BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(atomic_read(&sk->sk_rmem_alloc));
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive packet socket: %p\n", sk);
rxrpc_purge_queue(&sk->sk_receive_queue);
- BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
- BUG_TRAP(sk_unhashed(sk));
- BUG_TRAP(!sk->sk_socket);
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(!sk_unhashed(sk));
+ WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive rxrpc socket: %p\n", sk);
return;
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
}
EXPORT_SYMBOL(tcf_hash_destroy);
return;
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
}
static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
}
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
return 0;
}
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode **hn;
- BUG_TRAP(!ht->refcnt);
+ WARN_ON(ht->refcnt);
u32_clear_hnode(tp, ht);
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
return -ENOENT;
}
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);
- BUG_TRAP(root_ht != NULL);
+ WARN_ON(root_ht == NULL);
if (root_ht && --root_ht->refcnt == 0)
u32_destroy_hnode(tp, root_ht);
while ((ht = tp_c->hlist) != NULL) {
tp_c->hlist = ht->next;
- BUG_TRAP(ht->refcnt == 0);
+ WARN_ON(ht->refcnt != 0);
kfree(ht);
}
this->tparent->children = NULL;
}
} else {
- BUG_TRAP(this->sibling == this);
+ WARN_ON(this->sibling != this);
}
}
{
struct cbq_sched_data *q = qdisc_priv(sch);
- BUG_TRAP(!cl->filters);
+ WARN_ON(cl->filters);
tcf_destroy_chain(&cl->filter_list);
qdisc_destroy(cl->q);
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
- BUG_TRAP(!timer_pending(&dev->watchdog_timer));
+ WARN_ON(timer_pending(&dev->watchdog_timer));
}
*/
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
- BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
+ WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
if (!cl->prio_activity) {
cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
*/
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
- BUG_TRAP(cl->prio_activity);
+ WARN_ON(!cl->prio_activity);
htb_deactivate_prios(q, cl);
cl->prio_activity = 0;
u32 *pid;
} stk[TC_HTB_MAXDEPTH], *sp = stk;
- BUG_TRAP(tree->rb_node);
+ WARN_ON(!tree->rb_node);
sp->root = tree->rb_node;
sp->pptr = pptr;
sp->pid = pid;
*sp->pptr = (*sp->pptr)->rb_left;
if (sp > stk) {
sp--;
- BUG_TRAP(*sp->pptr);
+ WARN_ON(!*sp->pptr);
if (!*sp->pptr)
return NULL;
htb_next_rb_node(sp->pptr);
sp->pid = cl->un.inner.last_ptr_id + prio;
}
}
- BUG_TRAP(0);
+ WARN_ON(1);
return NULL;
}
do {
next:
- BUG_TRAP(cl);
+ WARN_ON(!cl);
if (!cl)
return NULL;
{
struct htb_class *parent = cl->parent;
- BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);
+ WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
if (parent->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
if (!cl->level) {
- BUG_TRAP(cl->un.leaf.q);
+ WARN_ON(!cl->un.leaf.q);
qdisc_destroy(cl->un.leaf.q);
}
gen_kill_estimator(&cl->bstats, &cl->rate_est);
spin_unlock_bh(&sctp_assocs_id_lock);
}
- BUG_TRAP(!atomic_read(&asoc->rmem_alloc));
+ WARN_ON(atomic_read(&asoc->rmem_alloc));
if (asoc->base.malloced) {
kfree(asoc);
static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
- BUG_TRAP(sk_unhashed(sk));
+ WARN_ON(!sk_unhashed(sk));
sk_add_node(sk, list);
}
skb_queue_purge(&sk->sk_receive_queue);
- BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
- BUG_TRAP(sk_unhashed(sk));
- BUG_TRAP(!sk->sk_socket);
+ WARN_ON(atomic_read(&sk->sk_wmem_alloc));
+ WARN_ON(!sk_unhashed(sk));
+ WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
printk("Attempt to release alive unix socket: %p\n", sk);
return;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
for (; list; list = list->next) {
int end;
- BUG_TRAP(start <= offset + len);
+ WARN_ON(start > offset + len);
end = start + list->len;
if ((copy = end - offset) > 0) {
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
-#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
break;
}
- BUG_TRAP(pos);
+ WARN_ON(!pos);
if (--pos->users)
return;
void __xfrm_state_destroy(struct xfrm_state *x)
{
- BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
+ WARN_ON(x->km.state != XFRM_STATE_DEAD);
spin_lock_bh(&xfrm_state_lock);
list_del(&x->all);