6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
23 /* Each xfrm_state may be linked to two tables:
25 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
26 2. Hash table by daddr to find what SAs exist for given
27 destination/tunnel endpoint. (output)
30 static DEFINE_SPINLOCK(xfrm_state_lock);
32 /* Hash table to find appropriate SA towards given target (endpoint
33 * of tunnel or destination of transport mode) allowed by selector.
35 * Main use is finding SA after policy selected tunnel or transport mode.
36 * Also, it can be used by ah/esp icmp error handler to find offending SA.
38 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
39 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
41 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
42 EXPORT_SYMBOL(km_waitq);
44 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
45 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
47 static struct work_struct xfrm_state_gc_work;
48 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
49 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
51 static int xfrm_state_gc_flush_bundles;
53 static void __xfrm_state_delete(struct xfrm_state *x);
55 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
56 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
58 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
59 static void km_state_expired(struct xfrm_state *x, int hard);
/* Final teardown of a state that reached the GC list: cancel its timer
 * and let the transform type release its private data before dropping
 * the type module reference.
 * NOTE(review): source is truncated here — intermediate cleanup lines
 * (62, 64-73) are not visible. */
61 static void xfrm_state_gc_destroy(struct xfrm_state *x)
63 	if (del_timer(&x->timer))
74 	x->type->destructor(x);
75 	xfrm_put_type(x->type);
/* Workqueue handler for deferred state destruction.  Splices the global
 * xfrm_state_gc_list onto a private list under xfrm_state_gc_lock, then
 * destroys each entry outside the lock.  Dead states are chained through
 * their (now unused) bydst list head.
 * NOTE(review): source truncated — bundle-flush body after line 87 is
 * not visible. */
80 static void xfrm_state_gc_task(void *data)
83 	struct list_head *entry, *tmp;
84 	struct list_head gc_list = LIST_HEAD_INIT(gc_list);
86 	if (xfrm_state_gc_flush_bundles) {
87 		xfrm_state_gc_flush_bundles = 0;
91 	spin_lock_bh(&xfrm_state_gc_lock);
92 	list_splice_init(&xfrm_state_gc_list, &gc_list);
93 	spin_unlock_bh(&xfrm_state_gc_lock);
95 	list_for_each_safe(entry, tmp, &gc_list) {
96 		x = list_entry(entry, struct xfrm_state, bydst);
97 		xfrm_state_gc_destroy(x);
/* Convert a timeout in seconds to jiffies, clamping so the result never
 * exceeds MAX_SCHEDULE_TIMEOUT-1 (avoids overflow in secs*HZ).
 * NOTE(review): the non-clamped return path is not visible here. */
102 static inline unsigned long make_jiffies(long secs)
104 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
105 		return MAX_SCHEDULE_TIMEOUT-1;
/* Per-state lifetime timer.  Compares the state's hard/soft add-time and
 * use-time limits against wall-clock seconds (xtime), notifies key
 * managers via km_state_expired(), and either re-arms the timer for the
 * nearest future deadline or hard-expires and deletes the state.
 * Runs with x->lock held (unlock visible at line 177); many branch
 * bodies are missing from this truncated view. */
110 static void xfrm_timer_handler(unsigned long data)
112 	struct xfrm_state *x = (struct xfrm_state*)data;
113 	unsigned long now = (unsigned long)xtime.tv_sec;
114 	long next = LONG_MAX;	/* seconds until the nearest pending expiry */
118 	if (x->km.state == XFRM_STATE_DEAD)
120 	if (x->km.state == XFRM_STATE_EXPIRED)
122 	if (x->lft.hard_add_expires_seconds) {
123 		long tmo = x->lft.hard_add_expires_seconds +
124 			x->curlft.add_time - now;
130 	if (x->lft.hard_use_expires_seconds) {
131 		long tmo = x->lft.hard_use_expires_seconds +
/* use_time may still be 0 if the SA was never used; fall back to now */
132 			(x->curlft.use_time ? : now) - now;
140 	if (x->lft.soft_add_expires_seconds) {
141 		long tmo = x->lft.soft_add_expires_seconds +
142 			x->curlft.add_time - now;
148 	if (x->lft.soft_use_expires_seconds) {
149 		long tmo = x->lft.soft_use_expires_seconds +
150 			(x->curlft.use_time ? : now) - now;
/* soft expiry: warn key managers (hard=0) but keep the state alive */
158 		km_state_expired(x, 0);
160 	if (next != LONG_MAX &&
161 	    !mod_timer(&x->timer, jiffies + make_jiffies(next)))
/* larval (ACQ) state that never got an SPI: give up on the acquire */
166 	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
167 		x->km.state = XFRM_STATE_EXPIRED;
/* hard expiry: notify (hard=1) and delete the state */
173 	km_state_expired(x, 1);
174 	__xfrm_state_delete(x);
177 	spin_unlock(&x->lock);
/* Allocate and initialise a new xfrm_state.  The caller receives the
 * single initial reference.  GFP_ATOMIC because callers may hold locks
 * or run in softirq context.  Byte/packet lifetime limits default to
 * XFRM_INF (unlimited); add_time is stamped from xtime.
 * NOTE(review): NULL-check after kmalloc and the return statement are
 * not visible in this truncated view. */
181 struct xfrm_state *xfrm_state_alloc(void)
183 	struct xfrm_state *x;
185 	x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
188 		memset(x, 0, sizeof(struct xfrm_state));
189 		atomic_set(&x->refcnt, 1);
190 		atomic_set(&x->tunnel_users, 0);
191 		INIT_LIST_HEAD(&x->bydst);
192 		INIT_LIST_HEAD(&x->byspi);
193 		init_timer(&x->timer);
194 		x->timer.function = xfrm_timer_handler;
195 		x->timer.data	  = (unsigned long)x;
196 		x->curlft.add_time = (unsigned long)xtime.tv_sec;
197 		x->lft.soft_byte_limit = XFRM_INF;
198 		x->lft.soft_packet_limit = XFRM_INF;
199 		x->lft.hard_byte_limit = XFRM_INF;
200 		x->lft.hard_packet_limit = XFRM_INF;
201 		spin_lock_init(&x->lock);
205 EXPORT_SYMBOL(xfrm_state_alloc);
/* Called when the last reference to a DEAD state is dropped: queue it
 * on the GC list (reusing its bydst list head) and schedule the GC
 * work item to free it in process context. */
207 void __xfrm_state_destroy(struct xfrm_state *x)
209 	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
211 	spin_lock_bh(&xfrm_state_gc_lock);
212 	list_add(&x->bydst, &xfrm_state_gc_list);
213 	spin_unlock_bh(&xfrm_state_gc_lock);
214 	schedule_work(&xfrm_state_gc_work);
216 EXPORT_SYMBOL(__xfrm_state_destroy);
/* Mark a state DEAD and unlink it from the bydst/byspi hash tables under
 * xfrm_state_lock, dropping the reference each table held.  Also drops
 * the timer's reference if the timer was still pending.  Caller must
 * hold x->lock (see xfrm_state_delete).
 * NOTE(review): the list_del calls between lines 222-229 are not visible
 * in this truncated view; the atomic_dec at 224/227 pair with them. */
218 static void __xfrm_state_delete(struct xfrm_state *x)
220 	if (x->km.state != XFRM_STATE_DEAD) {
221 		x->km.state = XFRM_STATE_DEAD;
222 		spin_lock(&xfrm_state_lock);
224 		atomic_dec(&x->refcnt);
227 		atomic_dec(&x->refcnt);
229 		spin_unlock(&xfrm_state_lock);
230 		if (del_timer(&x->timer))
231 			atomic_dec(&x->refcnt);
233 		/* The number two in this test is the reference
234 		 * mentioned in the comment below plus the reference
235 		 * our caller holds.  A larger value means that
236 		 * there are DSTs attached to this xfrm_state.
238 		if (atomic_read(&x->refcnt) > 2) {
239 			xfrm_state_gc_flush_bundles = 1;
240 			schedule_work(&xfrm_state_gc_work);
243 		/* All xfrm_state objects are created by xfrm_state_alloc.
244 		 * The xfrm_state_alloc call gives a reference, and that
245 		 * is what we are dropping here.
247 		atomic_dec(&x->refcnt);
/* Public entry point for deleting a state: takes x->lock (BH-safe) and
 * delegates to __xfrm_state_delete(). */
251 void xfrm_state_delete(struct xfrm_state *x)
253 	spin_lock_bh(&x->lock);
254 	__xfrm_state_delete(x);
255 	spin_unlock_bh(&x->lock);
257 EXPORT_SYMBOL(xfrm_state_delete);
/* Delete every non-kernel-held state whose protocol matches @proto
 * (IPSEC_PROTO_ANY matches all).  The table lock is dropped around each
 * xfrm_state_delete() call, so the hash-chain walk restarts afterwards.
 * NOTE(review): the refcount take/drop and restart goto between lines
 * 269-280 are not visible in this truncated view. */
259 void xfrm_state_flush(u8 proto)
262 	struct xfrm_state *x;
264 	spin_lock_bh(&xfrm_state_lock);
265 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
267 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
268 			if (!xfrm_state_kern(x) &&
269 			    (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
271 				spin_unlock_bh(&xfrm_state_lock);
273 				xfrm_state_delete(x);
276 				spin_lock_bh(&xfrm_state_lock);
281 	spin_unlock_bh(&xfrm_state_lock);
284 EXPORT_SYMBOL(xfrm_state_flush);
/* Initialise the temporary selector of a larval (ACQ) state from the
 * flow and template, delegating to the address-family-specific
 * init_tempsel hook.  The afinfo read lock is held across the call.
 * NOTE(review): return type and the afinfo NULL check are outside this
 * truncated view. */
287 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
288 		  struct xfrm_tmpl *tmpl,
289 		  xfrm_address_t *daddr, xfrm_address_t *saddr,
290 		  unsigned short family)
292 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
295 	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
296 	xfrm_state_put_afinfo(afinfo);
/* Resolve a policy template to a concrete SA for output.  Walks the
 * bydst hash chain for states matching family/reqid/addresses/mode/
 * proto(/spi), preferring the freshest VALID state whose selector
 * matches the flow.  If nothing is found and no acquire is already in
 * progress, a larval XFRM_STATE_ACQ state is created and the key
 * managers are queried via km_query(); the larval state expires after
 * XFRM_ACQ_EXPIRES seconds if never resolved.
 * On failure *err is set (-EAGAIN while an acquire is pending).
 * NOTE(review): several branch bodies and the tail (reference taking,
 * return) fall in gaps of this truncated view. */
301 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
302 		struct flowi *fl, struct xfrm_tmpl *tmpl,
303 		struct xfrm_policy *pol, int *err,
304 		unsigned short family)
306 	unsigned h = xfrm_dst_hash(daddr, family);
307 	struct xfrm_state *x, *x0;
308 	int acquire_in_progress = 0;
310 	struct xfrm_state *best = NULL;
311 	struct xfrm_state_afinfo *afinfo;
313 	afinfo = xfrm_state_get_afinfo(family);
314 	if (afinfo == NULL) {
315 		*err = -EAFNOSUPPORT;
319 	spin_lock_bh(&xfrm_state_lock);
320 	list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
321 		if (x->props.family == family &&
322 		    x->props.reqid == tmpl->reqid &&
323 		    xfrm_state_addr_check(x, daddr, saddr, family) &&
324 		    tmpl->mode == x->props.mode &&
325 		    tmpl->id.proto == x->id.proto &&
/* template SPI of 0 acts as a wildcard */
326 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
328 			   1. There is a valid state with matching selector.
330 			   2. Valid state with inappropriate selector. Skip.
332 			   Entering area of "sysdeps".
334 			   3. If state is not valid, selector is temporary,
335 			      it selects only session which triggered
336 			      previous resolution. Key manager will do
337 			      something to install a state with proper
340 			if (x->km.state == XFRM_STATE_VALID) {
341 				if (!xfrm_selector_match(&x->sel, fl, family))
/* prefer a non-dying state; among equals, the most recently added */
344 				    best->km.dying > x->km.dying ||
345 				    (best->km.dying == x->km.dying &&
346 				     best->curlft.add_time < x->curlft.add_time))
348 			} else if (x->km.state == XFRM_STATE_ACQ) {
349 				acquire_in_progress = 1;
350 			} else if (x->km.state == XFRM_STATE_ERROR ||
351 				   x->km.state == XFRM_STATE_EXPIRED) {
352 				if (xfrm_selector_match(&x->sel, fl, family))
359 	if (!x && !error && !acquire_in_progress) {
/* avoid colliding with an existing SA when the template pins an SPI */
360 		x0 = afinfo->state_lookup(&tmpl->id.daddr, tmpl->id.spi, tmpl->id.proto);
366 		x = xfrm_state_alloc();
371 		/* Initialize temporary selector matching only
372 		 * to current session. */
373 		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
375 		if (km_query(x, tmpl, pol) == 0) {
376 			x->km.state = XFRM_STATE_ACQ;
377 			list_add_tail(&x->bydst, xfrm_state_bydst+h);
380 				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
381 				list_add(&x->byspi, xfrm_state_byspi+h);
384 			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
386 			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
387 			add_timer(&x->timer);
389 			x->km.state = XFRM_STATE_DEAD;
399 		*err = acquire_in_progress ? -EAGAIN : error;
400 	spin_unlock_bh(&xfrm_state_lock);
401 	xfrm_state_put_afinfo(afinfo);
/* Link a state into the bydst hash (and, when it has an SPI, the byspi
 * hash), then kick its timer one second out so lifetimes are enforced.
 * Caller holds xfrm_state_lock.  Reference bumps per table fall in the
 * gaps of this truncated view. */
405 static void __xfrm_state_insert(struct xfrm_state *x)
407 	unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
409 	list_add(&x->bydst, xfrm_state_bydst+h);
412 	h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
414 	list_add(&x->byspi, xfrm_state_byspi+h);
417 	if (!mod_timer(&x->timer, jiffies + HZ))
/* Public wrapper: insert a state into the hash tables under
 * xfrm_state_lock (BH-safe). */
423 void xfrm_state_insert(struct xfrm_state *x)
425 	spin_lock_bh(&xfrm_state_lock);
426 	__xfrm_state_insert(x);
427 	spin_unlock_bh(&xfrm_state_lock);
429 EXPORT_SYMBOL(xfrm_state_insert);
431 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/* Add a fully-specified SA.  Fails if an SA with the same
 * (daddr, spi, proto) already exists; otherwise tries to find and
 * replace the matching larval (acquire) state — first by km sequence
 * number, then by find_acq — before inserting the new state.  The
 * superseded larval state, if any, is deleted after the lock is
 * dropped.  Error-path details fall in gaps of this truncated view. */
433 int xfrm_state_add(struct xfrm_state *x)
435 	struct xfrm_state_afinfo *afinfo;
436 	struct xfrm_state *x1;
440 	family = x->props.family;
441 	afinfo = xfrm_state_get_afinfo(family);
442 	if (unlikely(afinfo == NULL))
443 		return -EAFNOSUPPORT;
445 	spin_lock_bh(&xfrm_state_lock);
447 	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
456 	x1 = __xfrm_find_acq_byseq(x->km.seq);
/* a seq match with a different daddr is not ours — ignore it */
457 	if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
464 		x1 = afinfo->find_acq(
465 			x->props.mode, x->props.reqid, x->id.proto,
466 			&x->id.daddr, &x->props.saddr, 0);
468 	__xfrm_state_insert(x);
472 	spin_unlock_bh(&xfrm_state_lock);
473 	xfrm_state_put_afinfo(afinfo);
/* delete the larval state we replaced, outside xfrm_state_lock */
476 		xfrm_state_delete(x1);
482 EXPORT_SYMBOL(xfrm_state_add);
/* Update an existing SA identified by (daddr, spi, proto).  Kernel-held
 * states cannot be updated.  If the existing state is still larval
 * (ACQ), the new state is inserted and the larval one deleted;
 * otherwise the NAT-T encap template and lifetime limits are copied
 * into the live state under its lock, its timer is re-armed, and expiry
 * is re-checked.  Several error/return paths fall in gaps of this
 * truncated view. */
484 int xfrm_state_update(struct xfrm_state *x)
486 	struct xfrm_state_afinfo *afinfo;
487 	struct xfrm_state *x1;
490 	afinfo = xfrm_state_get_afinfo(x->props.family);
491 	if (unlikely(afinfo == NULL))
492 		return -EAFNOSUPPORT;
494 	spin_lock_bh(&xfrm_state_lock);
495 	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
501 	if (xfrm_state_kern(x1)) {
507 	if (x1->km.state == XFRM_STATE_ACQ) {
508 		__xfrm_state_insert(x);
514 	spin_unlock_bh(&xfrm_state_lock);
515 	xfrm_state_put_afinfo(afinfo);
521 		xfrm_state_delete(x1);
527 	spin_lock_bh(&x1->lock);
528 	if (likely(x1->km.state == XFRM_STATE_VALID)) {
529 		if (x->encap && x1->encap)
530 			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
531 		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
534 		if (!mod_timer(&x1->timer, jiffies + HZ))
536 		if (x1->curlft.use_time)
537 			xfrm_state_check_expire(x1);
541 	spin_unlock_bh(&x1->lock);
547 EXPORT_SYMBOL(xfrm_state_update);
/* Stamp first-use time and enforce byte/packet lifetime limits on a
 * VALID state.  A hard limit hit notifies key managers (hard=1) and
 * schedules the timer for final expiry; a soft limit hit only warns
 * (hard=0).  Return values fall in gaps of this truncated view. */
549 int xfrm_state_check_expire(struct xfrm_state *x)
551 	if (!x->curlft.use_time)
552 		x->curlft.use_time = (unsigned long)xtime.tv_sec;
554 	if (x->km.state != XFRM_STATE_VALID)
557 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
558 	    x->curlft.packets >= x->lft.hard_packet_limit) {
559 		km_state_expired(x, 1);
560 		if (!mod_timer(&x->timer, jiffies + XFRM_ACQ_EXPIRES*HZ))
566 	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
567 	     x->curlft.packets >= x->lft.soft_packet_limit))
568 		km_state_expired(x, 0);
571 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Ensure the skb has enough headroom for this transform's header plus
 * the output device's link-layer reserve, expanding the head if needed.
 * NOTE(review): the headroom comparison between lines 575-579 is not
 * visible in this truncated view. */
573 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
575 	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
579 		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
581 	/* Check tail too... */
/* Pre-output check for one transform: enforce lifetime expiry, then
 * make sure the skb has headroom for the transform header. */
585 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
587 	int err = xfrm_state_check_expire(x);
590 	err = xfrm_state_check_space(x, skb);
594 EXPORT_SYMBOL(xfrm_state_check);
/* Look up an SA by (daddr, spi, proto) through the address-family hook,
 * under xfrm_state_lock.  Returns NULL-or-state per the af hook; the
 * afinfo NULL check and return fall in gaps of this truncated view. */
597 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
598 		  unsigned short family)
600 	struct xfrm_state *x;
601 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
605 	spin_lock_bh(&xfrm_state_lock);
606 	x = afinfo->state_lookup(daddr, spi, proto);
607 	spin_unlock_bh(&xfrm_state_lock);
608 	xfrm_state_put_afinfo(afinfo);
611 EXPORT_SYMBOL(xfrm_state_lookup);
/* Find (or, when @create is set, create) a larval acquire state that
 * matches the given mode/reqid/proto/addresses, via the af-specific
 * find_acq hook, under xfrm_state_lock. */
614 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
615 	      xfrm_address_t *daddr, xfrm_address_t *saddr,
616 	      int create, unsigned short family)
618 	struct xfrm_state *x;
619 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
623 	spin_lock_bh(&xfrm_state_lock);
624 	x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
625 	spin_unlock_bh(&xfrm_state_lock);
626 	xfrm_state_put_afinfo(afinfo);
629 EXPORT_SYMBOL(xfrm_find_acq);
631 /* Silly enough, but I'm lazy to build resolution list */
/* Linear scan of the whole bydst table for an ACQ-state whose km.seq
 * matches @seq.  Caller holds xfrm_state_lock.  Reference taking and
 * return fall in gaps of this truncated view. */
633 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
636 	struct xfrm_state *x;
638 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
639 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
640 			if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper around __xfrm_find_acq_byseq(). */
649 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
651 	struct xfrm_state *x;
653 	spin_lock_bh(&xfrm_state_lock);
654 	x = __xfrm_find_acq_byseq(seq);
655 	spin_unlock_bh(&xfrm_state_lock);
658 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Hand out the next global acquire sequence number under a private
 * spinlock; the `++x ?: ++x` idiom skips 0, which is reserved. */
660 u32 xfrm_get_acqseq(void)
664 	static DEFINE_SPINLOCK(acqseq_lock);
666 	spin_lock_bh(&acqseq_lock);
667 	res = (++acqseq ? : ++acqseq);
668 	spin_unlock_bh(&acqseq_lock);
671 EXPORT_SYMBOL(xfrm_get_acqseq);
/* Allocate an SPI for state @x in [minspi, maxspi] (network byte order
 * on the wire, converted to host order for the random search).  When
 * minspi == maxspi only that exact value is probed; otherwise random
 * probing runs for at most the range size.  On success the state is
 * linked into the byspi hash under xfrm_state_lock.
 * NOTE(review): return type/value and the IPCOMP special-case range
 * fall in gaps of this truncated view. */
674 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
677 	struct xfrm_state *x0;
682 	if (minspi == maxspi) {
683 		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
691 		minspi = ntohl(minspi);
692 		maxspi = ntohl(maxspi);
693 		for (h=0; h<maxspi-minspi+1; h++) {
694 			spi = minspi + net_random()%(maxspi-minspi+1);
695 			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
697 				x->id.spi = htonl(spi);
704 		spin_lock_bh(&xfrm_state_lock);
705 		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
706 		list_add(&x->byspi, xfrm_state_byspi+h);
708 		spin_unlock_bh(&xfrm_state_lock);
712 EXPORT_SYMBOL(xfrm_alloc_spi);
/* Iterate all states matching @proto, invoking @func for each with a
 * decreasing count (so the callback can tell which entry is last).
 * Two passes under xfrm_state_lock: first count the matches, then call
 * the callback.  Error handling between the passes falls in gaps of
 * this truncated view. */
714 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
718 	struct xfrm_state *x;
722 	spin_lock_bh(&xfrm_state_lock);
723 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
724 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
725 			if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
734 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
735 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
736 			if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
738 			err = func(x, --count, data);
744 	spin_unlock_bh(&xfrm_state_lock);
747 EXPORT_SYMBOL(xfrm_state_walk);
/* Anti-replay check for sequence number @seq against the state's replay
 * window: reject seq 0, accept anything beyond the current high-water
 * mark, reject packets older than the window or whose bitmap bit is
 * already set (duplicates).  Return statements fall in gaps of this
 * truncated view. */
749 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
755 	if (unlikely(seq == 0))
758 	if (likely(seq > x->replay.seq))
761 	diff = x->replay.seq - seq;
762 	if (diff >= x->props.replay_window) {
763 		x->stats.replay_window++;
767 	if (x->replay.bitmap & (1U << diff)) {
773 EXPORT_SYMBOL(xfrm_replay_check);
/* Record @seq in the replay state: advancing past the high-water mark
 * shifts the window bitmap (or resets it when the jump exceeds the
 * window); an in-window older seq just sets its bitmap bit.  The
 * x->replay.seq update falls in a gap of this truncated view. */
775 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
781 	if (seq > x->replay.seq) {
782 		diff = seq - x->replay.seq;
783 		if (diff < x->props.replay_window)
784 			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
786 			x->replay.bitmap = 1;
789 		diff = x->replay.seq - seq;
790 		x->replay.bitmap |= (1U << diff);
793 EXPORT_SYMBOL(xfrm_replay_advance);
795 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
796 static DEFINE_RWLOCK(xfrm_km_lock);
/* Notify all registered key managers that a state expired.  A hard
 * expiry transitions the state to XFRM_STATE_EXPIRED first.  The
 * per-km notify call inside the loop falls in a gap of this truncated
 * view. */
798 static void km_state_expired(struct xfrm_state *x, int hard)
803 		x->km.state = XFRM_STATE_EXPIRED;
807 	read_lock(&xfrm_km_lock);
808 	list_for_each_entry(km, &xfrm_km_list, list)
810 	read_unlock(&xfrm_km_lock);
/* Ask each registered key manager to acquire keys for larval state @x
 * (outbound policy); stops at the first km that accepts.  Return path
 * falls in a gap of this truncated view. */
816 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
821 	read_lock(&xfrm_km_lock);
822 	list_for_each_entry(km, &xfrm_km_list, list) {
823 		err = km->acquire(x, t, pol, XFRM_POLICY_OUT);
827 	read_unlock(&xfrm_km_lock);
/* Report a new NAT-T address/port mapping for state @x to the key
 * managers; stops at the first km that handles it.  The new_mapping
 * capability check and return fall in gaps of this truncated view. */
831 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
836 	read_lock(&xfrm_km_lock);
837 	list_for_each_entry(km, &xfrm_km_list, list) {
839 		err = km->new_mapping(x, ipaddr, sport);
843 	read_unlock(&xfrm_km_lock);
846 EXPORT_SYMBOL(km_new_mapping);
/* Notify every key manager that implements notify_policy that a policy
 * expired (hard or soft). */
848 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
852 	read_lock(&xfrm_km_lock);
853 	list_for_each_entry(km, &xfrm_km_list, list)
854 		if (km->notify_policy)
855 			km->notify_policy(pol, dir, hard);
856 	read_unlock(&xfrm_km_lock);
/* Handle a per-socket policy setsockopt: copy the opaque policy blob
 * from userspace (bounded by PAGE_SIZE), let each key manager try to
 * compile it, and install the result on the socket.  Error paths and
 * the kfree fall in gaps of this truncated view. */
862 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
867 	struct xfrm_policy *pol = NULL;
869 	if (optlen <= 0 || optlen > PAGE_SIZE)
872 	data = kmalloc(optlen, GFP_KERNEL);
877 	if (copy_from_user(data, optval, optlen))
881 	read_lock(&xfrm_km_lock);
882 	list_for_each_entry(km, &xfrm_km_list, list) {
883 		pol = km->compile_policy(sk->sk_family, optname, data,
888 	read_unlock(&xfrm_km_lock);
891 		xfrm_sk_policy_insert(sk, err, pol);
900 EXPORT_SYMBOL(xfrm_user_policy);
/* Register a key manager (e.g. af_key, xfrm_user) on the global list. */
902 int xfrm_register_km(struct xfrm_mgr *km)
904 	write_lock_bh(&xfrm_km_lock);
905 	list_add_tail(&km->list, &xfrm_km_list);
906 	write_unlock_bh(&xfrm_km_lock);
909 EXPORT_SYMBOL(xfrm_register_km);
/* Unregister a key manager.  The list_del falls in a gap (line 914) of
 * this truncated view. */
911 int xfrm_unregister_km(struct xfrm_mgr *km)
913 	write_lock_bh(&xfrm_km_lock);
915 	write_unlock_bh(&xfrm_km_lock);
918 EXPORT_SYMBOL(xfrm_unregister_km);
/* Register per-address-family state operations.  Rejects NULL, families
 * outside NPROTO, and duplicate registration; on success publishes the
 * shared hash tables into the afinfo and stores it in the family slot.
 * Return codes for some branches fall in gaps of this truncated view. */
920 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
923 	if (unlikely(afinfo == NULL))
925 	if (unlikely(afinfo->family >= NPROTO))
926 		return -EAFNOSUPPORT;
927 	write_lock(&xfrm_state_afinfo_lock);
928 	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
931 		afinfo->state_bydst = xfrm_state_bydst;
932 		afinfo->state_byspi = xfrm_state_byspi;
933 		xfrm_state_afinfo[afinfo->family] = afinfo;
935 	write_unlock(&xfrm_state_afinfo_lock);
938 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/* Unregister per-address-family state operations.  Only the currently
 * registered afinfo for its family may be removed; the slot and the
 * afinfo's table pointers are cleared under the write lock. */
940 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
943 	if (unlikely(afinfo == NULL))
945 	if (unlikely(afinfo->family >= NPROTO))
946 		return -EAFNOSUPPORT;
947 	write_lock(&xfrm_state_afinfo_lock);
948 	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
949 		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
952 			xfrm_state_afinfo[afinfo->family] = NULL;
953 			afinfo->state_byspi = NULL;
954 			afinfo->state_bydst = NULL;
957 	write_unlock(&xfrm_state_afinfo_lock);
960 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the afinfo for @family and return it with its read lock held
 * (released later by xfrm_state_put_afinfo), pinning it against
 * unregistration while in use.  Returns NULL-path and final return fall
 * in gaps of this truncated view. */
962 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
964 	struct xfrm_state_afinfo *afinfo;
965 	if (unlikely(family >= NPROTO))
967 	read_lock(&xfrm_state_afinfo_lock);
968 	afinfo = xfrm_state_afinfo[family];
969 	if (likely(afinfo != NULL))
970 		read_lock(&afinfo->lock);
971 	read_unlock(&xfrm_state_afinfo_lock);
/* Release the per-afinfo read lock taken by xfrm_state_get_afinfo();
 * safe to call with NULL. */
975 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
977 	if (unlikely(afinfo == NULL))
979 	read_unlock(&afinfo->lock);
982 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop @x's reference on its tunnel state: when only the tunnel's own
 * self-reference would remain (tunnel_users == 2), delete the tunnel
 * state too, then decrement the user count.  The x->tunnel NULL check
 * and reset fall in gaps of this truncated view. */
983 void xfrm_state_delete_tunnel(struct xfrm_state *x)
986 		struct xfrm_state *t = x->tunnel;
988 		if (atomic_read(&t->tunnel_users) == 2)
989 			xfrm_state_delete(t);
990 		atomic_dec(&t->tunnel_users);
995 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
/* Compute the usable payload MTU through transform @x for link MTU
 * @mtu: subtract the transform header, then let a VALID state's type
 * round via get_max_size before adding the header back.  The trailer
 * handling and return fall in gaps of this truncated view. */
997 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1001 	res -= x->props.header_len;
1009 	spin_lock_bh(&x->lock);
1010 	if (x->km.state == XFRM_STATE_VALID &&
1011 	    x->type && x->type->get_max_size)
1012 		m = x->type->get_max_size(x, m);
1014 		m += x->props.header_len;
1015 	spin_unlock_bh(&x->lock);
1025 EXPORT_SYMBOL(xfrm_state_mtu);
/* Boot-time init: set up the bydst/byspi hash-chain heads and the GC
 * work item. */
1027 void __init xfrm_state_init(void)
1031 	for (i=0; i<XFRM_DST_HSIZE; i++) {
1032 		INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1033 		INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1035 	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);