6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
23 /* Each xfrm_state may be linked to two tables:
25 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
26 2. Hash table by daddr to find what SAs exist for given
27 destination/tunnel endpoint. (output)
30 static DEFINE_SPINLOCK(xfrm_state_lock);
32 /* Hash table to find appropriate SA towards given target (endpoint
33 * of tunnel or destination of transport mode) allowed by selector.
35 * Main use is finding SA after policy selected tunnel or transport mode.
36 * Also, it can be used by ah/esp icmp error handler to find offending SA.
38 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
39 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
41 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
42 EXPORT_SYMBOL(km_waitq);
44 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
45 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
47 static struct work_struct xfrm_state_gc_work;
48 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
49 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
51 static int xfrm_state_gc_flush_bundles;
53 static int __xfrm_state_delete(struct xfrm_state *x);
55 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
56 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
58 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
59 static void km_state_expired(struct xfrm_state *x, int hard);
/*
 * Final teardown of a dead xfrm_state from the GC worker: cancel any
 * pending timer, run the protocol-type destructor, and drop the type ref.
 * NOTE(review): source appears truncated here — the opening brace and
 * intermediate cleanup lines are missing from this view.
 */
61 static void xfrm_state_gc_destroy(struct xfrm_state *x)
63 	if (del_timer(&x->timer))
70 	x->type->destructor(x);
71 	xfrm_put_type(x->type);
/*
 * Workqueue handler: splice xfrm_state_gc_list onto a private list under
 * xfrm_state_gc_lock, then destroy each entry outside the lock via
 * xfrm_state_gc_destroy().  Entries are linked through their ->bydst node.
 * NOTE(review): declaration of 'x' and the bundle-flush body are missing
 * from this truncated view.
 */
76 static void xfrm_state_gc_task(void *data)
79 	struct list_head *entry, *tmp;
80 	struct list_head gc_list = LIST_HEAD_INIT(gc_list);
/* Flush stale dst bundles first if __xfrm_state_delete() requested it. */
82 	if (xfrm_state_gc_flush_bundles) {
83 		xfrm_state_gc_flush_bundles = 0;
87 	spin_lock_bh(&xfrm_state_gc_lock);
88 	list_splice_init(&xfrm_state_gc_list, &gc_list);
89 	spin_unlock_bh(&xfrm_state_gc_lock);
91 	list_for_each_safe(entry, tmp, &gc_list) {
92 		x = list_entry(entry, struct xfrm_state, bydst);
93 		xfrm_state_gc_destroy(x);
/*
 * Convert a relative timeout in seconds to jiffies, clamped so the result
 * never exceeds MAX_SCHEDULE_TIMEOUT-1 (avoids overflow in mod_timer()).
 * NOTE(review): the non-clamped return path is missing from this view.
 */
98 static inline unsigned long make_jiffies(long secs)
100 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
101 		return MAX_SCHEDULE_TIMEOUT-1;
/*
 * Per-state lifetime timer.  Checks the four lifetime limits (hard/soft x
 * add/use) against wall-clock seconds, fires soft expiry notifications via
 * km_state_expired(x, 0), re-arms the timer for the nearest future event,
 * and hard-deletes the state (notifying with hard=1) when a hard limit has
 * passed.  Runs with x->lock held (unlock visible at the end).
 * NOTE(review): several interior lines (goto labels, clamping of 'next')
 * are missing from this truncated view.
 */
106 static void xfrm_timer_handler(unsigned long data)
108 	struct xfrm_state *x = (struct xfrm_state*)data;
109 	unsigned long now = (unsigned long)xtime.tv_sec;
110 	long next = LONG_MAX;
114 	if (x->km.state == XFRM_STATE_DEAD)
116 	if (x->km.state == XFRM_STATE_EXPIRED)
/* Hard lifetimes: expiry forces deletion below. */
118 	if (x->lft.hard_add_expires_seconds) {
119 		long tmo = x->lft.hard_add_expires_seconds +
120 			x->curlft.add_time - now;
126 	if (x->lft.hard_use_expires_seconds) {
127 		long tmo = x->lft.hard_use_expires_seconds +
/* use_time of 0 means "never used yet"; treat as now. */
128 			(x->curlft.use_time ? : now) - now;
/* Soft lifetimes: only trigger a (hard=0) expiry notification. */
136 	if (x->lft.soft_add_expires_seconds) {
137 		long tmo = x->lft.soft_add_expires_seconds +
138 			x->curlft.add_time - now;
144 	if (x->lft.soft_use_expires_seconds) {
145 		long tmo = x->lft.soft_use_expires_seconds +
146 			(x->curlft.use_time ? : now) - now;
155 		km_state_expired(x, 0);
/* Re-arm for the nearest upcoming lifetime event, taking a timer ref. */
157 	if (next != LONG_MAX &&
158 	    !mod_timer(&x->timer, jiffies + make_jiffies(next)))
/* An ACQ state that never got an SPI simply expires without notification. */
163 	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
164 		x->km.state = XFRM_STATE_EXPIRED;
169 	if (!__xfrm_state_delete(x) && x->id.spi)
170 		km_state_expired(x, 1);
173 	spin_unlock(&x->lock);
/*
 * Allocate and initialize a new xfrm_state with one reference held by the
 * caller.  Byte/packet lifetime limits default to XFRM_INF (unlimited);
 * the expiry timer is set up but not armed.  May be called from atomic
 * context (GFP_ATOMIC).  Returns NULL on allocation failure (the NULL
 * check is among the lines missing from this truncated view).
 */
177 struct xfrm_state *xfrm_state_alloc(void)
179 	struct xfrm_state *x;
181 	x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
184 		memset(x, 0, sizeof(struct xfrm_state));
185 		atomic_set(&x->refcnt, 1);
186 		atomic_set(&x->tunnel_users, 0);
187 		INIT_LIST_HEAD(&x->bydst);
188 		INIT_LIST_HEAD(&x->byspi);
189 		init_timer(&x->timer);
190 		x->timer.function = xfrm_timer_handler;
191 		x->timer.data	  = (unsigned long)x;
192 		x->curlft.add_time = (unsigned long)xtime.tv_sec;
193 		x->lft.soft_byte_limit = XFRM_INF;
194 		x->lft.soft_packet_limit = XFRM_INF;
195 		x->lft.hard_byte_limit = XFRM_INF;
196 		x->lft.hard_packet_limit = XFRM_INF;
197 		spin_lock_init(&x->lock);
201 EXPORT_SYMBOL(xfrm_state_alloc);
/*
 * Queue a DEAD state for deferred destruction: reuse its ->bydst node to
 * chain it onto xfrm_state_gc_list and kick the GC work.  Actual teardown
 * happens later in xfrm_state_gc_task() in process context.
 */
203 void __xfrm_state_destroy(struct xfrm_state *x)
205 	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
207 	spin_lock_bh(&xfrm_state_gc_lock);
208 	list_add(&x->bydst, &xfrm_state_gc_list);
209 	spin_unlock_bh(&xfrm_state_gc_lock);
210 	schedule_work(&xfrm_state_gc_work);
212 EXPORT_SYMBOL(__xfrm_state_destroy);
/*
 * Mark a state DEAD and unlink it from the bydst/byspi hash tables and its
 * timer, dropping one reference for each.  Caller must hold x->lock.
 * If references beyond the caller's and the allocation ref remain, DSTs
 * are still attached, so schedule a bundle flush via the GC work.
 * NOTE(review): the list-unlink calls and return statement are missing
 * from this truncated view.
 */
214 static int __xfrm_state_delete(struct xfrm_state *x)
218 	if (x->km.state != XFRM_STATE_DEAD) {
219 		x->km.state = XFRM_STATE_DEAD;
220 		spin_lock(&xfrm_state_lock);
221 		atomic_dec(&x->refcnt);
225 		atomic_dec(&x->refcnt);
227 		spin_unlock(&xfrm_state_lock);
/* A pending timer holds a reference; drop it if we cancelled one. */
228 		if (del_timer(&x->timer))
229 			atomic_dec(&x->refcnt);
231 		/* The number two in this test is the reference
232 		 * mentioned in the comment below plus the reference
233 		 * our caller holds.  A larger value means that
234 		 * there are DSTs attached to this xfrm_state.
236 		if (atomic_read(&x->refcnt) > 2) {
237 			xfrm_state_gc_flush_bundles = 1;
238 			schedule_work(&xfrm_state_gc_work);
241 		/* All xfrm_state objects are created by xfrm_state_alloc.
242 		 * The xfrm_state_alloc call gives a reference, and that
243 		 * is what we are dropping here.
245 		atomic_dec(&x->refcnt);
/*
 * Public deletion entry point: takes x->lock (BH-safe) around
 * __xfrm_state_delete() and returns its result.
 */
252 int xfrm_state_delete(struct xfrm_state *x)
256 	spin_lock_bh(&x->lock);
257 	err = __xfrm_state_delete(x);
258 	spin_unlock_bh(&x->lock);
262 EXPORT_SYMBOL(xfrm_state_delete);
/*
 * Delete every non-kernel-owned state matching 'proto' (IPSEC_PROTO_ANY
 * matches all).  Walks each bydst hash chain; drops xfrm_state_lock around
 * each xfrm_state_delete() call (deletion re-takes it), then restarts the
 * chain since the list may have changed while unlocked.
 * NOTE(review): the hold-ref/put and restart-goto lines are missing from
 * this truncated view.
 */
264 void xfrm_state_flush(u8 proto)
267 	struct xfrm_state *x;
269 	spin_lock_bh(&xfrm_state_lock);
270 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
272 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
273 			if (!xfrm_state_kern(x) &&
274 			    (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
276 				spin_unlock_bh(&xfrm_state_lock);
278 				xfrm_state_delete(x);
281 				spin_lock_bh(&xfrm_state_lock);
286 	spin_unlock_bh(&xfrm_state_lock);
289 EXPORT_SYMBOL(xfrm_state_flush);
/*
 * Initialize a temporary selector on an ACQ state, delegating to the
 * address-family-specific init_tempsel() hook.  Fails (path not visible
 * here) when no afinfo is registered for 'family'.
 */
292 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
293 		  struct xfrm_tmpl *tmpl,
294 		  xfrm_address_t *daddr, xfrm_address_t *saddr,
295 		  unsigned short family)
297 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
300 	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
301 	xfrm_state_put_afinfo(afinfo);
/*
 * Resolve the SA to use for an outbound flow: scan the bydst hash chain
 * for a VALID state matching the template (family, reqid, addresses, mode,
 * proto, optional SPI) and the flow selector, preferring the youngest
 * non-dying candidate.  If none exists and no acquire is in progress,
 * create a larval XFRM_STATE_ACQ state, issue km_query() to the key
 * managers, and arm an XFRM_ACQ_EXPIRES timeout.  On failure sets *err
 * (-EAGAIN while an acquire is pending).
 * NOTE(review): error/refcount bookkeeping lines are missing from this
 * truncated view.
 */
306 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
307 		struct flowi *fl, struct xfrm_tmpl *tmpl,
308 		struct xfrm_policy *pol, int *err,
309 		unsigned short family)
311 	unsigned h = xfrm_dst_hash(daddr, family);
312 	struct xfrm_state *x, *x0;
313 	int acquire_in_progress = 0;
315 	struct xfrm_state *best = NULL;
316 	struct xfrm_state_afinfo *afinfo;
318 	afinfo = xfrm_state_get_afinfo(family);
319 	if (afinfo == NULL) {
320 		*err = -EAFNOSUPPORT;
324 	spin_lock_bh(&xfrm_state_lock);
325 	list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
326 		if (x->props.family == family &&
327 		    x->props.reqid == tmpl->reqid &&
328 		    xfrm_state_addr_check(x, daddr, saddr, family) &&
329 		    tmpl->mode == x->props.mode &&
330 		    tmpl->id.proto == x->id.proto &&
331 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
333 			   1. There is a valid state with matching selector.
335 			   2. Valid state with inappropriate selector. Skip.
337 			   Entering area of "sysdeps".
339 			   3. If state is not valid, selector is temporary,
340 			      it selects only session which triggered
341 			      previous resolution. Key manager will do
342 			      something to install a state with proper
345 			if (x->km.state == XFRM_STATE_VALID) {
346 				if (!xfrm_selector_match(&x->sel, fl, family))
/* Prefer the least-dying, most recently added candidate. */
349 				    best->km.dying > x->km.dying ||
350 				    (best->km.dying == x->km.dying &&
351 				     best->curlft.add_time < x->curlft.add_time))
353 			} else if (x->km.state == XFRM_STATE_ACQ) {
354 				acquire_in_progress = 1;
355 			} else if (x->km.state == XFRM_STATE_ERROR ||
356 				   x->km.state == XFRM_STATE_EXPIRED) {
357 				if (xfrm_selector_match(&x->sel, fl, family))
364 	if (!x && !error && !acquire_in_progress) {
/* A fixed-SPI template must not collide with an existing SA. */
365 		    (x0 = afinfo->state_lookup(daddr, tmpl->id.spi,
366 					       tmpl->id.proto)) != NULL) {
372 		x = xfrm_state_alloc();
377 		/* Initialize temporary selector matching only
378 		 * to current session. */
379 		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
381 		if (km_query(x, tmpl, pol) == 0) {
382 			x->km.state = XFRM_STATE_ACQ;
383 			list_add_tail(&x->bydst, xfrm_state_bydst+h);
386 				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
387 				list_add(&x->byspi, xfrm_state_byspi+h);
390 			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
392 			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
393 			add_timer(&x->timer);
/* Key manager rejected the query: the larval state is dead. */
395 			x->km.state = XFRM_STATE_DEAD;
405 		*err = acquire_in_progress ? -EAGAIN : error;
406 	spin_unlock_bh(&xfrm_state_lock);
407 	xfrm_state_put_afinfo(afinfo);
/*
 * Link a state into the bydst hash, and into the byspi hash when it has an
 * SPI, then arm a short (1s) timer tick.  Caller holds xfrm_state_lock;
 * the refcount bumps paired with these links are among the missing lines.
 */
411 static void __xfrm_state_insert(struct xfrm_state *x)
413 	unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
415 	list_add(&x->bydst, xfrm_state_bydst+h);
418 	h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
420 	list_add(&x->byspi, xfrm_state_byspi+h);
423 	if (!mod_timer(&x->timer, jiffies + HZ))
/*
 * Public wrapper: insert a state into the hash tables under
 * xfrm_state_lock (BH-safe).
 */
429 void xfrm_state_insert(struct xfrm_state *x)
431 	spin_lock_bh(&xfrm_state_lock);
432 	__xfrm_state_insert(x);
433 	spin_unlock_bh(&xfrm_state_lock);
435 EXPORT_SYMBOL(xfrm_state_insert);
437 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/*
 * Add a fully-specified state.  Fails (path not fully visible) if an SA
 * with the same (daddr, spi, proto) already exists.  Otherwise locates any
 * matching larval ACQ state — by km.seq first, then by find_acq() — so it
 * can be deleted after the new state is inserted, completing the acquire.
 * NOTE(review): error-return and refcount lines are missing from this
 * truncated view.
 */
439 int xfrm_state_add(struct xfrm_state *x)
441 	struct xfrm_state_afinfo *afinfo;
442 	struct xfrm_state *x1;
446 	family = x->props.family;
447 	afinfo = xfrm_state_get_afinfo(family);
448 	if (unlikely(afinfo == NULL))
449 		return -EAFNOSUPPORT;
451 	spin_lock_bh(&xfrm_state_lock);
453 	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
/* A seq match whose daddr differs is not our acquire; discard it. */
462 		x1 = __xfrm_find_acq_byseq(x->km.seq);
463 		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
470 		x1 = afinfo->find_acq(
471 			x->props.mode, x->props.reqid, x->id.proto,
472 			&x->id.daddr, &x->props.saddr, 0);
474 	__xfrm_state_insert(x);
478 	spin_unlock_bh(&xfrm_state_lock);
479 	xfrm_state_put_afinfo(afinfo);
/* Remove the larval ACQ state now that the real SA is installed. */
482 		xfrm_state_delete(x1);
488 EXPORT_SYMBOL(xfrm_state_add);
/*
 * Update an existing SA in place.  Looks up the current state by
 * (daddr, spi, proto): kernel-owned states may not be replaced; a larval
 * ACQ state is superseded by inserting 'x' and deleting it; a VALID state
 * has its encap parameters and lifetimes copied from 'x' and its timer
 * re-armed.  NOTE(review): error returns and the not-found path are
 * missing from this truncated view.
 */
490 int xfrm_state_update(struct xfrm_state *x)
492 	struct xfrm_state_afinfo *afinfo;
493 	struct xfrm_state *x1;
496 	afinfo = xfrm_state_get_afinfo(x->props.family);
497 	if (unlikely(afinfo == NULL))
498 		return -EAFNOSUPPORT;
500 	spin_lock_bh(&xfrm_state_lock);
501 	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
507 	if (xfrm_state_kern(x1)) {
513 	if (x1->km.state == XFRM_STATE_ACQ) {
514 		__xfrm_state_insert(x);
520 	spin_unlock_bh(&xfrm_state_lock);
521 	xfrm_state_put_afinfo(afinfo);
527 		xfrm_state_delete(x1);
533 	spin_lock_bh(&x1->lock);
534 	if (likely(x1->km.state == XFRM_STATE_VALID)) {
535 		if (x->encap && x1->encap)
536 			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
537 		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
540 		if (!mod_timer(&x1->timer, jiffies + HZ))
/* Re-evaluate lifetimes immediately if the SA is already in use. */
542 		if (x1->curlft.use_time)
543 			xfrm_state_check_expire(x1);
547 	spin_unlock_bh(&x1->lock);
553 EXPORT_SYMBOL(xfrm_state_update);
/*
 * Stamp first-use time and enforce byte/packet lifetime limits.  A hard
 * limit marks the state EXPIRED and fires the timer immediately; a soft
 * limit (while not yet dying — condition partially missing here) sends a
 * soft km_state_expired() notification.
 */
555 int xfrm_state_check_expire(struct xfrm_state *x)
557 	if (!x->curlft.use_time)
558 		x->curlft.use_time = (unsigned long)xtime.tv_sec;
560 	if (x->km.state != XFRM_STATE_VALID)
563 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
564 	    x->curlft.packets >= x->lft.hard_packet_limit) {
565 		x->km.state = XFRM_STATE_EXPIRED;
566 		if (!mod_timer(&x->timer, jiffies))
572 	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
573 	     x->curlft.packets >= x->lft.soft_packet_limit)) {
575 		km_state_expired(x, 0);
579 EXPORT_SYMBOL(xfrm_state_check_expire);
/*
 * Ensure the skb has enough headroom for this transform's header plus the
 * output device's link-layer reserve, expanding the head if needed.
 */
581 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
583 	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
587 		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
589 	/* Check tail too... */
/*
 * Pre-output check: verify the state's lifetimes, then verify skb
 * headroom.  Returns the first non-zero error.
 */
593 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
595 	int err = xfrm_state_check_expire(x);
598 	err = xfrm_state_check_space(x, skb);
602 EXPORT_SYMBOL(xfrm_state_check);
/*
 * Look up an SA by (daddr, spi, proto) via the family-specific
 * state_lookup() hook, under xfrm_state_lock.  Returns NULL when the
 * family has no registered afinfo (early-return line not visible here).
 */
605 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
606 		  unsigned short family)
608 	struct xfrm_state *x;
609 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
613 	spin_lock_bh(&xfrm_state_lock);
614 	x = afinfo->state_lookup(daddr, spi, proto);
615 	spin_unlock_bh(&xfrm_state_lock);
616 	xfrm_state_put_afinfo(afinfo);
619 EXPORT_SYMBOL(xfrm_state_lookup);
/*
 * Find (or, when 'create' is set, create) a larval acquire state matching
 * (mode, reqid, proto, daddr, saddr) via the family-specific find_acq()
 * hook, under xfrm_state_lock.
 */
622 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
623 	      xfrm_address_t *daddr, xfrm_address_t *saddr,
624 	      int create, unsigned short family)
626 	struct xfrm_state *x;
627 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
631 	spin_lock_bh(&xfrm_state_lock);
632 	x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
633 	spin_unlock_bh(&xfrm_state_lock);
634 	xfrm_state_put_afinfo(afinfo);
637 EXPORT_SYMBOL(xfrm_find_acq);
639 /* Silly enough, but I'm lazy to build resolution list */
/*
 * Linear scan of every bydst chain for an ACQ state whose km.seq matches.
 * Caller must hold xfrm_state_lock.
 */
641 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
644 	struct xfrm_state *x;
646 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
647 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
648 			if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
/*
 * Locked wrapper around __xfrm_find_acq_byseq().
 */
657 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
659 	struct xfrm_state *x;
661 	spin_lock_bh(&xfrm_state_lock);
662 	x = __xfrm_find_acq_byseq(seq);
663 	spin_unlock_bh(&xfrm_state_lock);
666 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/*
 * Hand out the next non-zero acquire sequence number under a local
 * spinlock.  The `++acqseq ? : ++acqseq` idiom skips the value 0 on
 * counter wrap-around.
 */
668 u32 xfrm_get_acqseq(void)
672 	static DEFINE_SPINLOCK(acqseq_lock);
674 	spin_lock_bh(&acqseq_lock);
675 	res = (++acqseq ? : ++acqseq);
676 	spin_unlock_bh(&acqseq_lock);
679 EXPORT_SYMBOL(xfrm_get_acqseq);
/*
 * Assign an SPI to state 'x' from [minspi, maxspi].  If the range is a
 * single value, take it only if unused; otherwise probe random SPIs in
 * host byte order until a free one is found (bounded by the range size).
 * On success the state is linked into the byspi hash under
 * xfrm_state_lock.  NOTE(review): collision handling and the early-exit
 * conditions are missing from this truncated view.
 */
682 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
685 	struct xfrm_state *x0;
690 	if (minspi == maxspi) {
691 		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
/* Work in host byte order for the random probing below. */
699 		minspi = ntohl(minspi);
700 		maxspi = ntohl(maxspi);
701 		for (h=0; h<maxspi-minspi+1; h++) {
702 			spi = minspi + net_random()%(maxspi-minspi+1);
703 			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
705 				x->id.spi = htonl(spi);
712 		spin_lock_bh(&xfrm_state_lock);
713 		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
714 		list_add(&x->byspi, xfrm_state_byspi+h);
716 		spin_unlock_bh(&xfrm_state_lock);
720 EXPORT_SYMBOL(xfrm_alloc_spi);
/*
 * Iterate all states matching 'proto' under xfrm_state_lock, in two
 * passes: first count the matches, then invoke func(x, --count, data) for
 * each so the callback can tell when it sees the last entry (count==0).
 * Stops early (path partially missing) when func returns an error.
 */
722 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
726 	struct xfrm_state *x;
730 	spin_lock_bh(&xfrm_state_lock);
731 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
732 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
733 			if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
742 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
743 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
744 			if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
746 			err = func(x, --count, data);
752 	spin_unlock_bh(&xfrm_state_lock);
755 EXPORT_SYMBOL(xfrm_state_walk);
/*
 * Anti-replay check for an incoming sequence number: reject seq 0, accept
 * anything ahead of the window, reject sequence numbers older than the
 * window or already marked in the replay bitmap (bumping the
 * replay_window stat for the former).
 */
757 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
763 	if (unlikely(seq == 0))
766 	if (likely(seq > x->replay.seq))
769 	diff = x->replay.seq - seq;
770 	if (diff >= x->props.replay_window) {
771 		x->stats.replay_window++;
775 	if (x->replay.bitmap & (1U << diff)) {
781 EXPORT_SYMBOL(xfrm_replay_check);
/*
 * Record a verified sequence number in the replay window: slide the bitmap
 * forward (or reset it when the jump exceeds the window) for a new-highest
 * seq, or set the corresponding in-window bit for an older one.
 */
783 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
789 	if (seq > x->replay.seq) {
790 		diff = seq - x->replay.seq;
791 		if (diff < x->props.replay_window)
792 			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
794 			x->replay.bitmap = 1;
797 		diff = x->replay.seq - seq;
798 		x->replay.bitmap |= (1U << diff);
801 EXPORT_SYMBOL(xfrm_replay_advance);
803 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
804 static DEFINE_RWLOCK(xfrm_km_lock);
/*
 * Broadcast a policy event to every registered key manager that
 * implements notify_policy, under the km read lock.
 */
806 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
810 	read_lock(&xfrm_km_lock);
811 	list_for_each_entry(km, &xfrm_km_list, list)
812 		if (km->notify_policy)
813 			km->notify_policy(xp, dir, c);
814 	read_unlock(&xfrm_km_lock);
/*
 * Broadcast a state event to every registered key manager, under the km
 * read lock (the per-manager notify call is among the missing lines).
 */
817 void km_state_notify(struct xfrm_state *x, struct km_event *c)
820 	read_lock(&xfrm_km_lock);
821 	list_for_each_entry(km, &xfrm_km_list, list)
824 	read_unlock(&xfrm_km_lock);
827 EXPORT_SYMBOL(km_policy_notify);
828 EXPORT_SYMBOL(km_state_notify);
/*
 * Notify key managers that a state's lifetime expired, as an
 * XFRM_MSG_EXPIRE event (hard/soft flag setup is among the missing lines).
 */
830 static void km_state_expired(struct xfrm_state *x, int hard)
835 	c.event = XFRM_MSG_EXPIRE;
836 	km_state_notify(x, &c);
843  * We send to all registered managers regardless of failure
844  * We are happy with one success
/*
 * Ask every registered key manager to acquire an SA for template 't':
 * returns success if at least one manager accepted (accumulation of
 * 'acqret' into 'err' is among the missing lines).
 */
846 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
848 	int err = -EINVAL, acqret;
851 	read_lock(&xfrm_km_lock);
852 	list_for_each_entry(km, &xfrm_km_list, list) {
853 		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
857 	read_unlock(&xfrm_km_lock);
/*
 * Report a NAT-T address/port mapping change to registered key managers
 * that implement new_mapping; stops on first success (break condition is
 * among the missing lines).
 */
861 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
866 	read_lock(&xfrm_km_lock);
867 	list_for_each_entry(km, &xfrm_km_list, list) {
869 			err = km->new_mapping(x, ipaddr, sport);
873 	read_unlock(&xfrm_km_lock);
876 EXPORT_SYMBOL(km_new_mapping);
/*
 * Notify key managers that a policy's lifetime expired, as an
 * XFRM_MSG_POLEXPIRE event (hard/soft flag setup is among the missing
 * lines).
 */
878 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
883 	c.event = XFRM_MSG_POLEXPIRE;
884 	km_policy_notify(pol, dir, &c);
/*
 * setsockopt() path for per-socket IPsec policy: bounds-check and copy the
 * user buffer, ask each key manager's compile_policy() to parse it, then
 * install the resulting policy on the socket.  NOTE(review): kfree of
 * 'data' and the error paths are missing from this truncated view.
 */
890 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
895 	struct xfrm_policy *pol = NULL;
/* Reject empty or oversized option buffers before allocating. */
897 	if (optlen <= 0 || optlen > PAGE_SIZE)
900 	data = kmalloc(optlen, GFP_KERNEL);
905 	if (copy_from_user(data, optval, optlen))
909 	read_lock(&xfrm_km_lock);
910 	list_for_each_entry(km, &xfrm_km_list, list) {
911 		pol = km->compile_policy(sk->sk_family, optname, data,
916 	read_unlock(&xfrm_km_lock);
919 		xfrm_sk_policy_insert(sk, err, pol);
928 EXPORT_SYMBOL(xfrm_user_policy);
/*
 * Register a key manager: append it to xfrm_km_list under the write lock.
 */
930 int xfrm_register_km(struct xfrm_mgr *km)
932 	write_lock_bh(&xfrm_km_lock);
933 	list_add_tail(&km->list, &xfrm_km_list);
934 	write_unlock_bh(&xfrm_km_lock);
937 EXPORT_SYMBOL(xfrm_register_km);
/*
 * Unregister a key manager under the write lock (the list_del itself is
 * among the missing lines in this truncated view).
 */
939 int xfrm_unregister_km(struct xfrm_mgr *km)
941 	write_lock_bh(&xfrm_km_lock);
943 	write_unlock_bh(&xfrm_km_lock);
946 EXPORT_SYMBOL(xfrm_unregister_km);
/*
 * Register per-address-family state operations.  Validates the family
 * index, rejects double registration, hands the afinfo pointers to the
 * shared bydst/byspi hash tables, and publishes it in xfrm_state_afinfo[]
 * under the afinfo write lock.
 */
948 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
951 	if (unlikely(afinfo == NULL))
953 	if (unlikely(afinfo->family >= NPROTO))
954 		return -EAFNOSUPPORT;
955 	write_lock(&xfrm_state_afinfo_lock);
956 	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
959 		afinfo->state_bydst = xfrm_state_bydst;
960 		afinfo->state_byspi = xfrm_state_byspi;
961 		xfrm_state_afinfo[afinfo->family] = afinfo;
963 	write_unlock(&xfrm_state_afinfo_lock);
966 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/*
 * Unregister per-address-family state operations: verify the slot holds
 * this exact afinfo, then clear the slot and its hash-table pointers under
 * the afinfo write lock.
 */
968 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
971 	if (unlikely(afinfo == NULL))
973 	if (unlikely(afinfo->family >= NPROTO))
974 		return -EAFNOSUPPORT;
975 	write_lock(&xfrm_state_afinfo_lock);
976 	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
977 		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
980 			xfrm_state_afinfo[afinfo->family] = NULL;
981 			afinfo->state_byspi = NULL;
982 			afinfo->state_bydst = NULL;
985 	write_unlock(&xfrm_state_afinfo_lock);
988 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/*
 * Look up the afinfo for 'family' and pin it by taking its per-afinfo read
 * lock before releasing the table lock — the caller must release it with
 * xfrm_state_put_afinfo().  Returns NULL for an out-of-range or
 * unregistered family (the return statement is among the missing lines).
 */
990 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
992 	struct xfrm_state_afinfo *afinfo;
993 	if (unlikely(family >= NPROTO))
995 	read_lock(&xfrm_state_afinfo_lock);
996 	afinfo = xfrm_state_afinfo[family];
997 	if (likely(afinfo != NULL))
998 		read_lock(&afinfo->lock);
999 	read_unlock(&xfrm_state_afinfo_lock);
/*
 * Release the per-afinfo read lock taken by xfrm_state_get_afinfo().
 * NULL-tolerant so callers can pass through a failed lookup.
 */
1003 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1005 	if (unlikely(afinfo == NULL))
1007 	read_unlock(&afinfo->lock);
1010 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/*
 * Drop this state's reference on its tunnel state: delete the tunnel when
 * we hold the last user reference besides the tunnel's own (count == 2),
 * then decrement tunnel_users.  The x->tunnel NULL-check and reset are
 * among the missing lines in this truncated view.
 */
1011 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1014 		struct xfrm_state *t = x->tunnel;
1016 		if (atomic_read(&t->tunnel_users) == 2)
1017 			xfrm_state_delete(t);
1018 		atomic_dec(&t->tunnel_users);
1023 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1026  * This function is NOT optimal.  For example, with ESP it will give an
1027  * MTU that's usually two bytes short of being optimal.  However, it will
1028  * usually give an answer that's a multiple of 4 provided the input is
1029  * also a multiple of 4.
/*
 * Compute the usable payload MTU inside this transform for a given link
 * MTU: subtract the transform header, let the protocol type's
 * get_max_size() round the payload, and add the header back.
 * NOTE(review): the loop/return bookkeeping around 'm' and 'res' is
 * missing from this truncated view.
 */
1031 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1035 	res -= x->props.header_len;
1043 		spin_lock_bh(&x->lock);
1044 		if (x->km.state == XFRM_STATE_VALID &&
1045 		    x->type && x->type->get_max_size)
1046 			m = x->type->get_max_size(x, m);
1048 			m += x->props.header_len;
1049 		spin_unlock_bh(&x->lock);
1059 EXPORT_SYMBOL(xfrm_state_mtu);
/*
 * Final initialization of a populated xfrm_state: run the address-family
 * init_flags() hook (if any), bind the protocol type handler for
 * x->id.proto, run its init_state(), and on success mark the state VALID.
 * Returns -EAFNOSUPPORT / -EPROTONOSUPPORT when family or protocol has no
 * registered handler (error-exit lines are among those missing here).
 */
1061 int xfrm_init_state(struct xfrm_state *x)
1063 	struct xfrm_state_afinfo *afinfo;
1064 	int family = x->props.family;
1067 	err = -EAFNOSUPPORT;
1068 	afinfo = xfrm_state_get_afinfo(family);
1073 	if (afinfo->init_flags)
1074 		err = afinfo->init_flags(x);
1076 	xfrm_state_put_afinfo(afinfo);
1081 	err = -EPROTONOSUPPORT;
1082 	x->type = xfrm_get_type(x->id.proto, family);
1083 	if (x->type == NULL)
1086 	err = x->type->init_state(x);
1090 	x->km.state = XFRM_STATE_VALID;
1096 EXPORT_SYMBOL(xfrm_init_state);
1098 void __init xfrm_state_init(void)
1102 for (i=0; i<XFRM_DST_HSIZE; i++) {
1103 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
1104 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
1106 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);