6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <asm/uaccess.h>
24 #include "xfrm_hash.h"
27 EXPORT_SYMBOL(xfrm_nl);
29 u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
30 EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);
32 u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
33 EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);
35 u32 sysctl_xfrm_acq_expires __read_mostly = 30;
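/* Tunable defaults: how often replay/aevent notifications may fire and
 * how long a larval ACQUIRE state lives (in seconds) before it expires. */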
37 /* Each xfrm_state may be linked to two tables:
39 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
40 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
41 destination/tunnel endpoint. (output)
44 static DEFINE_SPINLOCK(xfrm_state_lock);
46 /* Hash table to find appropriate SA towards given target (endpoint
47 * of tunnel or destination of transport mode) allowed by selector.
49 * Main use is finding SA after policy selected tunnel or transport mode.
50 * Also, it can be used by ah/esp icmp error handler to find offending SA.
52 static struct hlist_head *xfrm_state_bydst __read_mostly;
53 static struct hlist_head *xfrm_state_bysrc __read_mostly;
54 static struct hlist_head *xfrm_state_byspi __read_mostly;
55 static unsigned int xfrm_state_hmask __read_mostly;
56 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
57 static unsigned int xfrm_state_num;
58 static unsigned int xfrm_state_genid;
60 static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
61 xfrm_address_t *saddr,
63 unsigned short family)
65 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
68 static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
69 xfrm_address_t *saddr,
70 unsigned short family)
72 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
75 static inline unsigned int
76 xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
78 return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
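/* Re-hash one bydst chain into the new bydst/bysrc/byspi tables using the
 * new mask; used only by the resize worker below, under xfrm_state_lock. */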
81 static void xfrm_hash_transfer(struct hlist_head *list,
82 struct hlist_head *ndsttable,
83 struct hlist_head *nsrctable,
84 struct hlist_head *nspitable,
85 unsigned int nhashmask)
87 struct hlist_node *entry, *tmp;
90 hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
93 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
94 x->props.reqid, x->props.family,
96 hlist_add_head(&x->bydst, ndsttable+h);
98 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
101 hlist_add_head(&x->bysrc, nsrctable+h);
104 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
105 x->id.proto, x->props.family,
107 hlist_add_head(&x->byspi, nspitable+h);
112 static unsigned long xfrm_hash_new_size(void)
114 return ((xfrm_state_hmask + 1) << 1) *
115 sizeof(struct hlist_head);
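/* Resize worker: allocate tables of twice the current size, re-hash every
 * state under xfrm_state_lock, then free the old tables. */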
118 static DEFINE_MUTEX(hash_resize_mutex);
120 static void xfrm_hash_resize(struct work_struct *__unused)
122 struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
123 unsigned long nsize, osize;
124 unsigned int nhashmask, ohashmask;
127 mutex_lock(&hash_resize_mutex);
129 nsize = xfrm_hash_new_size();
130 ndst = xfrm_hash_alloc(nsize);
133 nsrc = xfrm_hash_alloc(nsize);
135 xfrm_hash_free(ndst, nsize);
138 nspi = xfrm_hash_alloc(nsize);
140 xfrm_hash_free(ndst, nsize);
141 xfrm_hash_free(nsrc, nsize);
145 spin_lock_bh(&xfrm_state_lock);
147 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
148 for (i = xfrm_state_hmask; i >= 0; i--)
149 xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
152 odst = xfrm_state_bydst;
153 osrc = xfrm_state_bysrc;
154 ospi = xfrm_state_byspi;
155 ohashmask = xfrm_state_hmask;
157 xfrm_state_bydst = ndst;
158 xfrm_state_bysrc = nsrc;
159 xfrm_state_byspi = nspi;
160 xfrm_state_hmask = nhashmask;
162 spin_unlock_bh(&xfrm_state_lock);
164 osize = (ohashmask + 1) * sizeof(struct hlist_head);
165 xfrm_hash_free(odst, osize);
166 xfrm_hash_free(osrc, osize);
167 xfrm_hash_free(ospi, osize);
170 mutex_unlock(&hash_resize_mutex);
173 static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
175 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
176 EXPORT_SYMBOL(km_waitq);
178 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
179 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
181 static struct work_struct xfrm_state_gc_work;
182 static HLIST_HEAD(xfrm_state_gc_list);
183 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
185 int __xfrm_state_delete(struct xfrm_state *x);
187 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
188 void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
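/* Final teardown of a dead state: stop its timers and drop the mode, type
 * and security data it still references. */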
190 static void xfrm_state_gc_destroy(struct xfrm_state *x)
192 del_timer_sync(&x->timer);
193 del_timer_sync(&x->rtimer);
200 xfrm_put_mode(x->mode);
202 x->type->destructor(x);
203 xfrm_put_type(x->type);
205 security_xfrm_state_free(x);
209 static void xfrm_state_gc_task(struct work_struct *data)
211 struct xfrm_state *x;
212 struct hlist_node *entry, *tmp;
213 struct hlist_head gc_list;
215 spin_lock_bh(&xfrm_state_gc_lock);
216 gc_list.first = xfrm_state_gc_list.first;
217 INIT_HLIST_HEAD(&xfrm_state_gc_list);
218 spin_unlock_bh(&xfrm_state_gc_lock);
220 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
221 xfrm_state_gc_destroy(x);
226 static inline unsigned long make_jiffies(long secs)
228 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
229 return MAX_SCHEDULE_TIMEOUT-1;
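/* Per-state lifetime timer: hard add/use expiry kills the state, soft expiry
 * only notifies the key managers; the timer is re-armed for the nearest
 * remaining deadline. */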
234 static void xfrm_timer_handler(unsigned long data)
236 struct xfrm_state *x = (struct xfrm_state*)data;
237 unsigned long now = get_seconds();
238 long next = LONG_MAX;
243 if (x->km.state == XFRM_STATE_DEAD)
245 if (x->km.state == XFRM_STATE_EXPIRED)
247 if (x->lft.hard_add_expires_seconds) {
248 long tmo = x->lft.hard_add_expires_seconds +
249 x->curlft.add_time - now;
255 if (x->lft.hard_use_expires_seconds) {
256 long tmo = x->lft.hard_use_expires_seconds +
257 (x->curlft.use_time ? : now) - now;
265 if (x->lft.soft_add_expires_seconds) {
266 long tmo = x->lft.soft_add_expires_seconds +
267 x->curlft.add_time - now;
273 if (x->lft.soft_use_expires_seconds) {
274 long tmo = x->lft.soft_use_expires_seconds +
275 (x->curlft.use_time ? : now) - now;
284 km_state_expired(x, 0, 0);
286 if (next != LONG_MAX)
287 mod_timer(&x->timer, jiffies + make_jiffies(next));
292 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
293 x->km.state = XFRM_STATE_EXPIRED;
299 err = __xfrm_state_delete(x);
300 if (!err && x->id.spi)
301 km_state_expired(x, 1, 0);
303 xfrm_audit_state_delete(x, err ? 0 : 1,
304 audit_get_loginuid(current->audit_context), 0);
307 spin_unlock(&x->lock);
310 static void xfrm_replay_timer_handler(unsigned long data);
312 struct xfrm_state *xfrm_state_alloc(void)
314 struct xfrm_state *x;
316 x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
319 atomic_set(&x->refcnt, 1);
320 atomic_set(&x->tunnel_users, 0);
321 INIT_HLIST_NODE(&x->bydst);
322 INIT_HLIST_NODE(&x->bysrc);
323 INIT_HLIST_NODE(&x->byspi);
324 init_timer(&x->timer);
325 x->timer.function = xfrm_timer_handler;
326 x->timer.data = (unsigned long)x;
327 init_timer(&x->rtimer);
328 x->rtimer.function = xfrm_replay_timer_handler;
329 x->rtimer.data = (unsigned long)x;
330 x->curlft.add_time = get_seconds();
331 x->lft.soft_byte_limit = XFRM_INF;
332 x->lft.soft_packet_limit = XFRM_INF;
333 x->lft.hard_byte_limit = XFRM_INF;
334 x->lft.hard_packet_limit = XFRM_INF;
335 x->replay_maxage = 0;
336 x->replay_maxdiff = 0;
337 spin_lock_init(&x->lock);
341 EXPORT_SYMBOL(xfrm_state_alloc);
343 void __xfrm_state_destroy(struct xfrm_state *x)
345 BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
347 spin_lock_bh(&xfrm_state_gc_lock);
348 hlist_add_head(&x->bydst, &xfrm_state_gc_list);
349 spin_unlock_bh(&xfrm_state_gc_lock);
350 schedule_work(&xfrm_state_gc_work);
352 EXPORT_SYMBOL(__xfrm_state_destroy);
354 int __xfrm_state_delete(struct xfrm_state *x)
358 if (x->km.state != XFRM_STATE_DEAD) {
359 x->km.state = XFRM_STATE_DEAD;
360 spin_lock(&xfrm_state_lock);
361 hlist_del(&x->bydst);
362 hlist_del(&x->bysrc);
364 hlist_del(&x->byspi);
366 spin_unlock(&xfrm_state_lock);
368 /* All xfrm_state objects are created by xfrm_state_alloc.
369 * The xfrm_state_alloc call gives a reference, and that
370 * is what we are dropping here.
378 EXPORT_SYMBOL(__xfrm_state_delete);
380 int xfrm_state_delete(struct xfrm_state *x)
384 spin_lock_bh(&x->lock);
385 err = __xfrm_state_delete(x);
386 spin_unlock_bh(&x->lock);
390 EXPORT_SYMBOL(xfrm_state_delete);
392 #ifdef CONFIG_SECURITY_NETWORK_XFRM
394 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
398 for (i = 0; i <= xfrm_state_hmask; i++) {
399 struct hlist_node *entry;
400 struct xfrm_state *x;
402 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
403 if (xfrm_id_proto_match(x->id.proto, proto) &&
404 (err = security_xfrm_state_delete(x)) != 0) {
405 xfrm_audit_state_delete(x, 0,
406 audit_info->loginuid,
417 xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
423 int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
427 spin_lock_bh(&xfrm_state_lock);
428 err = xfrm_state_flush_secctx_check(proto, audit_info);
432 for (i = 0; i <= xfrm_state_hmask; i++) {
433 struct hlist_node *entry;
434 struct xfrm_state *x;
436 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
437 if (!xfrm_state_kern(x) &&
438 xfrm_id_proto_match(x->id.proto, proto)) {
440 spin_unlock_bh(&xfrm_state_lock);
442 err = xfrm_state_delete(x);
443 xfrm_audit_state_delete(x, err ? 0 : 1,
444 audit_info->loginuid,
448 spin_lock_bh(&xfrm_state_lock);
456 spin_unlock_bh(&xfrm_state_lock);
460 EXPORT_SYMBOL(xfrm_state_flush);
462 void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
464 spin_lock_bh(&xfrm_state_lock);
465 si->sadcnt = xfrm_state_num;
466 si->sadhcnt = xfrm_state_hmask;
467 si->sadhmcnt = xfrm_state_hashmax;
468 spin_unlock_bh(&xfrm_state_lock);
470 EXPORT_SYMBOL(xfrm_sad_getinfo);
473 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
474 struct xfrm_tmpl *tmpl,
475 xfrm_address_t *daddr, xfrm_address_t *saddr,
476 unsigned short family)
478 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
481 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
482 xfrm_state_put_afinfo(afinfo);
486 static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
488 unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
489 struct xfrm_state *x;
490 struct hlist_node *entry;
492 hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
493 if (x->props.family != family ||
495 x->id.proto != proto)
500 if (x->id.daddr.a4 != daddr->a4)
504 if (!ipv6_addr_equal((struct in6_addr *)daddr,
518 static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
520 unsigned int h = xfrm_src_hash(daddr, saddr, family);
521 struct xfrm_state *x;
522 struct hlist_node *entry;
524 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
525 if (x->props.family != family ||
526 x->id.proto != proto)
531 if (x->id.daddr.a4 != daddr->a4 ||
532 x->props.saddr.a4 != saddr->a4)
536 if (!ipv6_addr_equal((struct in6_addr *)daddr,
539 !ipv6_addr_equal((struct in6_addr *)saddr,
553 static inline struct xfrm_state *
554 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
557 return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
558 x->id.proto, family);
560 return __xfrm_state_lookup_byaddr(&x->id.daddr,
562 x->id.proto, family);
565 static void xfrm_hash_grow_check(int have_hash_collision)
567 if (have_hash_collision &&
568 (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
569 xfrm_state_num > xfrm_state_hmask)
570 schedule_work(&xfrm_hash_work);
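/* Output-path SA lookup: scan the bydst chain for a VALID state matching the
 * template and selector; if none exists (and no ACQ is pending) create a
 * larval XFRM_STATE_ACQ entry and ask the key managers via km_query(). */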
574 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
575 struct flowi *fl, struct xfrm_tmpl *tmpl,
576 struct xfrm_policy *pol, int *err,
577 unsigned short family)
579 unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
580 struct hlist_node *entry;
581 struct xfrm_state *x, *x0;
582 int acquire_in_progress = 0;
584 struct xfrm_state *best = NULL;
586 spin_lock_bh(&xfrm_state_lock);
587 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
588 if (x->props.family == family &&
589 x->props.reqid == tmpl->reqid &&
590 !(x->props.flags & XFRM_STATE_WILDRECV) &&
591 xfrm_state_addr_check(x, daddr, saddr, family) &&
592 tmpl->mode == x->props.mode &&
593 tmpl->id.proto == x->id.proto &&
594 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
596 1. There is a valid state with matching selector.
598 2. Valid state with inappropriate selector. Skip.
600 Entering area of "sysdeps".
602 3. If state is not valid, selector is temporary,
603 it selects only session which triggered
604 previous resolution. Key manager will do
605 something to install a state with proper
608 if (x->km.state == XFRM_STATE_VALID) {
609 if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
610 !security_xfrm_state_pol_flow_match(x, pol, fl))
613 best->km.dying > x->km.dying ||
614 (best->km.dying == x->km.dying &&
615 best->curlft.add_time < x->curlft.add_time))
617 } else if (x->km.state == XFRM_STATE_ACQ) {
618 acquire_in_progress = 1;
619 } else if (x->km.state == XFRM_STATE_ERROR ||
620 x->km.state == XFRM_STATE_EXPIRED) {
621 if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
622 security_xfrm_state_pol_flow_match(x, pol, fl))
629 if (!x && !error && !acquire_in_progress) {
631 (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
632 tmpl->id.proto, family)) != NULL) {
637 x = xfrm_state_alloc();
642 /* Initialize temporary selector matching only
643 * to current session. */
644 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
646 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
648 x->km.state = XFRM_STATE_DEAD;
654 if (km_query(x, tmpl, pol) == 0) {
655 x->km.state = XFRM_STATE_ACQ;
656 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
657 h = xfrm_src_hash(daddr, saddr, family);
658 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
660 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
661 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
663 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
664 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
665 add_timer(&x->timer);
667 xfrm_hash_grow_check(x->bydst.next != NULL);
669 x->km.state = XFRM_STATE_DEAD;
679 *err = acquire_in_progress ? -EAGAIN : error;
680 spin_unlock_bh(&xfrm_state_lock);
685 xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
686 unsigned short family, u8 mode, u8 proto, u32 reqid)
688 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
689 struct xfrm_state *rx = NULL, *x = NULL;
690 struct hlist_node *entry;
692 spin_lock(&xfrm_state_lock);
693 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
694 if (x->props.family == family &&
695 x->props.reqid == reqid &&
696 !(x->props.flags & XFRM_STATE_WILDRECV) &&
697 xfrm_state_addr_check(x, daddr, saddr, family) &&
698 mode == x->props.mode &&
699 proto == x->id.proto &&
700 x->km.state == XFRM_STATE_VALID) {
708 spin_unlock(&xfrm_state_lock);
713 EXPORT_SYMBOL(xfrm_stateonly_find);
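/* Link a state into all three hash tables and start its timers.
 * Caller must hold xfrm_state_lock. */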
715 static void __xfrm_state_insert(struct xfrm_state *x)
719 x->genid = ++xfrm_state_genid;
721 h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
722 x->props.reqid, x->props.family);
723 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
725 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
726 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
729 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
732 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
735 mod_timer(&x->timer, jiffies + HZ);
736 if (x->replay_maxage)
737 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
743 xfrm_hash_grow_check(x->bydst.next != NULL);
746 /* xfrm_state_lock is held */
747 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
749 unsigned short family = xnew->props.family;
750 u32 reqid = xnew->props.reqid;
751 struct xfrm_state *x;
752 struct hlist_node *entry;
755 h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
756 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
757 if (x->props.family == family &&
758 x->props.reqid == reqid &&
759 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
760 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
761 x->genid = xfrm_state_genid;
765 void xfrm_state_insert(struct xfrm_state *x)
767 spin_lock_bh(&xfrm_state_lock);
768 __xfrm_state_bump_genids(x);
769 __xfrm_state_insert(x);
770 spin_unlock_bh(&xfrm_state_lock);
772 EXPORT_SYMBOL(xfrm_state_insert);
774 /* xfrm_state_lock is held */
775 static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
777 unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
778 struct hlist_node *entry;
779 struct xfrm_state *x;
781 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
782 if (x->props.reqid != reqid ||
783 x->props.mode != mode ||
784 x->props.family != family ||
785 x->km.state != XFRM_STATE_ACQ ||
787 x->id.proto != proto)
792 if (x->id.daddr.a4 != daddr->a4 ||
793 x->props.saddr.a4 != saddr->a4)
797 if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
798 (struct in6_addr *)daddr) ||
799 !ipv6_addr_equal((struct in6_addr *)
801 (struct in6_addr *)saddr))
813 x = xfrm_state_alloc();
817 x->sel.daddr.a4 = daddr->a4;
818 x->sel.saddr.a4 = saddr->a4;
819 x->sel.prefixlen_d = 32;
820 x->sel.prefixlen_s = 32;
821 x->props.saddr.a4 = saddr->a4;
822 x->id.daddr.a4 = daddr->a4;
826 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
827 (struct in6_addr *)daddr);
828 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
829 (struct in6_addr *)saddr);
830 x->sel.prefixlen_d = 128;
831 x->sel.prefixlen_s = 128;
832 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
833 (struct in6_addr *)saddr);
834 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
835 (struct in6_addr *)daddr);
839 x->km.state = XFRM_STATE_ACQ;
841 x->props.family = family;
842 x->props.mode = mode;
843 x->props.reqid = reqid;
844 x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
846 x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
847 add_timer(&x->timer);
848 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
849 h = xfrm_src_hash(daddr, saddr, family);
850 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
855 xfrm_hash_grow_check(x->bydst.next != NULL);
861 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
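/* Install a fully specified SA. Insertion fails if an equivalent state is
 * already present; a matching larval ACQ entry is replaced and deleted. */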
863 int xfrm_state_add(struct xfrm_state *x)
865 struct xfrm_state *x1;
868 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
870 family = x->props.family;
872 spin_lock_bh(&xfrm_state_lock);
874 x1 = __xfrm_state_locate(x, use_spi, family);
882 if (use_spi && x->km.seq) {
883 x1 = __xfrm_find_acq_byseq(x->km.seq);
884 if (x1 && ((x1->id.proto != x->id.proto) ||
885 xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
892 x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
894 &x->id.daddr, &x->props.saddr, 0);
896 __xfrm_state_bump_genids(x);
897 __xfrm_state_insert(x);
901 spin_unlock_bh(&xfrm_state_lock);
904 xfrm_state_delete(x1);
910 EXPORT_SYMBOL(xfrm_state_add);
912 #ifdef CONFIG_XFRM_MIGRATE
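/* Duplicate an SA for migration: copy id, selector, lifetimes, algorithms,
 * encapsulation and care-of address so the copy can be re-inserted with
 * new addresses. */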
913 struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
916 struct xfrm_state *x = xfrm_state_alloc();
920 memcpy(&x->id, &orig->id, sizeof(x->id));
921 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
922 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
923 x->props.mode = orig->props.mode;
924 x->props.replay_window = orig->props.replay_window;
925 x->props.reqid = orig->props.reqid;
926 x->props.family = orig->props.family;
927 x->props.saddr = orig->props.saddr;
930 x->aalg = xfrm_algo_clone(orig->aalg);
934 x->props.aalgo = orig->props.aalgo;
937 x->ealg = xfrm_algo_clone(orig->ealg);
941 x->props.ealgo = orig->props.ealgo;
944 x->calg = xfrm_algo_clone(orig->calg);
948 x->props.calgo = orig->props.calgo;
951 x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
957 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
963 err = xfrm_init_state(x);
967 x->props.flags = orig->props.flags;
969 x->curlft.add_time = orig->curlft.add_time;
970 x->km.state = orig->km.state;
971 x->km.seq = orig->km.seq;
988 EXPORT_SYMBOL(xfrm_state_clone);
990 /* xfrm_state_lock is held */
991 struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
994 struct xfrm_state *x;
995 struct hlist_node *entry;
998 h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
999 m->reqid, m->old_family);
1000 hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
1001 if (x->props.mode != m->mode ||
1002 x->id.proto != m->proto)
1004 if (m->reqid && x->props.reqid != m->reqid)
1006 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1008 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1015 h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
1017 hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
1018 if (x->props.mode != m->mode ||
1019 x->id.proto != m->proto)
1021 if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
1023 xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
1033 EXPORT_SYMBOL(xfrm_migrate_state_find);
1035 struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1036 struct xfrm_migrate *m)
1038 struct xfrm_state *xc;
1041 xc = xfrm_state_clone(x, &err);
1045 memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1046 memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1049 if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
1050 /* care is needed when the destination address of the
1051 state is to be updated, as it is part of the SA's identifying triplet */
1052 xfrm_state_insert(xc);
1054 if ((err = xfrm_state_add(xc)) < 0)
1063 EXPORT_SYMBOL(xfrm_state_migrate);
1066 int xfrm_state_update(struct xfrm_state *x)
1068 struct xfrm_state *x1;
1070 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1072 spin_lock_bh(&xfrm_state_lock);
1073 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1079 if (xfrm_state_kern(x1)) {
1085 if (x1->km.state == XFRM_STATE_ACQ) {
1086 __xfrm_state_insert(x);
1092 spin_unlock_bh(&xfrm_state_lock);
1098 xfrm_state_delete(x1);
1104 spin_lock_bh(&x1->lock);
1105 if (likely(x1->km.state == XFRM_STATE_VALID)) {
1106 if (x->encap && x1->encap)
1107 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1108 if (x->coaddr && x1->coaddr) {
1109 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1111 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1112 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1113 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1116 mod_timer(&x1->timer, jiffies + HZ);
1117 if (x1->curlft.use_time)
1118 xfrm_state_check_expire(x1);
1122 spin_unlock_bh(&x1->lock);
1128 EXPORT_SYMBOL(xfrm_state_update);
1130 int xfrm_state_check_expire(struct xfrm_state *x)
1132 if (!x->curlft.use_time)
1133 x->curlft.use_time = get_seconds();
1135 if (x->km.state != XFRM_STATE_VALID)
1138 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1139 x->curlft.packets >= x->lft.hard_packet_limit) {
1140 x->km.state = XFRM_STATE_EXPIRED;
1141 mod_timer(&x->timer, jiffies);
1146 (x->curlft.bytes >= x->lft.soft_byte_limit ||
1147 x->curlft.packets >= x->lft.soft_packet_limit)) {
1149 km_state_expired(x, 0, 0);
1153 EXPORT_SYMBOL(xfrm_state_check_expire);
1155 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
1157 int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
1158 - skb_headroom(skb);
1161 return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
1163 /* Check tail too... */
1167 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
1169 int err = xfrm_state_check_expire(x);
1172 err = xfrm_state_check_space(x, skb);
1176 EXPORT_SYMBOL(xfrm_state_check);
1179 xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
1180 unsigned short family)
1182 struct xfrm_state *x;
1184 spin_lock_bh(&xfrm_state_lock);
1185 x = __xfrm_state_lookup(daddr, spi, proto, family);
1186 spin_unlock_bh(&xfrm_state_lock);
1189 EXPORT_SYMBOL(xfrm_state_lookup);
1192 xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
1193 u8 proto, unsigned short family)
1195 struct xfrm_state *x;
1197 spin_lock_bh(&xfrm_state_lock);
1198 x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
1199 spin_unlock_bh(&xfrm_state_lock);
1202 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1205 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1206 xfrm_address_t *daddr, xfrm_address_t *saddr,
1207 int create, unsigned short family)
1209 struct xfrm_state *x;
1211 spin_lock_bh(&xfrm_state_lock);
1212 x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
1213 spin_unlock_bh(&xfrm_state_lock);
1217 EXPORT_SYMBOL(xfrm_find_acq);
1219 #ifdef CONFIG_XFRM_SUB_POLICY
1221 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1222 unsigned short family)
1225 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1227 return -EAFNOSUPPORT;
1229 spin_lock_bh(&xfrm_state_lock);
1230 if (afinfo->tmpl_sort)
1231 err = afinfo->tmpl_sort(dst, src, n);
1232 spin_unlock_bh(&xfrm_state_lock);
1233 xfrm_state_put_afinfo(afinfo);
1236 EXPORT_SYMBOL(xfrm_tmpl_sort);
1239 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1240 unsigned short family)
1243 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1245 return -EAFNOSUPPORT;
1247 spin_lock_bh(&xfrm_state_lock);
1248 if (afinfo->state_sort)
1249 err = afinfo->state_sort(dst, src, n);
1250 spin_unlock_bh(&xfrm_state_lock);
1251 xfrm_state_put_afinfo(afinfo);
1254 EXPORT_SYMBOL(xfrm_state_sort);
1257 /* Silly enough, but I'm too lazy to build a resolution list */
1259 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
1263 for (i = 0; i <= xfrm_state_hmask; i++) {
1264 struct hlist_node *entry;
1265 struct xfrm_state *x;
1267 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1268 if (x->km.seq == seq &&
1269 x->km.state == XFRM_STATE_ACQ) {
1278 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
1280 struct xfrm_state *x;
1282 spin_lock_bh(&xfrm_state_lock);
1283 x = __xfrm_find_acq_byseq(seq);
1284 spin_unlock_bh(&xfrm_state_lock);
1287 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1289 u32 xfrm_get_acqseq(void)
1293 static DEFINE_SPINLOCK(acqseq_lock);
1295 spin_lock_bh(&acqseq_lock);
1296 res = (++acqseq ? : ++acqseq);
1297 spin_unlock_bh(&acqseq_lock);
1300 EXPORT_SYMBOL(xfrm_get_acqseq);
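/* Assign an SPI: use minspi directly when minspi == maxspi, otherwise probe
 * random values in [minspi, maxspi] until an unused one is found, then hash
 * the state into the byspi table. */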
1303 xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
1306 struct xfrm_state *x0;
1311 if (minspi == maxspi) {
1312 x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
1320 u32 low = ntohl(minspi);
1321 u32 high = ntohl(maxspi);
1322 for (h=0; h<high-low+1; h++) {
1323 spi = low + net_random()%(high-low+1);
1324 x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1326 x->id.spi = htonl(spi);
1333 spin_lock_bh(&xfrm_state_lock);
1334 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1335 hlist_add_head(&x->byspi, xfrm_state_byspi+h);
1336 spin_unlock_bh(&xfrm_state_lock);
1340 EXPORT_SYMBOL(xfrm_alloc_spi);
1342 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
1346 struct xfrm_state *x, *last = NULL;
1347 struct hlist_node *entry;
1351 spin_lock_bh(&xfrm_state_lock);
1352 for (i = 0; i <= xfrm_state_hmask; i++) {
1353 hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
1354 if (!xfrm_id_proto_match(x->id.proto, proto))
1357 err = func(last, count, data);
1369 err = func(last, 0, data);
1371 spin_unlock_bh(&xfrm_state_lock);
1374 EXPORT_SYMBOL(xfrm_state_walk);
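/*
 * Usage sketch (not part of this file): visit every ESP state with a
 * caller-supplied callback; dump_one() is a hypothetical example.
 *
 *	static int dump_one(struct xfrm_state *x, int count, void *ptr)
 *	{
 *		return 0;
 *	}
 *
 *	err = xfrm_state_walk(IPPROTO_ESP, dump_one, ptr);
 */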
1377 void xfrm_replay_notify(struct xfrm_state *x, int event)
1380 /* we send notify messages in case
1381 * 1. we updated one of the sequence numbers, and the seqno difference
1382 * is at least x->replay_maxdiff, in this case we also update the
1383 * timeout of our timer function
1384 * 2. if x->replay_maxage has elapsed since last update,
1385 * and there were changes
1387 * The state structure must be locked!
1391 case XFRM_REPLAY_UPDATE:
1392 if (x->replay_maxdiff &&
1393 (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
1394 (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
1395 if (x->xflags & XFRM_TIME_DEFER)
1396 event = XFRM_REPLAY_TIMEOUT;
1403 case XFRM_REPLAY_TIMEOUT:
1404 if ((x->replay.seq == x->preplay.seq) &&
1405 (x->replay.bitmap == x->preplay.bitmap) &&
1406 (x->replay.oseq == x->preplay.oseq)) {
1407 x->xflags |= XFRM_TIME_DEFER;
1414 memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
1415 c.event = XFRM_MSG_NEWAE;
1416 c.data.aevent = event;
1417 km_state_notify(x, &c);
1419 if (x->replay_maxage &&
1420 !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
1421 x->xflags &= ~XFRM_TIME_DEFER;
1423 EXPORT_SYMBOL(xfrm_replay_notify);
1425 static void xfrm_replay_timer_handler(unsigned long data)
1427 struct xfrm_state *x = (struct xfrm_state*)data;
1429 spin_lock(&x->lock);
1431 if (x->km.state == XFRM_STATE_VALID) {
1432 if (xfrm_aevent_is_on())
1433 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
1435 x->xflags |= XFRM_TIME_DEFER;
1438 spin_unlock(&x->lock);
1441 int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
1444 u32 seq = ntohl(net_seq);
1446 if (unlikely(seq == 0))
1449 if (likely(seq > x->replay.seq))
1452 diff = x->replay.seq - seq;
1453 if (diff >= min_t(unsigned int, x->props.replay_window,
1454 sizeof(x->replay.bitmap) * 8)) {
1455 x->stats.replay_window++;
1459 if (x->replay.bitmap & (1U << diff)) {
1465 EXPORT_SYMBOL(xfrm_replay_check);
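/* Slide the anti-replay window: a newer sequence number shifts the bitmap
 * and becomes the new right edge, an older one inside the window just sets
 * its bit. */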
1467 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
1470 u32 seq = ntohl(net_seq);
1472 if (seq > x->replay.seq) {
1473 diff = seq - x->replay.seq;
1474 if (diff < x->props.replay_window)
1475 x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
1477 x->replay.bitmap = 1;
1478 x->replay.seq = seq;
1480 diff = x->replay.seq - seq;
1481 x->replay.bitmap |= (1U << diff);
1484 if (xfrm_aevent_is_on())
1485 xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
1487 EXPORT_SYMBOL(xfrm_replay_advance);
1489 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
1490 static DEFINE_RWLOCK(xfrm_km_lock);
1492 void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
1494 struct xfrm_mgr *km;
1496 read_lock(&xfrm_km_lock);
1497 list_for_each_entry(km, &xfrm_km_list, list)
1498 if (km->notify_policy)
1499 km->notify_policy(xp, dir, c);
1500 read_unlock(&xfrm_km_lock);
1503 void km_state_notify(struct xfrm_state *x, struct km_event *c)
1505 struct xfrm_mgr *km;
1506 read_lock(&xfrm_km_lock);
1507 list_for_each_entry(km, &xfrm_km_list, list)
1510 read_unlock(&xfrm_km_lock);
1513 EXPORT_SYMBOL(km_policy_notify);
1514 EXPORT_SYMBOL(km_state_notify);
1516 void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
1522 c.event = XFRM_MSG_EXPIRE;
1523 km_state_notify(x, &c);
1529 EXPORT_SYMBOL(km_state_expired);
1531 * We send to all registered managers regardless of failure;
1532 * we are happy with one success.
1534 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1536 int err = -EINVAL, acqret;
1537 struct xfrm_mgr *km;
1539 read_lock(&xfrm_km_lock);
1540 list_for_each_entry(km, &xfrm_km_list, list) {
1541 acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
1545 read_unlock(&xfrm_km_lock);
1548 EXPORT_SYMBOL(km_query);
1550 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1553 struct xfrm_mgr *km;
1555 read_lock(&xfrm_km_lock);
1556 list_for_each_entry(km, &xfrm_km_list, list) {
1557 if (km->new_mapping)
1558 err = km->new_mapping(x, ipaddr, sport);
1562 read_unlock(&xfrm_km_lock);
1565 EXPORT_SYMBOL(km_new_mapping);
1567 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
1573 c.event = XFRM_MSG_POLEXPIRE;
1574 km_policy_notify(pol, dir, &c);
1579 EXPORT_SYMBOL(km_policy_expired);
1581 int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1582 struct xfrm_migrate *m, int num_migrate)
1586 struct xfrm_mgr *km;
1588 read_lock(&xfrm_km_lock);
1589 list_for_each_entry(km, &xfrm_km_list, list) {
1591 ret = km->migrate(sel, dir, type, m, num_migrate);
1596 read_unlock(&xfrm_km_lock);
1599 EXPORT_SYMBOL(km_migrate);
1601 int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
1605 struct xfrm_mgr *km;
1607 read_lock(&xfrm_km_lock);
1608 list_for_each_entry(km, &xfrm_km_list, list) {
1610 ret = km->report(proto, sel, addr);
1615 read_unlock(&xfrm_km_lock);
1618 EXPORT_SYMBOL(km_report);
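/* setsockopt() path for per-socket policies: copy the user blob, let each
 * registered key manager try to compile it into an xfrm_policy, then insert
 * it on the socket. */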
1620 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1624 struct xfrm_mgr *km;
1625 struct xfrm_policy *pol = NULL;
1627 if (optlen <= 0 || optlen > PAGE_SIZE)
1630 data = kmalloc(optlen, GFP_KERNEL);
1635 if (copy_from_user(data, optval, optlen))
1639 read_lock(&xfrm_km_lock);
1640 list_for_each_entry(km, &xfrm_km_list, list) {
1641 pol = km->compile_policy(sk, optname, data,
1646 read_unlock(&xfrm_km_lock);
1649 xfrm_sk_policy_insert(sk, err, pol);
1658 EXPORT_SYMBOL(xfrm_user_policy);
1660 int xfrm_register_km(struct xfrm_mgr *km)
1662 write_lock_bh(&xfrm_km_lock);
1663 list_add_tail(&km->list, &xfrm_km_list);
1664 write_unlock_bh(&xfrm_km_lock);
1667 EXPORT_SYMBOL(xfrm_register_km);
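/*
 * Usage sketch (assumed, not from this file): a key manager such as af_key
 * registers itself once at module init; my_notify/my_acquire are
 * hypothetical callbacks.
 *
 *	static struct xfrm_mgr my_mgr = {
 *		.id	 = "my_mgr",
 *		.notify	 = my_notify,
 *		.acquire = my_acquire,
 *	};
 *
 *	xfrm_register_km(&my_mgr);
 */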
1669 int xfrm_unregister_km(struct xfrm_mgr *km)
1671 write_lock_bh(&xfrm_km_lock);
1672 list_del(&km->list);
1673 write_unlock_bh(&xfrm_km_lock);
1676 EXPORT_SYMBOL(xfrm_unregister_km);
1678 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
1681 if (unlikely(afinfo == NULL))
1683 if (unlikely(afinfo->family >= NPROTO))
1684 return -EAFNOSUPPORT;
1685 write_lock_bh(&xfrm_state_afinfo_lock);
1686 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
1689 xfrm_state_afinfo[afinfo->family] = afinfo;
1690 write_unlock_bh(&xfrm_state_afinfo_lock);
1693 EXPORT_SYMBOL(xfrm_state_register_afinfo);
1695 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1698 if (unlikely(afinfo == NULL))
1700 if (unlikely(afinfo->family >= NPROTO))
1701 return -EAFNOSUPPORT;
1702 write_lock_bh(&xfrm_state_afinfo_lock);
1703 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
1704 if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
1707 xfrm_state_afinfo[afinfo->family] = NULL;
1709 write_unlock_bh(&xfrm_state_afinfo_lock);
1712 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1714 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
1716 struct xfrm_state_afinfo *afinfo;
1717 if (unlikely(family >= NPROTO))
1719 read_lock(&xfrm_state_afinfo_lock);
1720 afinfo = xfrm_state_afinfo[family];
1721 if (unlikely(!afinfo))
1722 read_unlock(&xfrm_state_afinfo_lock);
1726 void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1728 read_unlock(&xfrm_state_afinfo_lock);
1731 EXPORT_SYMBOL(xfrm_state_get_afinfo);
1732 EXPORT_SYMBOL(xfrm_state_put_afinfo);
1734 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
1735 void xfrm_state_delete_tunnel(struct xfrm_state *x)
1738 struct xfrm_state *t = x->tunnel;
1740 if (atomic_read(&t->tunnel_users) == 2)
1741 xfrm_state_delete(t);
1742 atomic_dec(&t->tunnel_users);
1747 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1749 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1753 spin_lock_bh(&x->lock);
1754 if (x->km.state == XFRM_STATE_VALID &&
1755 x->type && x->type->get_mtu)
1756 res = x->type->get_mtu(x, mtu);
1758 res = mtu - x->props.header_len;
1759 spin_unlock_bh(&x->lock);
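/* Bind a new state to its address family, transform type and mode; only
 * after that succeeds is it marked XFRM_STATE_VALID. */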
1763 int xfrm_init_state(struct xfrm_state *x)
1765 struct xfrm_state_afinfo *afinfo;
1766 int family = x->props.family;
1769 err = -EAFNOSUPPORT;
1770 afinfo = xfrm_state_get_afinfo(family);
1775 if (afinfo->init_flags)
1776 err = afinfo->init_flags(x);
1778 xfrm_state_put_afinfo(afinfo);
1783 err = -EPROTONOSUPPORT;
1784 x->type = xfrm_get_type(x->id.proto, family);
1785 if (x->type == NULL)
1788 err = x->type->init_state(x);
1792 x->mode = xfrm_get_mode(x->props.mode, family);
1793 if (x->mode == NULL)
1796 x->km.state = XFRM_STATE_VALID;
1802 EXPORT_SYMBOL(xfrm_init_state);
1804 void __init xfrm_state_init(void)
1808 sz = sizeof(struct hlist_head) * 8;
1810 xfrm_state_bydst = xfrm_hash_alloc(sz);
1811 xfrm_state_bysrc = xfrm_hash_alloc(sz);
1812 xfrm_state_byspi = xfrm_hash_alloc(sz);
1813 if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
1814 panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
1815 xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
1817 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
1820 #ifdef CONFIG_AUDITSYSCALL
1821 static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
1822 struct audit_buffer *audit_buf)
1825 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
1826 x->security->ctx_alg, x->security->ctx_doi,
1827 x->security->ctx_str);
1829 switch(x->props.family) {
1831 audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
1832 NIPQUAD(x->props.saddr.a4),
1833 NIPQUAD(x->id.daddr.a4));
1837 struct in6_addr saddr6, daddr6;
1839 memcpy(&saddr6, x->props.saddr.a6,
1840 sizeof(struct in6_addr));
1841 memcpy(&daddr6, x->id.daddr.a6,
1842 sizeof(struct in6_addr));
1843 audit_log_format(audit_buf,
1844 " src=" NIP6_FMT " dst=" NIP6_FMT,
1845 NIP6(saddr6), NIP6(daddr6));
1852 xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid)
1854 struct audit_buffer *audit_buf;
1855 extern int audit_enabled;
1857 if (audit_enabled == 0)
1859 audit_buf = xfrm_audit_start(sid, auid);
1860 if (audit_buf == NULL)
1862 audit_log_format(audit_buf, " op=SAD-add res=%u",result);
1863 xfrm_audit_common_stateinfo(x, audit_buf);
1864 audit_log_format(audit_buf, " spi=%lu(0x%lx)",
1865 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
1866 audit_log_end(audit_buf);
1868 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
1871 xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid)
1873 struct audit_buffer *audit_buf;
1874 extern int audit_enabled;
1876 if (audit_enabled == 0)
1878 audit_buf = xfrm_audit_start(sid, auid);
1879 if (audit_buf == NULL)
1881 audit_log_format(audit_buf, " op=SAD-delete res=%u",result);
1882 xfrm_audit_common_stateinfo(x, audit_buf);
1883 audit_log_format(audit_buf, " spi=%lu(0x%lx)",
1884 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
1885 audit_log_end(audit_buf);
1887 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
1888 #endif /* CONFIG_AUDITSYSCALL */