/* xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/uaccess.h>
#include <linux/audit.h>

#include "xfrm_hash.h"
struct sock *xfrm_nl;
EXPORT_SYMBOL(xfrm_nl);
u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME;
EXPORT_SYMBOL(sysctl_xfrm_aevent_etime);

u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE;
EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth);

u32 sysctl_xfrm_acq_expires __read_mostly = 30;
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static DEFINE_SPINLOCK(xfrm_state_lock);
/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct hlist_head *xfrm_state_bydst __read_mostly;
static struct hlist_head *xfrm_state_bysrc __read_mostly;
static struct hlist_head *xfrm_state_byspi __read_mostly;
static unsigned int xfrm_state_hmask __read_mostly;
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static unsigned int xfrm_state_num;
static unsigned int xfrm_state_genid;
static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
}
static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
					 xfrm_address_t *saddr,
					 unsigned short family)
{
	return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
}

static inline unsigned int
xfrm_spi_hash(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	return __xfrm_spi_hash(daddr, spi, proto, family, xfrm_state_hmask);
}
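/* A minimal standalone sketch (guarded out of the build; all toy_*
 * names are hypothetical) of the bucket selection the three wrappers
 * above perform: with a power-of-two table of (hmask + 1) buckets,
 * "hash & hmask" is the cheap equivalent of "hash % table_size".
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static unsigned int toy_bucket(uint32_t hash, unsigned int hmask)
{
	return hash & hmask;	/* hmask == table_size - 1, size a power of two */
}

int main(void)
{
	unsigned int hmask = 8 - 1;	/* 8 buckets, as xfrm_state_init() starts with */
	uint32_t hashes[] = { 0x12345678, 0xdeadbeef, 0x00000007 };
	int i;

	for (i = 0; i < 3; i++)
		printf("hash %#x -> bucket %u\n", hashes[i],
		       toy_bucket(hashes[i], hmask));
	return 0;
}
#endif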
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, entry, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		hlist_add_head(&x->bydst, ndsttable+h);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family, nhashmask);
		hlist_add_head(&x->bysrc, nsrctable+h);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			hlist_add_head(&x->byspi, nspitable+h);
		}
	}
}
static unsigned long xfrm_hash_new_size(void)
{
	return ((xfrm_state_hmask + 1) << 1) *
		sizeof(struct hlist_head);
}

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *__unused)
{
	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	mutex_lock(&hash_resize_mutex);

	nsize = xfrm_hash_new_size();
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		goto out_unlock;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		goto out_unlock;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		goto out_unlock;
	}

	spin_lock_bh(&xfrm_state_lock);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	for (i = xfrm_state_hmask; i >= 0; i--)
		xfrm_hash_transfer(xfrm_state_bydst+i, ndst, nsrc, nspi,
				   nhashmask);

	odst = xfrm_state_bydst;
	osrc = xfrm_state_bysrc;
	ospi = xfrm_state_byspi;
	ohashmask = xfrm_state_hmask;

	xfrm_state_bydst = ndst;
	xfrm_state_bysrc = nsrc;
	xfrm_state_byspi = nspi;
	xfrm_state_hmask = nhashmask;

	spin_unlock_bh(&xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);
	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);

out_unlock:
	mutex_unlock(&hash_resize_mutex);
}
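/* The function above follows a common resize pattern: allocate every
 * new table before taking the lock, relink all entries under the lock,
 * then free the old arrays after dropping it.  A standalone sketch of
 * the same shape for one table (guarded out of the build; the toy_*
 * names are hypothetical):
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

struct toy_node { struct toy_node *next; uint32_t hash; };

static struct toy_node **toy_resize(struct toy_node **old, unsigned int *hmask)
{
	unsigned int nmask = ((*hmask + 1) << 1) - 1;	/* double the buckets */
	struct toy_node **new = calloc(nmask + 1, sizeof(*new));
	unsigned int i;

	if (!new)
		return old;	/* allocation failed: keep the old table */

	/* the kernel code holds xfrm_state_lock across this loop */
	for (i = 0; i <= *hmask; i++) {
		while (old[i]) {
			struct toy_node *n = old[i];
			unsigned int h = n->hash & nmask;

			old[i] = n->next;
			n->next = new[h];
			new[h] = n;
		}
	}
	free(old);	/* the kernel frees the old tables after unlocking */
	*hmask = nmask;
	return new;
}
#endif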
static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);

DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);

static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static HLIST_HEAD(xfrm_state_gc_list);
static DEFINE_SPINLOCK(xfrm_state_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	del_timer_sync(&x->timer);
	del_timer_sync(&x->rtimer);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	if (x->mode)
		xfrm_put_mode(x->mode);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	security_xfrm_state_free(x);
	kfree(x);
}
static void xfrm_state_gc_task(struct work_struct *data)
{
	struct xfrm_state *x;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	gc_list.first = xfrm_state_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst)
		xfrm_state_gc_destroy(x);

	wake_up(&km_waitq);
}
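/* xfrm_state_gc_task() above uses a classic deferred-destruction
 * pattern: detach the whole pending list in O(1) while holding the
 * spinlock, then run the expensive teardown (del_timer_sync, kfree)
 * with no lock held.  A standalone pthread sketch of the same shape
 * (guarded out of the build; toy_* names are hypothetical):
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

struct toy_item { struct toy_item *next; };

static pthread_mutex_t toy_gc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_item *toy_gc_list;

static void toy_gc_run(void)
{
	struct toy_item *batch;

	pthread_mutex_lock(&toy_gc_lock);
	batch = toy_gc_list;		/* steal the whole list in O(1) */
	toy_gc_list = NULL;
	pthread_mutex_unlock(&toy_gc_lock);

	while (batch) {			/* slow work runs unlocked */
		struct toy_item *next = batch->next;

		free(batch);
		batch = next;
	}
}
#endif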
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
static void xfrm_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		long tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		long tmo = x->lft.hard_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		long tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}
	if (x->lft.soft_use_expires_seconds) {
		long tmo = x->lft.soft_use_expires_seconds +
			(x->curlft.use_time ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != LONG_MAX)
		mod_timer(&x->timer, jiffies + make_jiffies(next));
	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
		x->km.state = XFRM_STATE_EXPIRED;
		wake_up(&km_waitq);
		next = 2;
		goto resched;
	}

	err = __xfrm_state_delete(x);
	if (!err && x->id.spi)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1,
				audit_get_loginuid(current->audit_context), 0);

out:
	spin_unlock(&x->lock);
}
static void xfrm_replay_timer_handler(unsigned long data);
struct xfrm_state *xfrm_state_alloc(void)
{
	struct xfrm_state *x;

	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

	if (x) {
		atomic_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		init_timer(&x->timer);
		x->timer.function = xfrm_timer_handler;
		x->timer.data = (unsigned long)x;
		init_timer(&x->rtimer);
		x->rtimer.function = xfrm_replay_timer_handler;
		x->rtimer.data = (unsigned long)x;
		x->curlft.add_time = get_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		spin_lock_init(&x->lock);
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
void __xfrm_state_destroy(struct xfrm_state *x)
{
	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->bydst, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
int __xfrm_state_delete(struct xfrm_state *x)
{
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;
		spin_lock(&xfrm_state_lock);
		hlist_del(&x->bydst);
		hlist_del(&x->bysrc);
		if (x->id.spi)
			hlist_del(&x->byspi);
		xfrm_state_num--;
		spin_unlock(&xfrm_state_lock);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		__xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);
int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0,
							audit_info->loginuid,
							audit_info->secid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif
int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
{
	int i, err = 0;

	spin_lock_bh(&xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(proto, audit_info);
	if (err)
		goto out;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							audit_info->loginuid,
							audit_info->secid);
				xfrm_state_put(x);

				spin_lock_bh(&xfrm_state_lock);
				goto restart;
			}
		}
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);
	wake_up(&km_waitq);
	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);
void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&xfrm_state_lock);
	si->sadcnt = xfrm_state_num;
	si->sadhcnt = xfrm_state_hmask;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		  struct xfrm_tmpl *tmpl,
		  xfrm_address_t *daddr, xfrm_address_t *saddr,
		  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -1;
	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)x->id.daddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
	unsigned int h = xfrm_src_hash(daddr, saddr, family);
	struct xfrm_state *x;
	struct hlist_node *entry;

	hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)daddr,
					     (struct in6_addr *)x->id.daddr.a6) ||
			    !ipv6_addr_equal((struct in6_addr *)saddr,
					     (struct in6_addr *)x->props.saddr.a6))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	return NULL;
}
static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	if (use_spi)
		return __xfrm_state_lookup(&x->id.daddr, x->id.spi,
					   x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}
static void xfrm_hash_grow_check(int have_hash_collision)
{
	if (have_hash_collision &&
	    (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
	    xfrm_state_num > xfrm_state_hmask)
		schedule_work(&xfrm_hash_work);
}
static struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x, *x0;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;

	spin_lock_bh(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == tmpl->reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) {
			/* Resolution logic:
			   1. There is a valid state with matching selector.
			      Done.
			   2. Valid state with inappropriate selector. Skip.

			   Entering area of "sysdeps".

			   3. If state is not valid, selector is temporary,
			      it selects only session which triggered
			      previous resolution. Key manager will do
			      something to install a state with proper
			      selector.
			 */
			if (x->km.state == XFRM_STATE_VALID) {
				if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
				    !security_xfrm_state_pol_flow_match(x, pol, fl))
					continue;
				if (!best ||
				    best->km.dying > x->km.dying ||
				    (best->km.dying == x->km.dying &&
				     best->curlft.add_time < x->curlft.add_time))
					best = x;
			} else if (x->km.state == XFRM_STATE_ACQ) {
				acquire_in_progress = 1;
			} else if (x->km.state == XFRM_STATE_ERROR ||
				   x->km.state == XFRM_STATE_EXPIRED) {
				if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
				    security_xfrm_state_pol_flow_match(x, pol, fl))
					error = -ESRCH;
			}
		}
	}

	x = best;
	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup(daddr, tmpl->id.spi,
					      tmpl->id.proto, family)) != NULL) {
			xfrm_state_put(x0);
			error = -EEXIST;
			goto out;
		}
		x = xfrm_state_alloc();
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary selector matching only
		 * to current session. */
		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			goto out;
		}

		if (km_query(x, tmpl, pol) == 0) {
			x->km.state = XFRM_STATE_ACQ;
			hlist_add_head(&x->bydst, xfrm_state_bydst+h);
			h = xfrm_src_hash(daddr, saddr, family);
			hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
			if (x->id.spi) {
				h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
				hlist_add_head(&x->byspi, xfrm_state_byspi+h);
			}
			x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
			x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
			add_timer(&x->timer);
			xfrm_state_num++;
			xfrm_hash_grow_check(x->bydst.next != NULL);
		} else {
			x->km.state = XFRM_STATE_DEAD;
			xfrm_state_put(x);
			x = NULL;
			error = -ESRCH;
		}
	}
out:
	if (x)
		xfrm_state_hold(x);
	else
		*err = acquire_in_progress ? -EAGAIN : error;
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
struct xfrm_state *
xfrm_stateonly_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct xfrm_state *rx = NULL, *x = NULL;
	struct hlist_node *entry;

	spin_lock(&xfrm_state_lock);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock(&xfrm_state_lock);

	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);
static void __xfrm_state_insert(struct xfrm_state *x)
{
	unsigned int h;

	x->genid = ++xfrm_state_genid;

	h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);

	h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);

	if (x->id.spi) {
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
	}

	mod_timer(&x->timer, jiffies + HZ);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);
}
/* xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	struct hlist_node *entry;
	unsigned int h;

	h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
		    !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
			x->genid = xfrm_state_genid;
	}
}
void xfrm_state_insert(struct xfrm_state *x)
{
	spin_lock_bh(&xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
	unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
	struct hlist_node *entry;
	struct xfrm_state *x;

	hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto)
			continue;

		switch (family) {
		case AF_INET:
			if (x->id.daddr.a4 != daddr->a4 ||
			    x->props.saddr.a4 != saddr->a4)
				continue;
			break;
		case AF_INET6:
			if (!ipv6_addr_equal((struct in6_addr *)x->id.daddr.a6,
					     (struct in6_addr *)daddr) ||
			    !ipv6_addr_equal((struct in6_addr *)
					     x->props.saddr.a6,
					     (struct in6_addr *)saddr))
				continue;
			break;
		}

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc();
	if (x == NULL)
		return NULL;

	switch (family) {
	case AF_INET:
		x->sel.daddr.a4 = daddr->a4;
		x->sel.saddr.a4 = saddr->a4;
		x->sel.prefixlen_d = 32;
		x->sel.prefixlen_s = 32;
		x->props.saddr.a4 = saddr->a4;
		x->id.daddr.a4 = daddr->a4;
		break;

	case AF_INET6:
		ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
			       (struct in6_addr *)daddr);
		ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
			       (struct in6_addr *)saddr);
		x->sel.prefixlen_d = 128;
		x->sel.prefixlen_s = 128;
		ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
			       (struct in6_addr *)saddr);
		ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
			       (struct in6_addr *)daddr);
		break;
	}

	x->km.state = XFRM_STATE_ACQ;
	x->id.proto = proto;
	x->props.family = family;
	x->props.mode = mode;
	x->props.reqid = reqid;
	x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
	xfrm_state_hold(x);
	x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ;
	add_timer(&x->timer);
	hlist_add_head(&x->bydst, xfrm_state_bydst+h);
	h = xfrm_src_hash(daddr, saddr, family);
	hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
	wake_up(&km_waitq);

	xfrm_state_num++;

	xfrm_hash_grow_check(x->bydst.next != NULL);

	return x;
}
static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
int xfrm_state_add(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int family;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	spin_lock_bh(&xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		xfrm_state_put(x1);
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(x->km.seq);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
			xfrm_state_put(x1);
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(family, x->props.mode, x->props.reqid,
				     x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
#ifdef CONFIG_XFRM_MIGRATE
struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
{
	int err = -ENOMEM;
	struct xfrm_state *x = xfrm_state_alloc();
	if (!x)
		goto error;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (orig->encap) {
		x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
		if (!x->encap)
			goto error;
	}

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	err = xfrm_init_state(x);
	if (err)
		goto error;

	x->props.flags = orig->props.flags;

	x->curlft.add_time = orig->curlft.add_time;
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;

	return x;

error:
	if (errp)
		*errp = err;
	if (x) {
		kfree(x->aalg);
		kfree(x->ealg);
		kfree(x->calg);
		kfree(x->encap);
		kfree(x->coaddr);
	}
	kfree(x);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_clone);
/* xfrm_state_lock is held */
struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
{
	unsigned int h;
	struct xfrm_state *x;
	struct hlist_node *entry;

	if (m->reqid) {
		h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	} else {
		h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
					  m->old_family) ||
			    xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
					  m->old_family))
				continue;
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
				       struct xfrm_migrate *m)
{
	struct xfrm_state *xc;
	int err;

	xc = xfrm_state_clone(x, &err);
	if (!xc)
		return NULL;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* Care is needed when the destination address of the
		   state is to be updated, since it is part of the
		   triplet that identifies the SA. */
		xfrm_state_insert(xc);
	} else {
		if ((err = xfrm_state_add(xc)) < 0)
			goto error;
	}

	return xc;
error:
	kfree(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	spin_lock_bh(&xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		xfrm_state_put(x1);
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		__xfrm_state_insert(x);
		x = NULL;
	}
	err = 0;

out:
	spin_unlock_bh(&xfrm_state_lock);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		mod_timer(&x1->timer, jiffies + HZ);
		if (x1->curlft.use_time)
			xfrm_state_check_expire(x1);

		err = 0;
	}
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);
int xfrm_state_check_expire(struct xfrm_state *x)
{
	if (!x->curlft.use_time)
		x->curlft.use_time = get_seconds();

	if (x->km.state != XFRM_STATE_VALID)
		return -EINVAL;

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		mod_timer(&x->timer, jiffies);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto,
		  unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup(daddr, spi, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_state_lookup_byaddr(daddr, saddr, proto, family);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
	      xfrm_address_t *daddr, xfrm_address_t *saddr,
	      int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __find_acq_core(family, mode, reqid, proto, daddr, saddr, create);
	spin_unlock_bh(&xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);
#ifdef CONFIG_XFRM_SUB_POLICY
int
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->tmpl_sort)
		err = afinfo->tmpl_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);
int
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int err = 0;
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_lock);
	if (afinfo->state_sort)
		err = afinfo->state_sort(dst, src, n);
	spin_unlock_bh(&xfrm_state_lock);
	xfrm_state_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
#endif
/* Silly enough, but I'm lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
{
	int i;

	for (i = 0; i <= xfrm_state_hmask; i++) {
		struct hlist_node *entry;
		struct xfrm_state *x;

		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (x->km.seq == seq &&
			    x->km.state == XFRM_STATE_ACQ) {
				xfrm_state_hold(x);
				return x;
			}
		}
	}
	return NULL;
}
struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
	struct xfrm_state *x;

	spin_lock_bh(&xfrm_state_lock);
	x = __xfrm_find_acq_byseq(seq);
	spin_unlock_bh(&xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
	u32 res;
	static u32 acqseq;
	static DEFINE_SPINLOCK(acqseq_lock);

	spin_lock_bh(&acqseq_lock);
	res = (++acqseq ? : ++acqseq);
	spin_unlock_bh(&acqseq_lock);
	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
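/* "res = (++acqseq ? : ++acqseq);" above uses the GNU "?:" extension:
 * bump the counter and, if it wrapped around to 0, bump it once more,
 * because 0 is reserved to mean "no acquire sequence number".  A
 * portable standalone spelling (guarded out of the build; the toy_*
 * name is hypothetical):
 */
#if 0
#include <stdint.h>

static uint32_t toy_next_acqseq(uint32_t *acqseq)
{
	uint32_t res = ++*acqseq;

	if (res == 0)			/* 32-bit wraparound: skip 0 */
		res = ++*acqseq;
	return res;
}
#endif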
void
xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi)
{
	unsigned int h;
	struct xfrm_state *x0;

	if (x->id.spi)
		return;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			xfrm_state_put(x0);
			return;
		}
		x->id.spi = minspi;
	} else {
		u32 spi = 0;
		u32 low = ntohl(minspi);
		u32 high = ntohl(maxspi);
		for (h=0; h<high-low+1; h++) {
			spi = low + net_random()%(high-low+1);
			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				x->id.spi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (x->id.spi) {
		spin_lock_bh(&xfrm_state_lock);
		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		hlist_add_head(&x->byspi, xfrm_state_byspi+h);
		spin_unlock_bh(&xfrm_state_lock);
		wake_up(&km_waitq);
	}
}
EXPORT_SYMBOL(xfrm_alloc_spi);
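/* The search above probes up to (high - low + 1) random candidates in
 * [low, high] and keeps the first SPI not already in use.  A standalone
 * sketch of that strategy, with toy_spi_in_use() standing in for the
 * xfrm_state_lookup() collision test (guarded out of the build; all
 * toy_* names are hypothetical):
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

static uint32_t toy_used[4];		/* tiny stand-in SPI registry */

static int toy_spi_in_use(uint32_t spi)
{
	int i;

	for (i = 0; i < 4; i++)
		if (toy_used[i] == spi)
			return 1;
	return 0;
}

static uint32_t toy_pick_spi(uint32_t low, uint32_t high)
{
	uint32_t range = high - low + 1;
	uint32_t h;

	for (h = 0; h < range; h++) {
		uint32_t spi = low + (uint32_t)rand() % range;

		if (!toy_spi_in_use(spi))
			return spi;
	}
	return 0;	/* 0 is never a valid SPI, so it signals failure */
}
#endif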
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	int i;
	struct xfrm_state *x, *last = NULL;
	struct hlist_node *entry;
	int count = 0;
	int err = 0;

	spin_lock_bh(&xfrm_state_lock);
	for (i = 0; i <= xfrm_state_hmask; i++) {
		hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
			if (!xfrm_id_proto_match(x->id.proto, proto))
				continue;
			if (last) {
				err = func(last, count, data);
				if (err)
					goto out;
			}
			last = x;
			count++;
		}
	}
	if (count == 0) {
		err = -ENOENT;
		goto out;
	}
	err = func(last, 0, data);
out:
	spin_unlock_bh(&xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
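/* xfrm_state_walk() above reports entry N only once it has found entry
 * N+1, then flushes the final entry with count == 0 so the callback can
 * tell it is looking at the last match.  A standalone model over an int
 * array (guarded out of the build; toy_* names are hypothetical):
 */
#if 0
static int toy_walk(const int *items, int n,
		    int (*func)(int item, int count, void *data),
		    void *data)
{
	int i, count = 0, err;
	int have_last = 0, last = 0;

	for (i = 0; i < n; i++) {
		if (have_last) {
			err = func(last, count, data);
			if (err)
				return err;
		}
		last = items[i];
		have_last = 1;
		count++;
	}
	if (count == 0)
		return -1;		/* nothing matched, like -ENOENT */
	return func(last, 0, data);	/* count == 0 marks the last entry */
}
#endif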
void xfrm_replay_notify(struct xfrm_state *x, int event)
{
	struct km_event c;
	/* we send notify messages in case
	 *  1. we updated one of the sequence numbers, and the seqno difference
	 *     is at least x->replay_maxdiff, in this case we also update the
	 *     timeout of our timer function
	 *  2. if x->replay_maxage has elapsed since last update,
	 *     and there were changes
	 *
	 *  The state structure must be locked!
	 */

	switch (event) {
	case XFRM_REPLAY_UPDATE:
		if (x->replay_maxdiff &&
		    (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
		    (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
			if (x->xflags & XFRM_TIME_DEFER)
				event = XFRM_REPLAY_TIMEOUT;
			else
				return;
		}
		break;

	case XFRM_REPLAY_TIMEOUT:
		if ((x->replay.seq == x->preplay.seq) &&
		    (x->replay.bitmap == x->preplay.bitmap) &&
		    (x->replay.oseq == x->preplay.oseq)) {
			x->xflags |= XFRM_TIME_DEFER;
			return;
		}
		break;
	}

	memcpy(&x->preplay, &x->replay, sizeof(struct xfrm_replay_state));
	c.event = XFRM_MSG_NEWAE;
	c.data.aevent = event;
	km_state_notify(x, &c);

	if (x->replay_maxage &&
	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
		x->xflags &= ~XFRM_TIME_DEFER;
}
static void xfrm_replay_timer_handler(unsigned long data)
{
	struct xfrm_state *x = (struct xfrm_state*)data;

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on())
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}
int xfrm_replay_check(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (unlikely(seq == 0))
		return -EINVAL;

	if (likely(seq > x->replay.seq))
		return 0;

	diff = x->replay.seq - seq;
	if (diff >= min_t(unsigned int, x->props.replay_window,
			  sizeof(x->replay.bitmap) * 8)) {
		x->stats.replay_window++;
		return -EINVAL;
	}

	if (x->replay.bitmap & (1U << diff)) {
		x->stats.replay++;
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_replay_check);
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
{
	u32 diff;
	u32 seq = ntohl(net_seq);

	if (seq > x->replay.seq) {
		diff = seq - x->replay.seq;
		if (diff < x->props.replay_window)
			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		else
			x->replay.bitmap = 1;
		x->replay.seq = seq;
	} else {
		diff = x->replay.seq - seq;
		x->replay.bitmap |= (1U << diff);
	}

	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
EXPORT_SYMBOL(xfrm_replay_advance);
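/* A standalone model of the RFC 2401-style anti-replay window used by
 * xfrm_replay_check()/xfrm_replay_advance() above: "seq" tracks the
 * highest sequence number seen, and bit N of "bitmap" records whether
 * (seq - N) has been accepted.  Assumes window <= 32, matching the
 * 32-bit bitmap here (guarded out of the build; toy_* names are
 * hypothetical):
 */
#if 0
#include <stdint.h>

struct toy_replay { uint32_t seq; uint32_t bitmap; };

static int toy_replay_check(const struct toy_replay *r, uint32_t seq,
			    unsigned int window)
{
	uint32_t diff;

	if (seq == 0)
		return -1;			/* 0 is never valid */
	if (seq > r->seq)
		return 0;			/* new highest: accept */
	diff = r->seq - seq;
	if (diff >= window)
		return -1;			/* left the window: too old */
	if (r->bitmap & (1U << diff))
		return -1;			/* already seen: replay */
	return 0;
}

static void toy_replay_advance(struct toy_replay *r, uint32_t seq,
			       unsigned int window)
{
	if (seq > r->seq) {
		uint32_t diff = seq - r->seq;

		/* slide the window forward and mark the new top bit */
		r->bitmap = diff < window ? (r->bitmap << diff) | 1 : 1;
		r->seq = seq;
	} else {
		r->bitmap |= 1U << (r->seq - seq);	/* fill in a hole */
	}
}
#endif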
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static DEFINE_RWLOCK(xfrm_km_lock);
void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
{
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	read_unlock(&xfrm_km_lock);
}
void km_state_notify(struct xfrm_state *x, struct km_event *c)
{
	struct xfrm_mgr *km;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	read_unlock(&xfrm_km_lock);
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol, XFRM_POLICY_OUT);
		if (!acqret)
			err = acqret;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_query);
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
{
	struct km_event c;

	c.data.hard = hard;
	c.pid = pid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);

	if (hard)
		wake_up(&km_waitq);
}
EXPORT_SYMBOL(km_policy_expired);
int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
	       struct xfrm_migrate *m, int num_migrate)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_migrate);
int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	read_unlock(&xfrm_km_lock);
	return err;
}
EXPORT_SYMBOL(km_report);
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = kmalloc(optlen, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(data, optval, optlen))
		goto out;

	err = -EINVAL;
	read_lock(&xfrm_km_lock);
	list_for_each_entry(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	read_unlock(&xfrm_km_lock);

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		err = 0;
	}

out:
	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);
int xfrm_register_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_add_tail(&km->list, &xfrm_km_list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_register_km);

int xfrm_unregister_km(struct xfrm_mgr *km)
{
	write_lock_bh(&xfrm_km_lock);
	list_del(&km->list);
	write_unlock_bh(&xfrm_km_lock);
	return 0;
}
EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else
		xfrm_state_afinfo[afinfo->family] = afinfo;
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			xfrm_state_afinfo[afinfo->family] = NULL;
	}
	write_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_state_afinfo_lock);
	afinfo = xfrm_state_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_state_afinfo_lock);
	return afinfo;
}

void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
	read_unlock(&xfrm_state_afinfo_lock);
}

EXPORT_SYMBOL(xfrm_state_get_afinfo);
EXPORT_SYMBOL(xfrm_state_put_afinfo);
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	int res;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_VALID &&
	    x->type && x->type->get_mtu)
		res = x->type->get_mtu(x, mtu);
	else
		res = mtu - x->props.header_len;
	spin_unlock_bh(&x->lock);
	return res;
}
int xfrm_init_state(struct xfrm_state *x)
{
	struct xfrm_state_afinfo *afinfo;
	int family = x->props.family;
	int err;

	err = -EAFNOSUPPORT;
	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		goto error;

	err = 0;
	if (afinfo->init_flags)
		err = afinfo->init_flags(x);

	xfrm_state_put_afinfo(afinfo);

	if (err)
		goto error;

	err = -EPROTONOSUPPORT;
	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL)
		goto error;

	err = x->type->init_state(x);
	if (err)
		goto error;

	x->mode = xfrm_get_mode(x->props.mode, family);
	if (x->mode == NULL)
		goto error;

	x->km.state = XFRM_STATE_VALID;

error:
	return err;
}
EXPORT_SYMBOL(xfrm_init_state);
void __init xfrm_state_init(void)
{
	unsigned int sz;

	sz = sizeof(struct hlist_head) * 8;

	xfrm_state_bydst = xfrm_hash_alloc(sz);
	xfrm_state_bysrc = xfrm_hash_alloc(sz);
	xfrm_state_byspi = xfrm_hash_alloc(sz);
	if (!xfrm_state_bydst || !xfrm_state_bysrc || !xfrm_state_byspi)
		panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
	xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}
#ifdef CONFIG_AUDITSYSCALL
static inline void xfrm_audit_common_stateinfo(struct xfrm_state *x,
					       struct audit_buffer *audit_buf)
{
	if (x->security)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 x->security->ctx_alg, x->security->ctx_doi,
				 x->security->ctx_str);

	switch(x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
				 NIPQUAD(x->props.saddr.a4),
				 NIPQUAD(x->id.daddr.a4));
		break;
	case AF_INET6:
		{
			struct in6_addr saddr6, daddr6;

			memcpy(&saddr6, x->props.saddr.a6,
			       sizeof(struct in6_addr));
			memcpy(&daddr6, x->id.daddr.a6,
			       sizeof(struct in6_addr));
			audit_log_format(audit_buf,
					 " src=" NIP6_FMT " dst=" NIP6_FMT,
					 NIP6(saddr6), NIP6(daddr6));
		}
		break;
	}
}
void
xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(sid, auid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SAD-add res=%u", result);
	xfrm_audit_common_stateinfo(x, audit_buf);
	audit_log_format(audit_buf, " spi=%lu(0x%lx)",
			 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
void
xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid)
{
	struct audit_buffer *audit_buf;
	extern int audit_enabled;

	if (audit_enabled == 0)
		return;
	audit_buf = xfrm_audit_start(sid, auid);
	if (audit_buf == NULL)
		return;
	audit_log_format(audit_buf, " op=SAD-delete res=%u", result);
	xfrm_audit_common_stateinfo(x, audit_buf);
	audit_log_format(audit_buf, " spi=%lu(0x%lx)",
			 (unsigned long)x->id.spi, (unsigned long)x->id.spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
#endif /* CONFIG_AUDITSYSCALL */