1 /* xfrm_user.c: User interface to configure xfrm engine.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
13 #include <linux/crypto.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/socket.h>
19 #include <linux/string.h>
20 #include <linux/net.h>
21 #include <linux/skbuff.h>
22 #include <linux/pfkeyv2.h>
23 #include <linux/ipsec.h>
24 #include <linux/init.h>
25 #include <linux/security.h>
28 #include <net/netlink.h>
29 #include <asm/uaccess.h>
30 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
31 #include <linux/in6.h>
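/*
 * aead_len() - total size of an xfrm_algo_aead plus its key material; the
 * key length is rounded up from bits to bytes (e.g. a 128-bit key adds
 * 16 bytes to sizeof(*alg)).
 */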
34 static inline int aead_len(struct xfrm_algo_aead *alg)
36 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
39 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
41 struct nlattr *rt = attrs[type];
42 struct xfrm_algo *algp;
48 if (nla_len(rt) < xfrm_alg_len(algp))
53 if (!algp->alg_key_len &&
54 strcmp(algp->alg_name, "digest_null") != 0)
59 if (!algp->alg_key_len &&
60 strcmp(algp->alg_name, "cipher_null") != 0)
65 /* Zero length keys are legal. */
72 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
76 static int verify_aead(struct nlattr **attrs)
78 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
79 struct xfrm_algo_aead *algp;
85 if (nla_len(rt) < aead_len(algp))
88 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
92 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
93 xfrm_address_t **addrp)
95 struct nlattr *rt = attrs[type];
98 *addrp = nla_data(rt);
101 static inline int verify_sec_ctx_len(struct nlattr **attrs)
103 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
104 struct xfrm_user_sec_ctx *uctx;
110 if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
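/*
 * verify_newsa_info() - sanity-check an incoming SA request: the address
 * family, which algorithm attributes are allowed for the given IPsec
 * protocol, and the requested mode.
 */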
117 static int verify_newsa_info(struct xfrm_usersa_info *p,
118 struct nlattr **attrs)
128 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
140 switch (p->id.proto) {
142 if (!attrs[XFRMA_ALG_AUTH] ||
143 attrs[XFRMA_ALG_AEAD] ||
144 attrs[XFRMA_ALG_CRYPT] ||
145 attrs[XFRMA_ALG_COMP])
150 if (attrs[XFRMA_ALG_COMP])
152 if (!attrs[XFRMA_ALG_AUTH] &&
153 !attrs[XFRMA_ALG_CRYPT] &&
154 !attrs[XFRMA_ALG_AEAD])
156 if ((attrs[XFRMA_ALG_AUTH] ||
157 attrs[XFRMA_ALG_CRYPT]) &&
158 attrs[XFRMA_ALG_AEAD])
163 if (!attrs[XFRMA_ALG_COMP] ||
164 attrs[XFRMA_ALG_AEAD] ||
165 attrs[XFRMA_ALG_AUTH] ||
166 attrs[XFRMA_ALG_CRYPT])
170 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
171 case IPPROTO_DSTOPTS:
172 case IPPROTO_ROUTING:
173 if (attrs[XFRMA_ALG_COMP] ||
174 attrs[XFRMA_ALG_AUTH] ||
175 attrs[XFRMA_ALG_AEAD] ||
176 attrs[XFRMA_ALG_CRYPT] ||
177 attrs[XFRMA_ENCAP] ||
178 attrs[XFRMA_SEC_CTX] ||
179 !attrs[XFRMA_COADDR])
188 if ((err = verify_aead(attrs)))
190 if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
192 if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
194 if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
196 if ((err = verify_sec_ctx_len(attrs)))
201 case XFRM_MODE_TRANSPORT:
202 case XFRM_MODE_TUNNEL:
203 case XFRM_MODE_ROUTEOPTIMIZATION:
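/*
 * attach_one_algo() - resolve an algorithm by name through the caller's
 * get_byname() hook, record its SADB id in *props and duplicate the
 * user-supplied struct xfrm_algo into kernel memory.
 */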
217 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
218 struct xfrm_algo_desc *(*get_byname)(char *, int),
221 struct xfrm_algo *p, *ualg;
222 struct xfrm_algo_desc *algo;
227 ualg = nla_data(rta);
229 algo = get_byname(ualg->alg_name, 1);
232 *props = algo->desc.sadb_alg_id;
234 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
238 strcpy(p->alg_name, algo->name);
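/*
 * attach_aead() - like attach_one_algo(), but the AEAD lookup also matches
 * on the requested ICV length.
 */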
243 static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
246 struct xfrm_algo_aead *p, *ualg;
247 struct xfrm_algo_desc *algo;
252 ualg = nla_data(rta);
254 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
257 *props = algo->desc.sadb_alg_id;
259 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
263 strcpy(p->alg_name, algo->name);
268 static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
273 len += sizeof(struct xfrm_user_sec_ctx);
274 len += xfrm_ctx->ctx_len;
279 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
281 memcpy(&x->id, &p->id, sizeof(x->id));
282 memcpy(&x->sel, &p->sel, sizeof(x->sel));
283 memcpy(&x->lft, &p->lft, sizeof(x->lft));
284 x->props.mode = p->mode;
285 x->props.replay_window = p->replay_window;
286 x->props.reqid = p->reqid;
287 x->props.family = p->family;
288 memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
289 x->props.flags = p->flags;
292 x->sel.family = p->family;
297 * someday when pfkey also has support, we could have the code
298 * somehow made shareable and moved to xfrm_state.c - JHS
301 static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
303 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
304 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
305 struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
306 struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
309 struct xfrm_replay_state *replay;
310 replay = nla_data(rp);
311 memcpy(&x->replay, replay, sizeof(*replay));
312 memcpy(&x->preplay, replay, sizeof(*replay));
316 struct xfrm_lifetime_cur *ltime;
317 ltime = nla_data(lt);
318 x->curlft.bytes = ltime->bytes;
319 x->curlft.packets = ltime->packets;
320 x->curlft.add_time = ltime->add_time;
321 x->curlft.use_time = ltime->use_time;
325 x->replay_maxage = nla_get_u32(et);
328 x->replay_maxdiff = nla_get_u32(rt);
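/*
 * xfrm_state_construct() - build a struct xfrm_state from a userspace
 * request: copy the basic parameters, attach the algorithms and the optional
 * encapsulation template and care-of address, then initialise the state.
 * On failure the half-built state is marked DEAD and released.
 */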
331 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
332 struct nlattr **attrs,
335 struct xfrm_state *x = xfrm_state_alloc();
341 copy_from_user_state(x, p);
343 if ((err = attach_aead(&x->aead, &x->props.ealgo,
344 attrs[XFRMA_ALG_AEAD])))
346 if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
347 xfrm_aalg_get_byname,
348 attrs[XFRMA_ALG_AUTH])))
350 if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
351 xfrm_ealg_get_byname,
352 attrs[XFRMA_ALG_CRYPT])))
354 if ((err = attach_one_algo(&x->calg, &x->props.calgo,
355 xfrm_calg_get_byname,
356 attrs[XFRMA_ALG_COMP])))
359 if (attrs[XFRMA_ENCAP]) {
360 x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
361 sizeof(*x->encap), GFP_KERNEL);
362 if (x->encap == NULL)
366 if (attrs[XFRMA_COADDR]) {
367 x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
368 sizeof(*x->coaddr), GFP_KERNEL);
369 if (x->coaddr == NULL)
373 err = xfrm_init_state(x);
377 if (attrs[XFRMA_SEC_CTX] &&
378 security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
382 x->replay_maxdiff = sysctl_xfrm_aevent_rseqth;
383 /* sysctl_xfrm_aevent_etime is in 100ms units */
384 x->replay_maxage = (sysctl_xfrm_aevent_etime*HZ)/XFRM_AE_ETH_M;
385 x->preplay.bitmap = 0;
386 x->preplay.seq = x->replay.seq+x->replay_maxdiff;
387 x->preplay.oseq = x->replay.oseq +x->replay_maxdiff;
389 /* override default values from above */
391 xfrm_update_ae_params(x, attrs);
396 x->km.state = XFRM_STATE_DEAD;
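/*
 * xfrm_add_sa() - handler for XFRM_MSG_NEWSA and XFRM_MSG_UPDSA: verify and
 * construct the SA, add or update it, write an audit record and notify the
 * key managers.
 */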
403 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
404 struct nlattr **attrs)
406 struct xfrm_usersa_info *p = nlmsg_data(nlh);
407 struct xfrm_state *x;
410 uid_t loginuid = NETLINK_CB(skb).loginuid;
411 u32 sessionid = NETLINK_CB(skb).sessionid;
412 u32 sid = NETLINK_CB(skb).sid;
414 err = verify_newsa_info(p, attrs);
418 x = xfrm_state_construct(p, attrs, &err);
423 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
424 err = xfrm_state_add(x);
426 err = xfrm_state_update(x);
428 xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
431 x->km.state = XFRM_STATE_DEAD;
436 c.seq = nlh->nlmsg_seq;
437 c.pid = nlh->nlmsg_pid;
438 c.event = nlh->nlmsg_type;
440 km_state_notify(x, &c);
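/*
 * xfrm_user_state_lookup() - find an SA either by (daddr, spi, proto) or,
 * when the protocol carries no usable SPI, by source/destination address.
 */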
446 static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p,
447 struct nlattr **attrs,
450 struct xfrm_state *x = NULL;
453 if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
455 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
457 xfrm_address_t *saddr = NULL;
459 verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
466 x = xfrm_state_lookup_byaddr(&p->daddr, saddr, p->proto,
476 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
477 struct nlattr **attrs)
479 struct xfrm_state *x;
482 struct xfrm_usersa_id *p = nlmsg_data(nlh);
483 uid_t loginuid = NETLINK_CB(skb).loginuid;
484 u32 sessionid = NETLINK_CB(skb).sessionid;
485 u32 sid = NETLINK_CB(skb).sid;
487 x = xfrm_user_state_lookup(p, attrs, &err);
491 if ((err = security_xfrm_state_delete(x)) != 0)
494 if (xfrm_state_kern(x)) {
499 err = xfrm_state_delete(x);
504 c.seq = nlh->nlmsg_seq;
505 c.pid = nlh->nlmsg_pid;
506 c.event = nlh->nlmsg_type;
507 km_state_notify(x, &c);
510 xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
515 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
517 memcpy(&p->id, &x->id, sizeof(p->id));
518 memcpy(&p->sel, &x->sel, sizeof(p->sel));
519 memcpy(&p->lft, &x->lft, sizeof(p->lft));
520 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
521 memcpy(&p->stats, &x->stats, sizeof(p->stats));
522 memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
523 p->mode = x->props.mode;
524 p->replay_window = x->props.replay_window;
525 p->reqid = x->props.reqid;
526 p->family = x->props.family;
527 p->flags = x->props.flags;
531 struct xfrm_dump_info {
532 struct sk_buff *in_skb;
533 struct sk_buff *out_skb;
538 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
540 struct xfrm_user_sec_ctx *uctx;
542 int ctx_size = sizeof(*uctx) + s->ctx_len;
544 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
548 uctx = nla_data(attr);
549 uctx->exttype = XFRMA_SEC_CTX;
550 uctx->len = ctx_size;
551 uctx->ctx_doi = s->ctx_doi;
552 uctx->ctx_alg = s->ctx_alg;
553 uctx->ctx_len = s->ctx_len;
554 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
559 /* Don't change this without updating xfrm_sa_len! */
560 static int copy_to_user_state_extra(struct xfrm_state *x,
561 struct xfrm_usersa_info *p,
564 copy_to_user_state(x, p);
567 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
570 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
573 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
575 NLA_PUT(skb, XFRMA_ALG_AUTH, xfrm_alg_len(x->aalg), x->aalg);
577 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
579 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
582 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
584 if (x->security && copy_sec_ctx(x->security, skb) < 0)
585 goto nla_put_failure;
593 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
595 struct xfrm_dump_info *sp = ptr;
596 struct sk_buff *in_skb = sp->in_skb;
597 struct sk_buff *skb = sp->out_skb;
598 struct xfrm_usersa_info *p;
599 struct nlmsghdr *nlh;
602 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
603 XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
609 err = copy_to_user_state_extra(x, p, skb);
611 goto nla_put_failure;
617 nlmsg_cancel(skb, nlh);
621 static int xfrm_dump_sa_done(struct netlink_callback *cb)
623 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
624 xfrm_state_walk_done(walk);
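/*
 * xfrm_dump_sa() - netlink dump callback for XFRM_MSG_GETSA; the walk state
 * lives in cb->args[1..] between calls (hence the BUILD_BUG_ON below).
 */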
628 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
630 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
631 struct xfrm_dump_info info;
633 BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
634 sizeof(cb->args) - sizeof(cb->args[0]));
636 info.in_skb = cb->skb;
638 info.nlmsg_seq = cb->nlh->nlmsg_seq;
639 info.nlmsg_flags = NLM_F_MULTI;
643 xfrm_state_walk_init(walk, 0);
646 (void) xfrm_state_walk(walk, dump_one_state, &info);
651 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
652 struct xfrm_state *x, u32 seq)
654 struct xfrm_dump_info info;
657 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
659 return ERR_PTR(-ENOMEM);
661 info.in_skb = in_skb;
663 info.nlmsg_seq = seq;
664 info.nlmsg_flags = 0;
666 if (dump_one_state(x, 0, &info)) {
674 static inline size_t xfrm_spdinfo_msgsize(void)
676 return NLMSG_ALIGN(4)
677 + nla_total_size(sizeof(struct xfrmu_spdinfo))
678 + nla_total_size(sizeof(struct xfrmu_spdhinfo));
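/*
 * build_spdinfo() - fill an XFRM_MSG_NEWSPDINFO reply with the SPD counters
 * reported by xfrm_spd_getinfo().
 */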
681 static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
683 struct xfrmk_spdinfo si;
684 struct xfrmu_spdinfo spc;
685 struct xfrmu_spdhinfo sph;
686 struct nlmsghdr *nlh;
689 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
690 if (nlh == NULL) /* shouldn't really happen ... */
695 xfrm_spd_getinfo(&si);
696 spc.incnt = si.incnt;
697 spc.outcnt = si.outcnt;
698 spc.fwdcnt = si.fwdcnt;
699 spc.inscnt = si.inscnt;
700 spc.outscnt = si.outscnt;
701 spc.fwdscnt = si.fwdscnt;
702 sph.spdhcnt = si.spdhcnt;
703 sph.spdhmcnt = si.spdhmcnt;
705 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
706 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
708 return nlmsg_end(skb, nlh);
711 nlmsg_cancel(skb, nlh);
715 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
716 struct nlattr **attrs)
718 struct sk_buff *r_skb;
719 u32 *flags = nlmsg_data(nlh);
720 u32 spid = NETLINK_CB(skb).pid;
721 u32 seq = nlh->nlmsg_seq;
723 r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
727 if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
730 return nlmsg_unicast(xfrm_nl, r_skb, spid);
733 static inline size_t xfrm_sadinfo_msgsize(void)
735 return NLMSG_ALIGN(4)
736 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
737 + nla_total_size(4); /* XFRMA_SAD_CNT */
740 static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
742 struct xfrmk_sadinfo si;
743 struct xfrmu_sadhinfo sh;
744 struct nlmsghdr *nlh;
747 nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
748 if (nlh == NULL) /* shouldn't really happen ... */
753 xfrm_sad_getinfo(&si);
755 sh.sadhmcnt = si.sadhmcnt;
756 sh.sadhcnt = si.sadhcnt;
758 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
759 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
761 return nlmsg_end(skb, nlh);
764 nlmsg_cancel(skb, nlh);
768 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
769 struct nlattr **attrs)
771 struct sk_buff *r_skb;
772 u32 *flags = nlmsg_data(nlh);
773 u32 spid = NETLINK_CB(skb).pid;
774 u32 seq = nlh->nlmsg_seq;
776 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
780 if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
783 return nlmsg_unicast(xfrm_nl, r_skb, spid);
786 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
787 struct nlattr **attrs)
789 struct xfrm_usersa_id *p = nlmsg_data(nlh);
790 struct xfrm_state *x;
791 struct sk_buff *resp_skb;
794 x = xfrm_user_state_lookup(p, attrs, &err);
798 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
799 if (IS_ERR(resp_skb)) {
800 err = PTR_ERR(resp_skb);
802 err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid);
809 static int verify_userspi_info(struct xfrm_userspi_info *p)
811 switch (p->info.id.proto) {
817 /* IPCOMP SPI is 16 bits. */
818 if (p->max >= 0x10000)
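/*
 * xfrm_alloc_userspi() - handler for XFRM_MSG_ALLOCSPI: locate (or create via
 * xfrm_find_acq()) a matching SA, allocate an SPI from the requested
 * [min, max] range and return the resulting SA to the requester.
 */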
832 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
833 struct nlattr **attrs)
835 struct xfrm_state *x;
836 struct xfrm_userspi_info *p;
837 struct sk_buff *resp_skb;
838 xfrm_address_t *daddr;
843 err = verify_userspi_info(p);
847 family = p->info.family;
848 daddr = &p->info.id.daddr;
852 x = xfrm_find_acq_byseq(p->info.seq);
853 if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
860 x = xfrm_find_acq(p->info.mode, p->info.reqid,
861 p->info.id.proto, daddr,
868 err = xfrm_alloc_spi(x, p->min, p->max);
872 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
873 if (IS_ERR(resp_skb)) {
874 err = PTR_ERR(resp_skb);
878 err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid);
886 static int verify_policy_dir(u8 dir)
890 case XFRM_POLICY_OUT:
891 case XFRM_POLICY_FWD:
901 static int verify_policy_type(u8 type)
904 case XFRM_POLICY_TYPE_MAIN:
905 #ifdef CONFIG_XFRM_SUB_POLICY
906 case XFRM_POLICY_TYPE_SUB:
917 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
921 case XFRM_SHARE_SESSION:
922 case XFRM_SHARE_USER:
923 case XFRM_SHARE_UNIQUE:
931 case XFRM_POLICY_ALLOW:
932 case XFRM_POLICY_BLOCK:
939 switch (p->sel.family) {
944 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
947 return -EAFNOSUPPORT;
954 return verify_policy_dir(p->dir);
957 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
959 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
960 struct xfrm_user_sec_ctx *uctx;
966 return security_xfrm_policy_alloc(&pol->security, uctx);
969 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
975 for (i = 0; i < nr; i++, ut++) {
976 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
978 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
979 memcpy(&t->saddr, &ut->saddr,
980 sizeof(xfrm_address_t));
981 t->reqid = ut->reqid;
983 t->share = ut->share;
984 t->optional = ut->optional;
985 t->aalgos = ut->aalgos;
986 t->ealgos = ut->ealgos;
987 t->calgos = ut->calgos;
988 /* If all masks are ~0, then we allow all algorithms. */
989 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
990 t->encap_family = ut->family;
994 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
998 if (nr > XFRM_MAX_DEPTH)
1001 for (i = 0; i < nr; i++) {
1002 /* We never validated the ut->family value, so many
1003 * applications simply leave it at zero. The check was
1004 * never made and ut->family was ignored because all
1005 * templates could be assumed to have the same family as
1006 * the policy itself. Now that we will have ipv4-in-ipv6
1007 * and ipv6-in-ipv4 tunnels, this is no longer true.
1010 ut[i].family = family;
1012 switch (ut[i].family) {
1015 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1027 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1029 struct nlattr *rt = attrs[XFRMA_TMPL];
1034 struct xfrm_user_tmpl *utmpl = nla_data(rt);
1035 int nr = nla_len(rt) / sizeof(*utmpl);
1038 err = validate_tmpl(nr, utmpl, pol->family);
1042 copy_templates(pol, utmpl, nr);
1047 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1049 struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1050 struct xfrm_userpolicy_type *upt;
1051 u8 type = XFRM_POLICY_TYPE_MAIN;
1059 err = verify_policy_type(type);
1067 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1069 xp->priority = p->priority;
1070 xp->index = p->index;
1071 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1072 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1073 xp->action = p->action;
1074 xp->flags = p->flags;
1075 xp->family = p->sel.family;
1076 /* XXX xp->share = p->share; */
1079 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1081 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1082 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1083 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1084 p->priority = xp->priority;
1085 p->index = xp->index;
1086 p->sel.family = xp->family;
1088 p->action = xp->action;
1089 p->flags = xp->flags;
1090 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
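/*
 * xfrm_policy_construct() - allocate a policy and fill it from the userspace
 * request: base fields, policy type, templates and security context.
 */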
1093 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
1095 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
1103 copy_from_user_policy(xp, p);
1105 err = copy_from_user_policy_type(&xp->type, attrs);
1109 if (!(err = copy_from_user_tmpl(xp, attrs)))
1110 err = copy_from_user_sec_ctx(xp, attrs);
1118 xfrm_policy_destroy(xp);
1122 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1123 struct nlattr **attrs)
1125 struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
1126 struct xfrm_policy *xp;
1130 uid_t loginuid = NETLINK_CB(skb).loginuid;
1131 u32 sessionid = NETLINK_CB(skb).sessionid;
1132 u32 sid = NETLINK_CB(skb).sid;
1134 err = verify_newpolicy_info(p);
1137 err = verify_sec_ctx_len(attrs);
1141 xp = xfrm_policy_construct(p, attrs, &err);
1145 /* Shouldn't excl be based on nlh flags??
1146  * Aha! This is anti-netlink really, i.e. more pfkey-derived:
1147  * in netlink, excl is a flag and you wouldn't need
1148  * a type XFRM_MSG_UPDPOLICY - JHS */
1149 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
1150 err = xfrm_policy_insert(p->dir, xp, excl);
1151 xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
1154 security_xfrm_policy_free(xp->security);
1159 c.event = nlh->nlmsg_type;
1160 c.seq = nlh->nlmsg_seq;
1161 c.pid = nlh->nlmsg_pid;
1162 km_policy_notify(xp, p->dir, &c);
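/*
 * copy_to_user_tmpl() - emit the policy's templates as a single XFRMA_TMPL
 * attribute holding an array of struct xfrm_user_tmpl.
 */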
1169 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1171 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1174 if (xp->xfrm_nr == 0)
1177 for (i = 0; i < xp->xfrm_nr; i++) {
1178 struct xfrm_user_tmpl *up = &vec[i];
1179 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1181 memcpy(&up->id, &kp->id, sizeof(up->id));
1182 up->family = kp->encap_family;
1183 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1184 up->reqid = kp->reqid;
1185 up->mode = kp->mode;
1186 up->share = kp->share;
1187 up->optional = kp->optional;
1188 up->aalgos = kp->aalgos;
1189 up->ealgos = kp->ealgos;
1190 up->calgos = kp->calgos;
1193 return nla_put(skb, XFRMA_TMPL,
1194 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1197 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1200 return copy_sec_ctx(x->security, skb);
1205 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1208 return copy_sec_ctx(xp->security, skb);
1212 static inline size_t userpolicy_type_attrsize(void)
1214 #ifdef CONFIG_XFRM_SUB_POLICY
1215 return nla_total_size(sizeof(struct xfrm_userpolicy_type));
1221 #ifdef CONFIG_XFRM_SUB_POLICY
1222 static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1224 struct xfrm_userpolicy_type upt = {
1228 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1232 static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1238 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
1240 struct xfrm_dump_info *sp = ptr;
1241 struct xfrm_userpolicy_info *p;
1242 struct sk_buff *in_skb = sp->in_skb;
1243 struct sk_buff *skb = sp->out_skb;
1244 struct nlmsghdr *nlh;
1246 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
1247 XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
1251 p = nlmsg_data(nlh);
1252 copy_to_user_policy(xp, p, dir);
1253 if (copy_to_user_tmpl(xp, skb) < 0)
1255 if (copy_to_user_sec_ctx(xp, skb))
1257 if (copy_to_user_policy_type(xp->type, skb) < 0)
1260 nlmsg_end(skb, nlh);
1264 nlmsg_cancel(skb, nlh);
1268 static int xfrm_dump_policy_done(struct netlink_callback *cb)
1270 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1272 xfrm_policy_walk_done(walk);
1276 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1278 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
1279 struct xfrm_dump_info info;
1281 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1282 sizeof(cb->args) - sizeof(cb->args[0]));
1284 info.in_skb = cb->skb;
1286 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1287 info.nlmsg_flags = NLM_F_MULTI;
1291 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1294 (void) xfrm_policy_walk(walk, dump_one_policy, &info);
1299 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
1300 struct xfrm_policy *xp,
1303 struct xfrm_dump_info info;
1304 struct sk_buff *skb;
1306 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1308 return ERR_PTR(-ENOMEM);
1310 info.in_skb = in_skb;
1312 info.nlmsg_seq = seq;
1313 info.nlmsg_flags = 0;
1315 if (dump_one_policy(xp, dir, 0, &info) < 0) {
1323 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1324 struct nlattr **attrs)
1326 struct xfrm_policy *xp;
1327 struct xfrm_userpolicy_id *p;
1328 u8 type = XFRM_POLICY_TYPE_MAIN;
1333 p = nlmsg_data(nlh);
1334 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
1336 err = copy_from_user_policy_type(&type, attrs);
1340 err = verify_policy_dir(p->dir);
1345 xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err);
1347 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1348 struct xfrm_sec_ctx *ctx;
1350 err = verify_sec_ctx_len(attrs);
1356 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1358 err = security_xfrm_policy_alloc(&ctx, uctx);
1362 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, ctx,
1364 security_xfrm_policy_free(ctx);
1370 struct sk_buff *resp_skb;
1372 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
1373 if (IS_ERR(resp_skb)) {
1374 err = PTR_ERR(resp_skb);
1376 err = nlmsg_unicast(xfrm_nl, resp_skb,
1377 NETLINK_CB(skb).pid);
1380 uid_t loginuid = NETLINK_CB(skb).loginuid;
1381 u32 sessionid = NETLINK_CB(skb).sessionid;
1382 u32 sid = NETLINK_CB(skb).sid;
1384 xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
1390 c.data.byid = p->index;
1391 c.event = nlh->nlmsg_type;
1392 c.seq = nlh->nlmsg_seq;
1393 c.pid = nlh->nlmsg_pid;
1394 km_policy_notify(xp, p->dir, &c);
1402 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1403 struct nlattr **attrs)
1406 struct xfrm_usersa_flush *p = nlmsg_data(nlh);
1407 struct xfrm_audit audit_info;
1410 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1411 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1412 audit_info.secid = NETLINK_CB(skb).sid;
1413 err = xfrm_state_flush(p->proto, &audit_info);
1416 c.data.proto = p->proto;
1417 c.event = nlh->nlmsg_type;
1418 c.seq = nlh->nlmsg_seq;
1419 c.pid = nlh->nlmsg_pid;
1420 km_state_notify(NULL, &c);
1425 static inline size_t xfrm_aevent_msgsize(void)
1427 return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
1428 + nla_total_size(sizeof(struct xfrm_replay_state))
1429 + nla_total_size(sizeof(struct xfrm_lifetime_cur))
1430 + nla_total_size(4) /* XFRM_AE_RTHR */
1431 + nla_total_size(4); /* XFRM_AE_ETHR */
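/*
 * build_aevent() - fill an XFRM_MSG_NEWAE message with the SA's replay state
 * and current lifetime, plus the replay/expiry thresholds when requested.
 */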
1434 static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1436 struct xfrm_aevent_id *id;
1437 struct nlmsghdr *nlh;
1439 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
1443 id = nlmsg_data(nlh);
1444 memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr));
1445 id->sa_id.spi = x->id.spi;
1446 id->sa_id.family = x->props.family;
1447 id->sa_id.proto = x->id.proto;
1448 memcpy(&id->saddr, &x->props.saddr,sizeof(x->props.saddr));
1449 id->reqid = x->props.reqid;
1450 id->flags = c->data.aevent;
1452 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);
1453 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
1455 if (id->flags & XFRM_AE_RTHR)
1456 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
1458 if (id->flags & XFRM_AE_ETHR)
1459 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
1460 x->replay_maxage * 10 / HZ);
1462 return nlmsg_end(skb, nlh);
1465 nlmsg_cancel(skb, nlh);
1469 static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1470 struct nlattr **attrs)
1472 struct xfrm_state *x;
1473 struct sk_buff *r_skb;
1476 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1477 struct xfrm_usersa_id *id = &p->sa_id;
1479 r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
1483 x = xfrm_state_lookup(&id->daddr, id->spi, id->proto, id->family);
1490 * XXX: is this lock really needed - none of the other
1491 * get handlers take the lock (the concern is things getting updated
1492 * while we are still reading) - jhs
1494 spin_lock_bh(&x->lock);
1495 c.data.aevent = p->flags;
1496 c.seq = nlh->nlmsg_seq;
1497 c.pid = nlh->nlmsg_pid;
1499 if (build_aevent(r_skb, x, &c) < 0)
1501 err = nlmsg_unicast(xfrm_nl, r_skb, NETLINK_CB(skb).pid);
1502 spin_unlock_bh(&x->lock);
1507 static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
1508 struct nlattr **attrs)
1510 struct xfrm_state *x;
1513 struct xfrm_aevent_id *p = nlmsg_data(nlh);
1514 struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
1515 struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
1520 /* pedantic mode - thou shalt sayeth replaceth */
1521 if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
1524 x = xfrm_state_lookup(&p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
1528 if (x->km.state != XFRM_STATE_VALID)
1531 spin_lock_bh(&x->lock);
1532 xfrm_update_ae_params(x, attrs);
1533 spin_unlock_bh(&x->lock);
1535 c.event = nlh->nlmsg_type;
1536 c.seq = nlh->nlmsg_seq;
1537 c.pid = nlh->nlmsg_pid;
1538 c.data.aevent = XFRM_AE_CU;
1539 km_state_notify(x, &c);
1546 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1547 struct nlattr **attrs)
1550 u8 type = XFRM_POLICY_TYPE_MAIN;
1552 struct xfrm_audit audit_info;
1554 err = copy_from_user_policy_type(&type, attrs);
1558 audit_info.loginuid = NETLINK_CB(skb).loginuid;
1559 audit_info.sessionid = NETLINK_CB(skb).sessionid;
1560 audit_info.secid = NETLINK_CB(skb).sid;
1561 err = xfrm_policy_flush(type, &audit_info);
1565 c.event = nlh->nlmsg_type;
1566 c.seq = nlh->nlmsg_seq;
1567 c.pid = nlh->nlmsg_pid;
1568 km_policy_notify(NULL, 0, &c);
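/*
 * xfrm_add_pol_expire() - handler for XFRM_MSG_POLEXPIRE: look the policy up
 * by index or by selector/context, delete and audit it on a hard expiry, and
 * tell the key managers it expired.
 */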
1572 static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1573 struct nlattr **attrs)
1575 struct xfrm_policy *xp;
1576 struct xfrm_user_polexpire *up = nlmsg_data(nlh);
1577 struct xfrm_userpolicy_info *p = &up->pol;
1578 u8 type = XFRM_POLICY_TYPE_MAIN;
1581 err = copy_from_user_policy_type(&type, attrs);
1586 xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err);
1588 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1589 struct xfrm_sec_ctx *ctx;
1591 err = verify_sec_ctx_len(attrs);
1597 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1599 err = security_xfrm_policy_alloc(&ctx, uctx);
1603 xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, ctx, 0, &err);
1604 security_xfrm_policy_free(ctx);
1609 read_lock(&xp->lock);
1611 read_unlock(&xp->lock);
1615 read_unlock(&xp->lock);
1618 uid_t loginuid = NETLINK_CB(skb).loginuid;
1619 u32 sessionid = NETLINK_CB(skb).sessionid;
1620 u32 sid = NETLINK_CB(skb).sid;
1621 xfrm_policy_delete(xp, p->dir);
1622 xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
1625 /* reset the timers here? */
1626 printk("Don't know what to do with soft policy expire\n");
1628 km_policy_expired(xp, p->dir, up->hard, current->pid);
1635 static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1636 struct nlattr **attrs)
1638 struct xfrm_state *x;
1640 struct xfrm_user_expire *ue = nlmsg_data(nlh);
1641 struct xfrm_usersa_info *p = &ue->state;
1643 x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family);
1649 spin_lock_bh(&x->lock);
1651 if (x->km.state != XFRM_STATE_VALID)
1653 km_state_expired(x, ue->hard, current->pid);
1656 uid_t loginuid = NETLINK_CB(skb).loginuid;
1657 u32 sessionid = NETLINK_CB(skb).sessionid;
1658 u32 sid = NETLINK_CB(skb).sid;
1659 __xfrm_state_delete(x);
1660 xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
1664 spin_unlock_bh(&x->lock);
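/*
 * xfrm_add_acquire() - handler for XFRM_MSG_ACQUIRE: build a temporary state
 * and policy from the request and call km_query() for every template.
 */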
1669 static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
1670 struct nlattr **attrs)
1672 struct xfrm_policy *xp;
1673 struct xfrm_user_tmpl *ut;
1675 struct nlattr *rt = attrs[XFRMA_TMPL];
1677 struct xfrm_user_acquire *ua = nlmsg_data(nlh);
1678 struct xfrm_state *x = xfrm_state_alloc();
1684 err = verify_newpolicy_info(&ua->policy);
1686 printk("BAD policy passed\n");
1692 xp = xfrm_policy_construct(&ua->policy, attrs, &err);
1698 memcpy(&x->id, &ua->id, sizeof(ua->id));
1699 memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
1700 memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
1703 /* extract the templates and for each call km_query */
1704 for (i = 0; i < xp->xfrm_nr; i++, ut++) {
1705 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1706 memcpy(&x->id, &t->id, sizeof(x->id));
1707 x->props.mode = t->mode;
1708 x->props.reqid = t->reqid;
1709 x->props.family = ut->family;
1710 t->aalgos = ua->aalgos;
1711 t->ealgos = ua->ealgos;
1712 t->calgos = ua->calgos;
1713 err = km_query(x, t, xp);
1723 #ifdef CONFIG_XFRM_MIGRATE
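/*
 * copy_from_user_migrate() - copy the XFRMA_MIGRATE attribute into an array
 * of struct xfrm_migrate, bounded by XFRM_MAX_DEPTH entries.
 */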
1724 static int copy_from_user_migrate(struct xfrm_migrate *ma,
1725 struct nlattr **attrs, int *num)
1727 struct nlattr *rt = attrs[XFRMA_MIGRATE];
1728 struct xfrm_user_migrate *um;
1732 num_migrate = nla_len(rt) / sizeof(*um);
1734 if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
1737 for (i = 0; i < num_migrate; i++, um++, ma++) {
1738 memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
1739 memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
1740 memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
1741 memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
1743 ma->proto = um->proto;
1744 ma->mode = um->mode;
1745 ma->reqid = um->reqid;
1747 ma->old_family = um->old_family;
1748 ma->new_family = um->new_family;
1755 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1756 struct nlattr **attrs)
1758 struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
1759 struct xfrm_migrate m[XFRM_MAX_DEPTH];
1764 if (attrs[XFRMA_MIGRATE] == NULL)
1767 err = copy_from_user_policy_type(&type, attrs);
1771 err = copy_from_user_migrate((struct xfrm_migrate *)m,
1779 xfrm_migrate(&pi->sel, pi->dir, type, m, n);
1784 static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
1785 struct nlattr **attrs)
1787 return -ENOPROTOOPT;
1791 #ifdef CONFIG_XFRM_MIGRATE
1792 static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
1794 struct xfrm_user_migrate um;
1796 memset(&um, 0, sizeof(um));
1797 um.proto = m->proto;
1799 um.reqid = m->reqid;
1800 um.old_family = m->old_family;
1801 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
1802 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
1803 um.new_family = m->new_family;
1804 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
1805 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
1807 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
1810 static inline size_t xfrm_migrate_msgsize(int num_migrate)
1812 return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
1813 + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
1814 + userpolicy_type_attrsize();
1817 static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
1818 int num_migrate, struct xfrm_selector *sel,
1821 struct xfrm_migrate *mp;
1822 struct xfrm_userpolicy_id *pol_id;
1823 struct nlmsghdr *nlh;
1826 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
1830 pol_id = nlmsg_data(nlh);
1831 /* copy data from selector, dir, and type to the pol_id */
1832 memset(pol_id, 0, sizeof(*pol_id));
1833 memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
1836 if (copy_to_user_policy_type(type, skb) < 0)
1839 for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
1840 if (copy_to_user_migrate(mp, skb) < 0)
1844 return nlmsg_end(skb, nlh);
1846 nlmsg_cancel(skb, nlh);
1850 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1851 struct xfrm_migrate *m, int num_migrate)
1853 struct sk_buff *skb;
1855 skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate), GFP_ATOMIC);
1860 if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0)
1863 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
1866 static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1867 struct xfrm_migrate *m, int num_migrate)
1869 return -ENOPROTOOPT;
1873 #define XMSGSIZE(type) sizeof(struct type)
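/* Minimum payload length of each message type, indexed by type - XFRM_MSG_BASE. */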
1875 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
1876 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1877 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1878 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
1879 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1880 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1881 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1882 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
1883 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
1884 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
1885 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
1886 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
1887 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
1888 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
1889 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
1890 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1891 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
1892 [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
1893 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
1894 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
1895 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
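/* Attribute policy: minimum lengths and scalar types for each xfrm attribute. */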
1900 static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
1901 [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
1902 [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
1903 [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
1904 [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
1905 [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
1906 [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
1907 [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
1908 [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
1909 [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
1910 [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
1911 [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
1912 [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
1913 [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
1914 [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
1915 [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
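/* Dispatch table mapping each message type to its doit/dump/done handlers. */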
1918 static struct xfrm_link {
1919 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
1920 int (*dump)(struct sk_buff *, struct netlink_callback *);
1921 int (*done)(struct netlink_callback *);
1922 } xfrm_dispatch[XFRM_NR_MSGTYPES] = {
1923 [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1924 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
1925 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
1926 .dump = xfrm_dump_sa,
1927 .done = xfrm_dump_sa_done },
1928 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1929 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
1930 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
1931 .dump = xfrm_dump_policy,
1932 .done = xfrm_dump_policy_done },
1933 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
1934 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
1935 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
1936 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
1937 [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
1938 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
1939 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
1940 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
1941 [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
1942 [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
1943 [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
1944 [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
1945 [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
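/*
 * xfrm_user_rcv_msg() - top-level request handler: check privileges, start a
 * dump for GET requests carrying NLM_F_DUMP, otherwise parse the attributes
 * and invoke the per-type doit() handler.
 */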
1948 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1950 struct nlattr *attrs[XFRMA_MAX+1];
1951 struct xfrm_link *link;
1954 type = nlh->nlmsg_type;
1955 if (type > XFRM_MSG_MAX)
1958 type -= XFRM_MSG_BASE;
1959 link = &xfrm_dispatch[type];
1961 /* All operations require privileges, even GET */
1962 if (security_netlink_recv(skb, CAP_NET_ADMIN))
1965 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
1966 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
1967 (nlh->nlmsg_flags & NLM_F_DUMP)) {
1968 if (link->dump == NULL)
1971 return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, link->done);
1974 err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
1979 if (link->doit == NULL)
1982 return link->doit(skb, nlh, attrs);
1985 static void xfrm_netlink_rcv(struct sk_buff *skb)
1987 mutex_lock(&xfrm_cfg_mutex);
1988 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
1989 mutex_unlock(&xfrm_cfg_mutex);
1992 static inline size_t xfrm_expire_msgsize(void)
1994 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire));
1997 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
1999 struct xfrm_user_expire *ue;
2000 struct nlmsghdr *nlh;
2002 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
2006 ue = nlmsg_data(nlh);
2007 copy_to_user_state(x, &ue->state);
2008 ue->hard = (c->data.hard != 0) ? 1 : 0;
2010 return nlmsg_end(skb, nlh);
2013 static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
2015 struct sk_buff *skb;
2017 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2021 if (build_expire(skb, x, c) < 0)
2024 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2027 static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c)
2029 struct sk_buff *skb;
2031 skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
2035 if (build_aevent(skb, x, c) < 0)
2038 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
2041 static int xfrm_notify_sa_flush(struct km_event *c)
2043 struct xfrm_usersa_flush *p;
2044 struct nlmsghdr *nlh;
2045 struct sk_buff *skb;
2046 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
2048 skb = nlmsg_new(len, GFP_ATOMIC);
2052 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2058 p = nlmsg_data(nlh);
2059 p->proto = c->data.proto;
2061 nlmsg_end(skb, nlh);
2063 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
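/*
 * xfrm_sa_len() - upper bound on the attribute payload produced by
 * copy_to_user_state_extra(); the two must be kept in sync.
 */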
2066 static inline size_t xfrm_sa_len(struct xfrm_state *x)
2070 l += nla_total_size(aead_len(x->aead));
2072 l += nla_total_size(xfrm_alg_len(x->aalg));
2074 l += nla_total_size(xfrm_alg_len(x->ealg));
2076 l += nla_total_size(sizeof(*x->calg));
2078 l += nla_total_size(sizeof(*x->encap));
2080 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
2081 x->security->ctx_len);
2083 l += nla_total_size(sizeof(*x->coaddr));
2085 /* Must count x->lastused as it may become non-zero behind our back. */
2086 l += nla_total_size(sizeof(u64));
2091 static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
2093 struct xfrm_usersa_info *p;
2094 struct xfrm_usersa_id *id;
2095 struct nlmsghdr *nlh;
2096 struct sk_buff *skb;
2097 int len = xfrm_sa_len(x);
2100 headlen = sizeof(*p);
2101 if (c->event == XFRM_MSG_DELSA) {
2102 len += nla_total_size(headlen);
2103 headlen = sizeof(*id);
2105 len += NLMSG_ALIGN(headlen);
2107 skb = nlmsg_new(len, GFP_ATOMIC);
2111 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2113 goto nla_put_failure;
2115 p = nlmsg_data(nlh);
2116 if (c->event == XFRM_MSG_DELSA) {
2117 struct nlattr *attr;
2119 id = nlmsg_data(nlh);
2120 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
2121 id->spi = x->id.spi;
2122 id->family = x->props.family;
2123 id->proto = x->id.proto;
2125 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
2127 goto nla_put_failure;
2132 if (copy_to_user_state_extra(x, p, skb))
2133 goto nla_put_failure;
2135 nlmsg_end(skb, nlh);
2137 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
2140 /* Somebody screwed up with xfrm_sa_len! */
2146 static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c)
2150 case XFRM_MSG_EXPIRE:
2151 return xfrm_exp_state_notify(x, c);
2152 case XFRM_MSG_NEWAE:
2153 return xfrm_aevent_state_notify(x, c);
2154 case XFRM_MSG_DELSA:
2155 case XFRM_MSG_UPDSA:
2156 case XFRM_MSG_NEWSA:
2157 return xfrm_notify_sa(x, c);
2158 case XFRM_MSG_FLUSHSA:
2159 return xfrm_notify_sa_flush(c);
2161 printk("xfrm_user: Unknown SA event %d\n", c->event);
2169 static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
2170 struct xfrm_policy *xp)
2172 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
2173 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2174 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
2175 + userpolicy_type_attrsize();
2178 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
2179 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
2182 struct xfrm_user_acquire *ua;
2183 struct nlmsghdr *nlh;
2184 __u32 seq = xfrm_get_acqseq();
2186 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
2190 ua = nlmsg_data(nlh);
2191 memcpy(&ua->id, &x->id, sizeof(ua->id));
2192 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
2193 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
2194 copy_to_user_policy(xp, &ua->policy, dir);
2195 ua->aalgos = xt->aalgos;
2196 ua->ealgos = xt->ealgos;
2197 ua->calgos = xt->calgos;
2198 ua->seq = x->km.seq = seq;
2200 if (copy_to_user_tmpl(xp, skb) < 0)
2202 if (copy_to_user_state_sec_ctx(x, skb))
2204 if (copy_to_user_policy_type(xp->type, skb) < 0)
2207 return nlmsg_end(skb, nlh);
2210 nlmsg_cancel(skb, nlh);
2214 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
2215 struct xfrm_policy *xp, int dir)
2217 struct sk_buff *skb;
2219 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
2223 if (build_acquire(skb, x, xt, xp, dir) < 0)
2226 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
2229 /* User gives us xfrm_userpolicy_info followed by an array of 0
2230  * or more templates. */
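/*
 * A minimal userspace sketch of that layout (hypothetical, and assuming the
 * usual IP_XFRM_POLICY / IPV6_XFRM_POLICY setsockopt path that feeds this
 * hook):
 *
 *	struct {
 *		struct xfrm_userpolicy_info info;
 *		struct xfrm_user_tmpl tmpl[1];
 *	} req;
 *
 *	... fill req.info (selector, dir, action) and req.tmpl[0] ...
 *	setsockopt(fd, IPPROTO_IP, IP_XFRM_POLICY, &req, sizeof(req));
 */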
2232 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
2233 u8 *data, int len, int *dir)
2235 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
2236 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
2237 struct xfrm_policy *xp;
2240 switch (sk->sk_family) {
2242 if (opt != IP_XFRM_POLICY) {
2247 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2249 if (opt != IPV6_XFRM_POLICY) {
2262 if (len < sizeof(*p) ||
2263 verify_newpolicy_info(p))
2266 nr = ((len - sizeof(*p)) / sizeof(*ut));
2267 if (validate_tmpl(nr, ut, p->sel.family))
2270 if (p->dir > XFRM_POLICY_OUT)
2273 xp = xfrm_policy_alloc(GFP_KERNEL);
2279 copy_from_user_policy(xp, p);
2280 xp->type = XFRM_POLICY_TYPE_MAIN;
2281 copy_templates(xp, ut, nr);
2288 static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
2290 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
2291 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
2292 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
2293 + userpolicy_type_attrsize();
2296 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
2297 int dir, struct km_event *c)
2299 struct xfrm_user_polexpire *upe;
2300 struct nlmsghdr *nlh;
2301 int hard = c->data.hard;
2303 nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
2307 upe = nlmsg_data(nlh);
2308 copy_to_user_policy(xp, &upe->pol, dir);
2309 if (copy_to_user_tmpl(xp, skb) < 0)
2311 if (copy_to_user_sec_ctx(xp, skb))
2313 if (copy_to_user_policy_type(xp->type, skb) < 0)
2317 return nlmsg_end(skb, nlh);
2320 nlmsg_cancel(skb, nlh);
2324 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2326 struct sk_buff *skb;
2328 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
2332 if (build_polexpire(skb, xp, dir, c) < 0)
2335 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
2338 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c)
2340 struct xfrm_userpolicy_info *p;
2341 struct xfrm_userpolicy_id *id;
2342 struct nlmsghdr *nlh;
2343 struct sk_buff *skb;
2344 int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
2347 headlen = sizeof(*p);
2348 if (c->event == XFRM_MSG_DELPOLICY) {
2349 len += nla_total_size(headlen);
2350 headlen = sizeof(*id);
2352 len += userpolicy_type_attrsize();
2353 len += NLMSG_ALIGN(headlen);
2355 skb = nlmsg_new(len, GFP_ATOMIC);
2359 nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
2363 p = nlmsg_data(nlh);
2364 if (c->event == XFRM_MSG_DELPOLICY) {
2365 struct nlattr *attr;
2367 id = nlmsg_data(nlh);
2368 memset(id, 0, sizeof(*id));
2371 id->index = xp->index;
2373 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
2375 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
2382 copy_to_user_policy(xp, p, dir);
2383 if (copy_to_user_tmpl(xp, skb) < 0)
2385 if (copy_to_user_policy_type(xp->type, skb) < 0)
2388 nlmsg_end(skb, nlh);
2390 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2397 static int xfrm_notify_policy_flush(struct km_event *c)
2399 struct nlmsghdr *nlh;
2400 struct sk_buff *skb;
2402 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
2406 nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
2409 if (copy_to_user_policy_type(c->data.type, skb) < 0)
2412 nlmsg_end(skb, nlh);
2414 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
2421 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c)
2425 case XFRM_MSG_NEWPOLICY:
2426 case XFRM_MSG_UPDPOLICY:
2427 case XFRM_MSG_DELPOLICY:
2428 return xfrm_notify_policy(xp, dir, c);
2429 case XFRM_MSG_FLUSHPOLICY:
2430 return xfrm_notify_policy_flush(c);
2431 case XFRM_MSG_POLEXPIRE:
2432 return xfrm_exp_policy_notify(xp, dir, c);
2434 printk("xfrm_user: Unknown Policy event %d\n", c->event);
2441 static inline size_t xfrm_report_msgsize(void)
2443 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
2446 static int build_report(struct sk_buff *skb, u8 proto,
2447 struct xfrm_selector *sel, xfrm_address_t *addr)
2449 struct xfrm_user_report *ur;
2450 struct nlmsghdr *nlh;
2452 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
2456 ur = nlmsg_data(nlh);
2458 memcpy(&ur->sel, sel, sizeof(ur->sel));
2461 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);
2463 return nlmsg_end(skb, nlh);
2466 nlmsg_cancel(skb, nlh);
2470 static int xfrm_send_report(u8 proto, struct xfrm_selector *sel,
2471 xfrm_address_t *addr)
2473 struct sk_buff *skb;
2475 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
2479 if (build_report(skb, proto, sel, addr) < 0)
2482 return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
2485 static struct xfrm_mgr netlink_mgr = {
2487 .notify = xfrm_send_state_notify,
2488 .acquire = xfrm_send_acquire,
2489 .compile_policy = xfrm_compile_policy,
2490 .notify_policy = xfrm_send_policy_notify,
2491 .report = xfrm_send_report,
2492 .migrate = xfrm_send_migrate,
2495 static int __init xfrm_user_init(void)
2499 printk(KERN_INFO "Initializing XFRM netlink socket\n");
2501 nlsk = netlink_kernel_create(&init_net, NETLINK_XFRM, XFRMNLGRP_MAX,
2502 xfrm_netlink_rcv, NULL, THIS_MODULE);
2505 rcu_assign_pointer(xfrm_nl, nlsk);
2507 xfrm_register_km(&netlink_mgr);
2512 static void __exit xfrm_user_exit(void)
2514 struct sock *nlsk = xfrm_nl;
2516 xfrm_unregister_km(&netlink_mgr);
2517 rcu_assign_pointer(xfrm_nl, NULL);
2519 netlink_kernel_release(nlsk);
2522 module_init(xfrm_user_init);
2523 module_exit(xfrm_user_exit);
2524 MODULE_LICENSE("GPL");
2525 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);