2 * xfrm algorithm interface
4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pfkeyv2.h>
15 #include <linux/crypto.h>
17 #if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
20 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
23 #include <asm/scatterlist.h>
26 * Algorithms supported by IPsec. These entries contain properties which
27 * are used in key negotiation and xfrm processing, and are used to verify
28 * that instantiated crypto transforms have correct parameters for IPsec
/*
 * Authentication (ICV) algorithm descriptor table.  Each entry pairs a
 * crypto-layer algorithm name with its PF_KEYv2 SADB algorithm id and the
 * legal key-size range in bits used during key negotiation.
 * NOTE(review): this is an elided listing (leading numbers are the original
 * file's line numbers; interior initializer lines are missing); the visible
 * code lines are preserved verbatim.
 */
31 static struct xfrm_algo_desc aalg_list[] = {
/* Null authentication: no ICV computed, zero-length key. */
33 .name = "digest_null",
43 .sadb_alg_id = SADB_X_AALG_NULL,
45 .sadb_alg_minbits = 0,
/* HMAC-MD5: fixed 128-bit key. */
60 .sadb_alg_id = SADB_AALG_MD5HMAC,
62 .sadb_alg_minbits = 128,
63 .sadb_alg_maxbits = 128
/* HMAC-SHA1: fixed 160-bit key. */
77 .sadb_alg_id = SADB_AALG_SHA1HMAC,
79 .sadb_alg_minbits = 160,
80 .sadb_alg_maxbits = 160
/* HMAC-SHA2-256: fixed 256-bit key. */
94 .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
96 .sadb_alg_minbits = 256,
97 .sadb_alg_maxbits = 256
/* HMAC-RIPEMD160: fixed 160-bit key. */
111 .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
113 .sadb_alg_minbits = 160,
114 .sadb_alg_maxbits = 160
/*
 * Encryption algorithm descriptor table (ESP ciphers).  ".name" is the
 * canonical crypto-API template name (e.g. "cbc(aes)"); ".compat" is the
 * legacy bare-cipher alias accepted for backward compatibility.  Key-size
 * bounds (in bits) constrain key negotiation.
 * NOTE(review): elided listing -- interior initializer lines are missing;
 * visible code lines are kept verbatim.
 */
119 static struct xfrm_algo_desc ealg_list[] = {
/* Null encryption (ECB over the null cipher): zero-length key. */
121 .name = "ecb(cipher_null)",
122 .compat = "cipher_null",
132 .sadb_alg_id = SADB_EALG_NULL,
134 .sadb_alg_minbits = 0,
135 .sadb_alg_maxbits = 0
/* Single DES in CBC mode: fixed 64-bit key (56 effective + parity). */
150 .sadb_alg_id = SADB_EALG_DESCBC,
152 .sadb_alg_minbits = 64,
153 .sadb_alg_maxbits = 64
/* Triple-DES (EDE) in CBC mode: fixed 192-bit key. */
157 .name = "cbc(des3_ede)",
158 .compat = "des3_ede",
168 .sadb_alg_id = SADB_EALG_3DESCBC,
170 .sadb_alg_minbits = 192,
171 .sadb_alg_maxbits = 192
/* CAST-128 in CBC mode: variable 40..128-bit key. */
175 .name = "cbc(cast128)",
186 .sadb_alg_id = SADB_X_EALG_CASTCBC,
188 .sadb_alg_minbits = 40,
189 .sadb_alg_maxbits = 128
/* Blowfish in CBC mode: variable 40..448-bit key. */
193 .name = "cbc(blowfish)",
194 .compat = "blowfish",
204 .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
206 .sadb_alg_minbits = 40,
207 .sadb_alg_maxbits = 448
/* AES in CBC mode: 128..256-bit key. */
222 .sadb_alg_id = SADB_X_EALG_AESCBC,
224 .sadb_alg_minbits = 128,
225 .sadb_alg_maxbits = 256
/* Serpent in CBC mode: 128..256-bit key. */
229 .name = "cbc(serpent)",
240 .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
242 .sadb_alg_minbits = 128,
243 .sadb_alg_maxbits = 256,
/* Twofish in CBC mode: 128..256-bit key. */
247 .name = "cbc(twofish)",
258 .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
260 .sadb_alg_minbits = 128,
261 .sadb_alg_maxbits = 256
/*
 * IPComp compression algorithm descriptor table.  Compression algorithms
 * are keyless, so only the SADB algorithm id is populated in ".desc";
 * the ".name"/".entries" lines are elided from this view.
 */
266 static struct xfrm_algo_desc calg_list[] = {
274 .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
283 .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
292 .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
/* Number of entries in the authentication algorithm table. */
296 static inline int aalg_entries(void)
298 return ARRAY_SIZE(aalg_list);
/* Number of entries in the encryption algorithm table. */
301 static inline int ealg_entries(void)
303 return ARRAY_SIZE(ealg_list);
/* Number of entries in the compression algorithm table. */
306 static inline int calg_entries(void)
308 return ARRAY_SIZE(calg_list);
311 /* Todo: generic iterators */
/*
 * Look up an authentication algorithm descriptor by SADB algorithm id.
 * Linear scan; a hit is returned only if the entry has been marked
 * available (underlying crypto implementation present).  The miss path
 * (presumably "return NULL;") is elided from this view.
 */
312 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
316 for (i = 0; i < aalg_entries(); i++) {
317 if (aalg_list[i].desc.sadb_alg_id == alg_id) {
318 if (aalg_list[i].available)
319 return &aalg_list[i];
326 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
/*
 * Look up an encryption algorithm descriptor by SADB algorithm id.
 * Same contract as xfrm_aalg_get_byid(): only available entries match;
 * miss path elided from this view.
 */
328 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
332 for (i = 0; i < ealg_entries(); i++) {
333 if (ealg_list[i].desc.sadb_alg_id == alg_id) {
334 if (ealg_list[i].available)
335 return &ealg_list[i];
342 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
/*
 * Look up a compression algorithm descriptor by SADB algorithm id.
 * Same contract as xfrm_aalg_get_byid(); miss path elided from this view.
 */
344 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
348 for (i = 0; i < calg_entries(); i++) {
349 if (calg_list[i].desc.sadb_alg_id == alg_id) {
350 if (calg_list[i].available)
351 return &calg_list[i];
358 EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
/*
 * Generic by-name lookup shared by the three per-type wrappers below.
 * An entry matches if @name equals either the canonical ".name" or the
 * legacy ".compat" alias.  On a match: if already marked available it is
 * returned directly (return elided); otherwise -- presumably only when the
 * caller passed a nonzero probe flag (the "probe" parameter and its check
 * are elided from this view) -- crypto_alg_available() is consulted and
 * the result cached in ".available" before returning.
 * NOTE(review): several lines (declarations, probe test, returns) are
 * missing from this listing; visible code kept verbatim.
 */
360 static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
361 int entries, char *name,
369 for (i = 0; i < entries; i++) {
/* strcmp()==0 means match; skip entry unless name or compat matches. */
370 if (strcmp(name, list[i].name) &&
371 (!list[i].compat || strcmp(name, list[i].compat)))
374 if (list[i].available)
380 status = crypto_alg_available(name, 0);
384 list[i].available = status;
/* By-name lookup in the authentication table; see xfrm_get_byname(). */
390 struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
392 return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
394 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
/* By-name lookup in the encryption table; see xfrm_get_byname(). */
396 struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
398 return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
400 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
/* By-name lookup in the compression table; see xfrm_get_byname(). */
402 struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
404 return xfrm_get_byname(calg_list, calg_entries(), name, probe);
406 EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
/*
 * Bounds-checked index access into the authentication table.  The
 * out-of-range return (presumably NULL) is elided from this view.
 */
408 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
410 if (idx >= aalg_entries())
413 return &aalg_list[idx];
415 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);
/*
 * Bounds-checked index access into the encryption table.  The
 * out-of-range return (presumably NULL) is elided from this view.
 */
417 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
419 if (idx >= ealg_entries())
422 return &ealg_list[idx];
424 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
427 * Probe for the availability of crypto algorithms, and set the available
428 * flag for any algorithms found on the system. This is typically called by
429 * pfkey during userspace SA add, update or register.
/*
 * Re-probes all three tables and refreshes each ".available" flag.
 * NOTE(review): the BUG_ON suggests crypto_alg_available() may sleep
 * (e.g. trigger module loading), hence the softirq-context ban -- confirm
 * against the crypto API of this kernel version.  The "!= status" guards
 * make the stores conditional; the net effect equals an unconditional
 * assignment.  Variable declarations are elided from this view.
 */
431 void xfrm_probe_algs(void)
436 BUG_ON(in_softirq());
438 for (i = 0; i < aalg_entries(); i++) {
439 status = crypto_alg_available(aalg_list[i].name, 0);
440 if (aalg_list[i].available != status)
441 aalg_list[i].available = status;
444 for (i = 0; i < ealg_entries(); i++) {
445 status = crypto_alg_available(ealg_list[i].name, 0);
446 if (ealg_list[i].available != status)
447 ealg_list[i].available = status;
450 for (i = 0; i < calg_entries(); i++) {
451 status = crypto_alg_available(calg_list[i].name, 0);
452 if (calg_list[i].available != status)
453 calg_list[i].available = status;
457 EXPORT_SYMBOL_GPL(xfrm_probe_algs);
/*
 * Count authentication algorithms currently marked available.  The
 * increment of the counter and the return statement are elided from
 * this view.
 */
459 int xfrm_count_auth_supported(void)
463 for (i = 0, n = 0; i < aalg_entries(); i++)
464 if (aalg_list[i].available)
468 EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);
/*
 * Count encryption algorithms currently marked available.  Increment and
 * return statements are elided from this view.
 */
470 int xfrm_count_enc_supported(void)
474 for (i = 0, n = 0; i < ealg_entries(); i++)
475 if (ealg_list[i].available)
479 EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
481 /* Move to common area: it is shared with AH. */
/*
 * Feed the byte range [offset, offset+len) of @skb to @icv_update through
 * one-element scatterlists, covering the linear head, then the page frags,
 * then (recursively) the frag_list.  Used to compute the ICV without
 * linearizing the skb.
 * NOTE(review): sg.length assignments, copy clamping, and the offset/start
 * bookkeeping between sections are elided from this listing; the visible
 * code lines are kept verbatim.
 */
483 void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
484 int offset, int len, icv_update_fn_t icv_update)
486 int start = skb_headlen(skb);
487 int i, copy = start - offset;
488 struct scatterlist sg;
490 /* Checksum header. */
/* Linear head: map its pages directly into a temporary scatterlist. */
495 sg.page = virt_to_page(skb->data + offset);
496 sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
499 icv_update(tfm, &sg, 1);
501 if ((len -= copy) == 0)
/* Paged fragments. */
506 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
509 BUG_TRAP(start <= offset + len);
511 end = start + skb_shinfo(skb)->frags[i].size;
512 if ((copy = end - offset) > 0) {
513 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
518 sg.page = frag->page;
519 sg.offset = frag->page_offset + offset-start;
522 icv_update(tfm, &sg, 1);
/* Chained skbs: recurse with a rebased offset. */
531 if (skb_shinfo(skb)->frag_list) {
532 struct sk_buff *list = skb_shinfo(skb)->frag_list;
534 for (; list; list = list->next) {
537 BUG_TRAP(start <= offset + len);
539 end = start + list->len;
540 if ((copy = end - offset) > 0) {
543 skb_icv_walk(list, tfm, offset-start, copy, icv_update);
544 if ((len -= copy) == 0)
553 EXPORT_SYMBOL_GPL(skb_icv_walk);
555 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
557 /* Looking generic it is not used in another places. */
/*
 * Build a scatterlist describing the byte range [offset, offset+len) of
 * @skb: linear head first, then page frags, then recursion over the
 * frag_list.  The element counter "elt" is used below but its declaration,
 * increments, and the final "return elt;" are elided from this view --
 * presumably it returns the number of scatterlist entries filled.
 */
560 skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
562 int start = skb_headlen(skb);
563 int i, copy = start - offset;
/* Linear head. */
569 sg[elt].page = virt_to_page(skb->data + offset);
570 sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
571 sg[elt].length = copy;
573 if ((len -= copy) == 0)
/* Paged fragments. */
578 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
581 BUG_TRAP(start <= offset + len);
583 end = start + skb_shinfo(skb)->frags[i].size;
584 if ((copy = end - offset) > 0) {
585 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
589 sg[elt].page = frag->page;
590 sg[elt].offset = frag->page_offset+offset-start;
591 sg[elt].length = copy;
/* Chained skbs: recurse, appending after the entries used so far. */
600 if (skb_shinfo(skb)->frag_list) {
601 struct sk_buff *list = skb_shinfo(skb)->frag_list;
603 for (; list; list = list->next) {
606 BUG_TRAP(start <= offset + len);
608 end = start + list->len;
609 if ((copy = end - offset) > 0) {
612 elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
613 if ((len -= copy) == 0)
623 EXPORT_SYMBOL_GPL(skb_to_sgvec);
625 /* Check that skb data bits are writable. If they are not, copy data
626 * to newly created private area. If "tailbits" is given, make sure that
627 * tailbits bytes beyond current end of skb are writable.
629 * Returns amount of elements of scatterlist to load for subsequent
630 * transformations and pointer to writable trailer skb.
/*
 * NOTE(review): elided listing -- error returns, the element-count
 * bookkeeping, the shared-fragment copy path, and the *trailer assignment
 * are missing from view; visible code lines are kept verbatim.
 */
633 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
637 struct sk_buff *skb1, **skb_p;
639 /* If skb is cloned or its head is paged, reallocate
640 * head pulling out all the pages (pages are considered not writable
641 * at the moment even if they are anonymous).
/* Pull all paged data into the linear head if cloned or fragged. */
643 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
644 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
647 /* Easy case. Most of packets will go this way. */
648 if (!skb_shinfo(skb)->frag_list) {
649 /* A little of trouble, not enough of space for trailer.
650 * This should not happen, when stack is tuned to generate
651 * good frames. OK, on miss we reallocate and reserve even more
652 * space, 128 bytes is fair. */
654 if (skb_tailroom(skb) < tailbits &&
655 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
663 /* Misery. We are in troubles, going to mincer fragments... */
/* Walk the frag_list, replacing any fragment we cannot write into. */
666 skb_p = &skb_shinfo(skb)->frag_list;
669 while ((skb1 = *skb_p) != NULL) {
672 /* The fragment is partially pulled by someone,
673 * this can happen on input. Copy it and everything
676 if (skb_shared(skb1))
679 /* If the skb is the last, worry about trailer. */
681 if (skb1->next == NULL && tailbits) {
682 if (skb_shinfo(skb1)->nr_frags ||
683 skb_shinfo(skb1)->frag_list ||
684 skb_tailroom(skb1) < tailbits)
685 ntail = tailbits + 128;
/* Replace the fragment when it is unwritable or lacks tailroom. */
691 skb_shinfo(skb1)->nr_frags ||
692 skb_shinfo(skb1)->frag_list) {
693 struct sk_buff *skb2;
695 /* Fuck, we are miserable poor guys... */
697 skb2 = skb_copy(skb1, GFP_ATOMIC);
699 skb2 = skb_copy_expand(skb1,
703 if (unlikely(skb2 == NULL))
/* Preserve socket ownership on the replacement copy. */
707 skb_set_owner_w(skb2, skb1->sk);
709 /* Looking around. Are we still alive?
710 * OK, link new skb, drop old one */
712 skb2->next = skb1->next;
724 EXPORT_SYMBOL_GPL(skb_cow_data);
/*
 * Append @len bytes to @tail (the writable trailer skb, typically obtained
 * from skb_cow_data()) while updating the parent @skb's byte accounting,
 * and return a pointer to the newly added area (via skb_put on the tail).
 * NOTE(review): the parent skb->len update line is elided from this view.
 */
726 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
729 skb->data_len += len;
732 return skb_put(tail, len);
734 EXPORT_SYMBOL_GPL(pskb_put);