/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pfkeyv2.h>
15 #include <linux/crypto.h>
17 #if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
20 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
23 #include <asm/scatterlist.h>
/*
 * Algorithms supported by IPsec. These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
/*
 * Table of authentication (ICV) algorithms usable by IPsec.  Each entry
 * maps a crypto-layer transform name (.name, with .compat as the legacy
 * pre-template alias) to its PF_KEYv2 algorithm id and key-size bounds,
 * used during key negotiation and transform validation.
 *
 * NOTE(review): this extract is garbled -- each line carries a stray
 * original-line-number prefix, and the per-entry braces and .uinfo
 * initializers are missing from this view.  Restore against the upstream
 * file before compiling; do not trust this fragment as-is.
 */
31 static struct xfrm_algo_desc aalg_list[] = {
/* Null authentication: no ICV, zero-length key. */
33 .name = "hmac(digest_null)",
34 .compat = "digest_null",
44 .sadb_alg_id = SADB_X_AALG_NULL,
46 .sadb_alg_minbits = 0,
/* HMAC-MD5 (RFC 2403): fixed 128-bit key. */
62 .sadb_alg_id = SADB_AALG_MD5HMAC,
64 .sadb_alg_minbits = 128,
65 .sadb_alg_maxbits = 128
/* HMAC-SHA1 (RFC 2404): fixed 160-bit key. */
80 .sadb_alg_id = SADB_AALG_SHA1HMAC,
82 .sadb_alg_minbits = 160,
83 .sadb_alg_maxbits = 160
/* HMAC-SHA-256: fixed 256-bit key. */
87 .name = "hmac(sha256)",
98 .sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
100 .sadb_alg_minbits = 256,
101 .sadb_alg_maxbits = 256
/* HMAC-RIPEMD-160: fixed 160-bit key. */
105 .name = "hmac(ripemd160)",
106 .compat = "ripemd160",
116 .sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
118 .sadb_alg_minbits = 160,
119 .sadb_alg_maxbits = 160
/* AES-XCBC-MAC (RFC 3566): fixed 128-bit key. */
133 .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
135 .sadb_alg_minbits = 128,
136 .sadb_alg_maxbits = 128
/*
 * Table of encryption algorithms usable by ESP.  Key-size bounds are in
 * bits as carried in PF_KEYv2 sadb_alg messages.
 *
 * NOTE(review): garbled extract -- stray line-number prefixes, missing
 * per-entry braces and .uinfo (block/default key size) initializers.
 * Restore against the upstream file before compiling.
 */
141 static struct xfrm_algo_desc ealg_list[] = {
/* Null encryption: identity cipher, zero-length key. */
143 .name = "ecb(cipher_null)",
144 .compat = "cipher_null",
154 .sadb_alg_id = SADB_EALG_NULL,
156 .sadb_alg_minbits = 0,
157 .sadb_alg_maxbits = 0
/* Single DES-CBC: fixed 64-bit key (incl. parity bits). */
172 .sadb_alg_id = SADB_EALG_DESCBC,
174 .sadb_alg_minbits = 64,
175 .sadb_alg_maxbits = 64
/* Triple-DES EDE-CBC: fixed 192-bit key (incl. parity bits). */
179 .name = "cbc(des3_ede)",
180 .compat = "des3_ede",
190 .sadb_alg_id = SADB_EALG_3DESCBC,
192 .sadb_alg_minbits = 192,
193 .sadb_alg_maxbits = 192
/* CAST-128 CBC: variable 40..128-bit key. */
197 .name = "cbc(cast128)",
208 .sadb_alg_id = SADB_X_EALG_CASTCBC,
210 .sadb_alg_minbits = 40,
211 .sadb_alg_maxbits = 128
/* Blowfish CBC: variable 40..448-bit key. */
215 .name = "cbc(blowfish)",
216 .compat = "blowfish",
226 .sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
228 .sadb_alg_minbits = 40,
229 .sadb_alg_maxbits = 448
/* AES-CBC: 128/192/256-bit keys. */
244 .sadb_alg_id = SADB_X_EALG_AESCBC,
246 .sadb_alg_minbits = 128,
247 .sadb_alg_maxbits = 256
/* Serpent CBC: 128..256-bit keys. */
251 .name = "cbc(serpent)",
262 .sadb_alg_id = SADB_X_EALG_SERPENTCBC,
264 .sadb_alg_minbits = 128,
265 .sadb_alg_maxbits = 256,
/* Twofish CBC: 128..256-bit keys. */
269 .name = "cbc(twofish)",
280 .sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
282 .sadb_alg_minbits = 128,
283 .sadb_alg_maxbits = 256
/*
 * Table of IPComp compression algorithms.  Only the PF_KEYv2 id is
 * negotiated; compression has no key material.
 *
 * NOTE(review): garbled extract -- stray line-number prefixes; the
 * .name/.uinfo.comp initializers for each entry are missing from this
 * view.  Restore against the upstream file before compiling.
 */
288 static struct xfrm_algo_desc calg_list[] = {
296 .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
305 .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
314 .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
318 static inline int aalg_entries(void)
320 return ARRAY_SIZE(aalg_list);
323 static inline int ealg_entries(void)
325 return ARRAY_SIZE(ealg_list);
328 static inline int calg_entries(void)
330 return ARRAY_SIZE(calg_list);
333 /* Todo: generic iterators */
334 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
338 for (i = 0; i < aalg_entries(); i++) {
339 if (aalg_list[i].desc.sadb_alg_id == alg_id) {
340 if (aalg_list[i].available)
341 return &aalg_list[i];
348 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);
350 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
354 for (i = 0; i < ealg_entries(); i++) {
355 if (ealg_list[i].desc.sadb_alg_id == alg_id) {
356 if (ealg_list[i].available)
357 return &ealg_list[i];
364 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);
366 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
370 for (i = 0; i < calg_entries(); i++) {
371 if (calg_list[i].desc.sadb_alg_id == alg_id) {
372 if (calg_list[i].available)
373 return &calg_list[i];
380 EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
382 static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
383 int entries, u32 type, u32 mask,
384 char *name, int probe)
391 for (i = 0; i < entries; i++) {
392 if (strcmp(name, list[i].name) &&
393 (!list[i].compat || strcmp(name, list[i].compat)))
396 if (list[i].available)
402 status = crypto_has_alg(list[i].name, type,
403 mask | CRYPTO_ALG_ASYNC);
407 list[i].available = status;
413 struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
415 return xfrm_get_byname(aalg_list, aalg_entries(),
416 CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
419 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
421 struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
423 return xfrm_get_byname(ealg_list, ealg_entries(),
424 CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
427 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
429 struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
431 return xfrm_get_byname(calg_list, calg_entries(),
432 CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
435 EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
437 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
439 if (idx >= aalg_entries())
442 return &aalg_list[idx];
444 EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);
446 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
448 if (idx >= ealg_entries())
451 return &ealg_list[idx];
453 EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system. This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
#ifdef CONFIG_CRYPTO
	int i, status;

	/* Probing may load modules and sleep; never run from softirq. */
	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_has_hash(aalg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_has_blkcipher(ealg_list[i].name, 0,
					      CRYPTO_ALG_ASYNC);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_has_comp(calg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
#endif
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
491 int xfrm_count_auth_supported(void)
495 for (i = 0, n = 0; i < aalg_entries(); i++)
496 if (aalg_list[i].available)
500 EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);
502 int xfrm_count_enc_supported(void)
506 for (i = 0, n = 0; i < ealg_entries(); i++)
507 if (ealg_list[i].available)
511 EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
513 /* Move to common area: it is shared with AH. */
515 int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
516 int offset, int len, icv_update_fn_t icv_update)
518 int start = skb_headlen(skb);
519 int i, copy = start - offset;
521 struct scatterlist sg;
523 /* Checksum header. */
528 sg.page = virt_to_page(skb->data + offset);
529 sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
532 err = icv_update(desc, &sg, copy);
536 if ((len -= copy) == 0)
541 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
544 BUG_TRAP(start <= offset + len);
546 end = start + skb_shinfo(skb)->frags[i].size;
547 if ((copy = end - offset) > 0) {
548 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
553 sg.page = frag->page;
554 sg.offset = frag->page_offset + offset-start;
557 err = icv_update(desc, &sg, copy);
568 if (skb_shinfo(skb)->frag_list) {
569 struct sk_buff *list = skb_shinfo(skb)->frag_list;
571 for (; list; list = list->next) {
574 BUG_TRAP(start <= offset + len);
576 end = start + list->len;
577 if ((copy = end - offset) > 0) {
580 err = skb_icv_walk(list, desc, offset-start,
584 if ((len -= copy) == 0)
594 EXPORT_SYMBOL_GPL(skb_icv_walk);
596 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
598 /* Looking generic it is not used in another places. */
601 skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
603 int start = skb_headlen(skb);
604 int i, copy = start - offset;
610 sg[elt].page = virt_to_page(skb->data + offset);
611 sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
612 sg[elt].length = copy;
614 if ((len -= copy) == 0)
619 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
622 BUG_TRAP(start <= offset + len);
624 end = start + skb_shinfo(skb)->frags[i].size;
625 if ((copy = end - offset) > 0) {
626 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
630 sg[elt].page = frag->page;
631 sg[elt].offset = frag->page_offset+offset-start;
632 sg[elt].length = copy;
641 if (skb_shinfo(skb)->frag_list) {
642 struct sk_buff *list = skb_shinfo(skb)->frag_list;
644 for (; list; list = list->next) {
647 BUG_TRAP(start <= offset + len);
649 end = start + list->len;
650 if ((copy = end - offset) > 0) {
653 elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
654 if ((len -= copy) == 0)
664 EXPORT_SYMBOL_GPL(skb_to_sgvec);
666 /* Check that skb data bits are writable. If they are not, copy data
667 * to newly created private area. If "tailbits" is given, make sure that
668 * tailbits bytes beyond current end of skb are writable.
670 * Returns amount of elements of scatterlist to load for subsequent
671 * transformations and pointer to writable trailer skb.
674 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
678 struct sk_buff *skb1, **skb_p;
680 /* If skb is cloned or its head is paged, reallocate
681 * head pulling out all the pages (pages are considered not writable
682 * at the moment even if they are anonymous).
684 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
685 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
688 /* Easy case. Most of packets will go this way. */
689 if (!skb_shinfo(skb)->frag_list) {
690 /* A little of trouble, not enough of space for trailer.
691 * This should not happen, when stack is tuned to generate
692 * good frames. OK, on miss we reallocate and reserve even more
693 * space, 128 bytes is fair. */
695 if (skb_tailroom(skb) < tailbits &&
696 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
704 /* Misery. We are in troubles, going to mincer fragments... */
707 skb_p = &skb_shinfo(skb)->frag_list;
710 while ((skb1 = *skb_p) != NULL) {
713 /* The fragment is partially pulled by someone,
714 * this can happen on input. Copy it and everything
717 if (skb_shared(skb1))
720 /* If the skb is the last, worry about trailer. */
722 if (skb1->next == NULL && tailbits) {
723 if (skb_shinfo(skb1)->nr_frags ||
724 skb_shinfo(skb1)->frag_list ||
725 skb_tailroom(skb1) < tailbits)
726 ntail = tailbits + 128;
732 skb_shinfo(skb1)->nr_frags ||
733 skb_shinfo(skb1)->frag_list) {
734 struct sk_buff *skb2;
736 /* Fuck, we are miserable poor guys... */
738 skb2 = skb_copy(skb1, GFP_ATOMIC);
740 skb2 = skb_copy_expand(skb1,
744 if (unlikely(skb2 == NULL))
748 skb_set_owner_w(skb2, skb1->sk);
750 /* Looking around. Are we still alive?
751 * OK, link new skb, drop old one */
753 skb2->next = skb1->next;
765 EXPORT_SYMBOL_GPL(skb_cow_data);
767 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
770 skb->data_len += len;
773 return skb_put(tail, len);
775 EXPORT_SYMBOL_GPL(pskb_put);