#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct esp_data *esp;
	struct sk_buff *trailer;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int nfrags;
	int i;

	/* skb is pure payload to encrypt */

	err = -ENOMEM;

	/* Round to block size */
	clen = skb->len;
	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	desc.tfm = tfm;
	desc.flags = 0;
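	/*
	 * ESP trailer layout (RFC 2406): the payload is padded so that
	 * payload + pad + 2 trailer bytes (pad length, next header)
	 * fill a whole number of cipher blocks; the ICV follows.
	 *
	 * Illustrative example, assuming AES-CBC (blksize 16): a
	 * 37-byte payload gives clen = ALIGN(37 + 2, 16) = 48, i.e.
	 * 9 pad bytes; tail[9], the pad-length byte, is set to 9, and
	 * the last trailer byte carries the inner protocol number.
	 */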
	blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	clen = ALIGN(clen + 2, blksize);
	if (esp->conf.padlen)
		clen = ALIGN(clen, esp->conf.padlen);

	if ((nfrags = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
		goto error;

	/* Fill the pad bytes with the monotonic pattern 1, 2, 3, ... */
	tail = skb_tail_pointer(trailer);
	for (i = 0; i < clen - skb->len - 2; i++)
		tail[i] = i + 1;
	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
	pskb_put(skb, trailer, clen - skb->len);
	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
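	/*
	 * xfrm has stashed the inner protocol number in the byte where
	 * the MAC header used to be.  Move it into the last trailer
	 * byte (the ESP next-header field) and claim ESP in its place.
	 */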
	*(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;
	spin_lock_bh(&x->lock);

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;

		uh = (struct udphdr *)esph;
		uh->source = encap->encap_sport;
		uh->dest = encap->encap_dport;
		uh->len = htons(skb->len + alen - skb_transport_offset(skb));
		uh->check = 0;
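		/*
		 * Two UDP encapsulation formats from the NAT-T drafts:
		 * ESPINUDP places the ESP header directly after the UDP
		 * header, while ESPINUDP_NON_IKE first inserts an
		 * 8-byte all-zero non-IKE marker so the packet cannot
		 * be mistaken for IKE traffic on the same port.
		 */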
		switch (encap->encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}
	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq);
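	/*
	 * Explicit IV handling: the very first IV is random; after each
	 * packet is encrypted, crypto_blkcipher_get_iv() below saves the
	 * CBC residue, so each subsequent packet chains its IV from the
	 * previous packet's last ciphertext block.
	 */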
	if (esp->conf.ivlen) {
		if (unlikely(!esp->conf.ivinitted)) {
			get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
			esp->conf.ivinitted = 1;
		}
		crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}
	{
		struct scatterlist *sg = &esp->sgbuf[0];

		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
			sg = kmalloc(sizeof(struct scatterlist) * nfrags,
				     GFP_ATOMIC);
			if (!sg)
				goto unlock;
		}
		skb_to_sgvec(skb, sg, esph->enc_data + esp->conf.ivlen - skb->data, clen);
		err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
		if (unlikely(sg != &esp->sgbuf[0]))
			kfree(sg);
	}

	if (unlikely(err))
		goto unlock;
	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
		crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
	}

	if (esp->auth.icv_full_len) {
		err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
				     sizeof(*esph) + esp->conf.ivlen + clen);
		memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
	}

unlock:
	spin_unlock_bh(&x->lock);

error:
	return err;
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_blkcipher *tfm = esp->conf.tfm;
	struct blkcipher_desc desc = { .tfm = tfm };
	struct sk_buff *trailer;
	int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
	int alen = esp->auth.icv_trunc_len;
	int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen;
	int nfrags;
	int ihl;
	u8 nexthdr[2];
	struct scatterlist *sg;
	int padlen;
	int err;
	if (!pskb_may_pull(skb, sizeof(*esph)))
		goto out;

	if (elen <= 0 || (elen & (blksize - 1)))
		goto out;

	/* If integrity check is required, do this. */
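	/*
	 * The ICV covers the ESP header, IV and ciphertext, i.e.
	 * everything except the trailing alen bytes: recompute it into
	 * work_icv and compare against the value carried in the packet.
	 */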
	if (esp->auth.icv_full_len) {
		u8 sum[alen];

		err = esp_mac_digest(esp, skb, 0, skb->len - alen);
		if (err)
			goto out;

		if (skb_copy_bits(skb, skb->len - alen, sum, alen))
			BUG();

		if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
			x->stats.integrity_failed++;
			goto out;
		}
	}
	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec.  This can be wrong; check against other implementations. */
	if (esp->conf.ivlen)
		crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
	sg = &esp->sgbuf[0];

	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
		sg = kmalloc(sizeof(struct scatterlist) * nfrags, GFP_ATOMIC);
		if (!sg)
			goto out;
	}
	skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen);
	err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
	if (unlikely(sg != &esp->sgbuf[0]))
		kfree(sg);
	if (unlikely(err))
		return err;

	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	padlen = nexthdr[0];
	if (padlen + 2 >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */
	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, sizeof(*esph) + esp->conf.ivlen);
	skb_set_transport_header(skb, -ihl);

	return nexthdr[1];

out:
	return -EINVAL;
}
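/*
 * Compute the largest payload that still fits the given link MTU after
 * ESP overhead.  Illustrative example, assuming AES-CBC (blksize 16),
 * HMAC-SHA1-96 (12-byte ICV) and tunnel mode (header_len = 20-byte outer
 * IP + 8-byte ESP header + 16-byte IV = 44): for mtu = 1500,
 * 1500 - 44 - 12 = 1444 rounds down to 1440 whole blocks, and the final
 * "- 2" reserves the pad-length and next-header trailer bytes.
 */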
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	u32 align = max_t(u32, blksize, esp->conf.padlen);
	u32 rem;

	mtu -= x->props.header_len + esp->auth.icv_trunc_len;
	rem = mtu & (align - 1);
	mtu &= ~(align - 1);
	switch (x->props.mode) {
	case XFRM_MODE_TUNNEL:
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		/* The worst case */
		mtu -= blksize - 4;
		mtu += min_t(u32, blksize - 4, rem);
		break;
	case XFRM_MODE_BEET:
		/* The worst case. */
		mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
		break;
	}

	return mtu - 2;
}
static void esp4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
		return;

	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi,
			      IPPROTO_ESP, AF_INET);
	if (!x)
		return;
	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
		 ntohl(esph->spi), ntohl(iph->daddr));
	xfrm_state_put(x);
}
static void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_blkcipher(esp->conf.tfm);
	esp->conf.tfm = NULL;
	kfree(esp->conf.ivec);
	esp->conf.ivec = NULL;
	crypto_free_hash(esp->auth.tfm);
	esp->auth.tfm = NULL;
	kfree(esp->auth.work_icv);
	esp->auth.work_icv = NULL;
	kfree(esp);
}
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;
	u32 align;

	if (x->ealg == NULL)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, x->aalg->alg_key,
				       (x->aalg->alg_key_len + 7) / 8))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits / 8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits / 8;
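		/*
		 * e.g. HMAC-SHA1 has a 20-byte (160-bit) digest which
		 * RFC 2404 truncates to 96 bits on the wire, giving
		 * icv_full_len = 20 and icv_trunc_len = 12.
		 */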
		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key,
				    (x->ealg->alg_key_len + 7) / 8))
		goto error;
	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;
	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	if (esp->conf.padlen)
		align = max_t(u32, align, esp->conf.padlen);
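	/*
	 * Worst-case trailer: up to align - 1 bytes of padding plus the
	 * 2-byte pad-length/next-header pair, i.e. align + 1 bytes,
	 * followed by the truncated ICV.
	 */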
	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
	return 0;

error:
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
static struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output
};
static struct net_protocol esp4_protocol = {
	.handler	= xfrm4_rcv,
	.err_handler	= esp4_err,
};
static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		printk(KERN_INFO "ip esp init: can't add xfrm type\n");
		return -EAGAIN;
	}
	if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
		printk(KERN_INFO "ip esp init: can't add protocol\n");
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}
static void __exit esp4_fini(void)
{
	if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
		printk(KERN_INFO "ip esp close: can't remove protocol\n");
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		printk(KERN_INFO "ip esp close: can't remove xfrm type\n");
}
module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);