#include <linux/types.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	/* Most of the code is block-size independent but currently we
	 * use only 8: */
	BUG_ON(blocksize != 8);
	return 8 - (length & 7);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}
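
/*
 * A worked example of the padding scheme above (a sketch added for
 * clarity, assuming the fixed DES blocksize of 8): if buf->len - offset
 * is 21, gss_krb5_padding() returns 3 and the memset() appends the bytes
 * 03 03 03.  The pad is self-describing, so the unwrap side can recover
 * it from the final byte alone:
 *
 *	pad = *(u8 *)(end - 1);		// == 3 in this example
 *	buf->len -= pad;
 */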

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	int len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	buf->len -= pad;
	return 0;
}
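
/*
 * For reference (summarized from the xdr_buf definition, not from this
 * file): an xdr_buf carries data in three regions -- head[0] (a kvec),
 * an array of page cache pages described by pages/page_base/page_len,
 * and tail[0] (another kvec) -- with buf->len spanning all three.  That
 * is why the pad byte above may land in the head, a page, or the tail.
 */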

static inline void
make_confounder(char *p, int blocksize)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	BUG_ON(blocksize != 8);
	*q = i++;
}
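
/*
 * Background note (added; not in the original): the confounder fills the
 * first cipher block of the plaintext so that, under CBC, two otherwise
 * identical messages still encrypt to different ciphertext.  Uniqueness
 * rather than true randomness is what buys that, which is the point the
 * comment above is making.
 */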

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

u32
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx *kctx = ctx->internal_ctx_id;
	char cksumdata[16];
	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
	int blocksize = 0, plainlen;
	unsigned char *ptr, *krb5_hdr, *msg_start;
	s32 now;
	int headlen;
	struct page **tmp_pages;
	u32 seq_send;

	dprintk("RPC: gss_wrap_kerberos\n");

	now = get_seconds();

	if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) {
		dprintk("RPC: gss_krb5_seal: kctx->sealalg %d not supported\n",
			kctx->sealalg);
		goto out_err;
	}

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset;	/* confounder + data + pad */

	headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
						(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	/* XXX Would be cleverer to encrypt while copying. */
	/* XXX bounds checking, slack, etc. */
	memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
	buf->head[0].iov_len += headlen;
	buf->len += headlen;
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);

	*ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
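	/*
	 * For orientation, the token body layout per RFC 1964 (summarized
	 * here; not part of the original file):
	 *
	 *	bytes  0.. 1	TOK_ID		02 01 for a wrap token
	 *	bytes  2.. 3	SGN_ALG
	 *	bytes  4.. 5	SEAL_ALG
	 *	bytes  6.. 7	filler		ff ff
	 *	bytes  8..15	SND_SEQ		encrypted sequence number
	 *	bytes 16..23	SGN_CKSUM	checksum over header and data
	 *	bytes 24..	confounder, message data, padding
	 */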
	krb5_hdr = ptr - 2;
	msg_start = krb5_hdr + 24;
	/* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);

	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
	memset(krb5_hdr + 4, 0xff, 4);
	*(__be16 *)(krb5_hdr + 4) = htons(kctx->sealalg);

	make_confounder(msg_start, blocksize);

	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(CKSUMTYPE_RSA_MD5, krb5_hdr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		goto out_err;
	buf->pages = tmp_pages;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			  md5cksum.data, md5cksum.len))
		goto out_err;
	memcpy(krb5_hdr + 16,
	       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
	       KRB5_CKSUM_LENGTH);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, krb5_hdr + 16, krb5_hdr + 8)))
		goto out_err;
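
	/*
	 * (Context, per RFC 1964's sequence-number rules: SND_SEQ at header
	 * bytes 8..15 is the sequence number plus four direction bytes --
	 * 00 from the initiator, ff from the acceptor -- encrypted with the
	 * just-written checksum as IV, which is why krb5_make_seq_num() is
	 * handed krb5_hdr + 16 as well as krb5_hdr + 8.)
	 */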

	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
								pages))
		goto out_err;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
out_err:
	return GSS_S_FAILURE;
}

u32
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx *kctx = ctx->internal_ctx_id;
	int signalg;
	int sealalg;
	char cksumdata[16];
	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
	s32 now;
	int direction;
	s32 seqnum;
	unsigned char *ptr;
	int bodysize;
	u32 ret = GSS_S_DEFECTIVE_TOKEN;
	void *data_start, *orig_start;
	int data_len;
	int blocksize;

	dprintk("RPC: gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		goto out;

	if ((*ptr++ != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (*ptr++ != (KG_TOK_WRAP_MSG & 0xff)))
		goto out;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[0] + (ptr[1] << 8);
	sealalg = ptr[2] + (ptr[3] << 8);
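
	/*
	 * (Added note: these fields are decoded least-significant-byte
	 * first, matching the byte order RFC 1964 uses on the wire for the
	 * SGN_ALG and SEAL_ALG fields.)
	 */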

	/* Sanity checks */

	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
		goto out;

	if (sealalg == 0xffff)
		goto out;

	if (signalg != SGN_ALG_DES_MAC_MD5)
		goto out;

	/* in the current spec, there is only one valid seal algorithm per
	   key type, so a simple comparison is ok */

	if (sealalg != kctx->sealalg)
		goto out;

	/* there are several mappings of seal algorithms to sign algorithms,
	   but few enough that we can try them all. */

	if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
	    (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
	    (kctx->sealalg == SEAL_ALG_DES3KD &&
	     signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
		goto out;

	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + 22 - (unsigned char *)buf->head[0].iov_base))
		goto out;

	ret = make_checksum(CKSUMTYPE_RSA_MD5, ptr - 2, 8, buf,
		 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum);
	if (ret)
		goto out;

	ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			   md5cksum.data, md5cksum.len);
	if (ret)
		goto out;
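
	/*
	 * (Added note on the comparison below: ptr points at byte 2 of the
	 * krb5 header, so ptr + 14 is the SGN_CKSUM field at bytes 16..23,
	 * and md5cksum.data + 8 is the trailing 8 bytes of the 16-byte
	 * encrypted MD5 -- the same slice the wrap side stored.)
	 */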
	if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
		ret = GSS_S_BAD_SIG;
		goto out;
	}

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	ret = GSS_S_CONTEXT_EXPIRED;
	if (now > kctx->endtime)
		goto out;

	/* do sequencing checks */

	if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
				    &seqnum)))
		goto out;

	/* reset ret: on success krb5_get_seq_num() left GSS_S_COMPLETE in
	 * it, which a failed direction check must not return */
	ret = GSS_S_BAD_SIG;
	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		goto out;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and decrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + 22 + blocksize;
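	/*
	 * (Added note on the arithmetic above: ptr still points at byte 2
	 * of the krb5 header, so ptr + 22 is byte 24, the end of the
	 * header; adding blocksize then skips the confounder.)
	 */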
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	ret = GSS_S_DEFECTIVE_TOKEN;
	if (gss_krb5_remove_padding(buf, blocksize))
		goto out;

	ret = GSS_S_COMPLETE;
out:
	return ret;
}