4896 Performance improvements for KCF AES modes
--- old/usr/src/common/crypto/modes/ccm.c
+++ new/usr/src/common/crypto/modes/ccm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
(12 lines elided)
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright 2015 by Saso Kiselkov. All rights reserved.
23 24 */
24 25
25 26 #ifndef _KERNEL
26 27 #include <strings.h>
27 28 #include <limits.h>
28 29 #include <assert.h>
29 30 #include <security/cryptoki.h>
30 31 #endif
31 32
32 33 #include <sys/types.h>
33 34 #include <sys/kmem.h>
35 +#define INLINE_CRYPTO_GET_PTRS
34 36 #include <modes/modes.h>
35 37 #include <sys/crypto/common.h>
36 38 #include <sys/crypto/impl.h>
37 39 #include <sys/byteorder.h>
38 40
39 41 #if defined(__i386) || defined(__amd64)
40 42 #define UNALIGNED_POINTERS_PERMITTED
41 43 #endif
42 44
43 45 /*
44 46 * Encrypt multiple blocks of data in CCM mode. Decrypt for CCM mode
45 47 * is done in another function.
46 48 */
47 49 int
48 50 ccm_mode_encrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
49 51 crypto_data_t *out, size_t block_size,
50 52 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
51 - void (*copy_block)(uint8_t *, uint8_t *),
52 - void (*xor_block)(uint8_t *, uint8_t *))
53 + void (*copy_block)(const uint8_t *, uint8_t *),
54 + void (*xor_block)(const uint8_t *, uint8_t *))
53 55 {
54 56 size_t remainder = length;
55 57 size_t need;
56 58 uint8_t *datap = (uint8_t *)data;
57 59 uint8_t *blockp;
58 60 uint8_t *lastp;
59 61 void *iov_or_mp;
60 62 offset_t offset;
61 63 uint8_t *out_data_1;
62 64 uint8_t *out_data_2;
63 65 size_t out_data_1_len;
64 66 uint64_t counter;
65 67 uint8_t *mac_buf;
66 68
67 69 if (length + ctx->ccm_remainder_len < block_size) {
68 70 /* accumulate bytes here and return */
69 71 bcopy(datap,
70 72 (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
71 73 length);
72 74 ctx->ccm_remainder_len += length;
73 75 ctx->ccm_copy_to = datap;
74 76 return (CRYPTO_SUCCESS);
75 77 }
76 78
77 79 lastp = (uint8_t *)ctx->ccm_cb;
78 80 if (out != NULL)
79 81 crypto_init_ptrs(out, &iov_or_mp, &offset);
80 82
81 83 mac_buf = (uint8_t *)ctx->ccm_mac_buf;
82 84
83 85 do {
84 86 /* Unprocessed data from last call. */
85 87 if (ctx->ccm_remainder_len > 0) {
86 88 need = block_size - ctx->ccm_remainder_len;
87 89
88 90 if (need > remainder)
89 91 return (CRYPTO_DATA_LEN_RANGE);
90 92
91 93 bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
92 94 [ctx->ccm_remainder_len], need);
93 95
94 96 blockp = (uint8_t *)ctx->ccm_remainder;
95 97 } else {
96 98 blockp = datap;
97 99 }
98 100
99 101 /*
100 102 * do CBC MAC
101 103 *
102 104 * XOR the previous cipher block with the current clear block.
103 105 * mac_buf always contains the previous cipher block.
104 106 */
105 107 xor_block(blockp, mac_buf);
106 108 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
107 109
108 110 /* ccm_cb is the counter block */
109 111 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb,
110 112 (uint8_t *)ctx->ccm_tmp);
111 113
112 114 lastp = (uint8_t *)ctx->ccm_tmp;
113 115
114 116 /*
115 117 * Increment counter. Counter bits are confined
116 118 * to the bottom 64 bits of the counter block.
117 119 */
118 120 counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
119 121 counter = htonll(counter + 1);
120 122 counter &= ctx->ccm_counter_mask;
121 123 ctx->ccm_cb[1] =
122 124 (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
123 125
124 126 /*
125 127 * XOR encrypted counter block with the current clear block.
126 128 */
127 129 xor_block(blockp, lastp);
128 130
129 131 ctx->ccm_processed_data_len += block_size;
130 132
131 133 if (out == NULL) {
132 134 if (ctx->ccm_remainder_len > 0) {
133 135 bcopy(blockp, ctx->ccm_copy_to,
134 136 ctx->ccm_remainder_len);
135 137 bcopy(blockp + ctx->ccm_remainder_len, datap,
136 138 need);
137 139 }
138 140 } else {
139 141 crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
140 142 &out_data_1_len, &out_data_2, block_size);
141 143
142 144 /* copy block to where it belongs */
143 145 if (out_data_1_len == block_size) {
144 146 copy_block(lastp, out_data_1);
145 147 } else {
146 148 bcopy(lastp, out_data_1, out_data_1_len);
147 149 if (out_data_2 != NULL) {
148 150 bcopy(lastp + out_data_1_len,
149 151 out_data_2,
150 152 block_size - out_data_1_len);
151 153 }
152 154 }
153 155 /* update offset */
154 156 out->cd_offset += block_size;
155 157 }
156 158
157 159 /* Update pointer to next block of data to be processed. */
158 160 if (ctx->ccm_remainder_len != 0) {
159 161 datap += need;
160 162 ctx->ccm_remainder_len = 0;
161 163 } else {
162 164 datap += block_size;
163 165 }
164 166
165 167 remainder = (size_t)&data[length] - (size_t)datap;
166 168
167 169 /* Incomplete last block. */
168 170 if (remainder > 0 && remainder < block_size) {
169 171 bcopy(datap, ctx->ccm_remainder, remainder);
170 172 ctx->ccm_remainder_len = remainder;
171 173 ctx->ccm_copy_to = datap;
172 174 goto out;
173 175 }
174 176 ctx->ccm_copy_to = NULL;
175 177
176 178 } while (remainder > 0);
177 179
178 180 out:
179 181 return (CRYPTO_SUCCESS);
180 182 }
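
The counter update in the loop above confines the CTR value to the masked low-order bytes of the second 64-bit word of the counter block: the masked bits are converted to host order, incremented, converted back to big-endian and merged into ccm_cb[1]. A minimal userland sketch of that step, assuming htobe64()/be64toh() from <endian.h> in place of the kernel's htonll()/ntohll():

    #include <stdint.h>
    #include <endian.h>  /* htobe64/be64toh; the kernel code uses htonll/ntohll */

    /* Increment the masked counter bits of cb[1]; the mask is stored big-endian. */
    static void
    ctr_increment(uint64_t cb[2], uint64_t counter_mask)
    {
            uint64_t counter;

            counter = be64toh(cb[1] & counter_mask);    /* extract counter */
            counter = htobe64(counter + 1);             /* bump, back to big-endian */
            counter &= counter_mask;                    /* confine to the q counter bytes */
            cb[1] = (cb[1] & ~counter_mask) | counter;  /* merge back into the block */
    }
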
181 183
182 184 void
183 185 calculate_ccm_mac(ccm_ctx_t *ctx, uint8_t *ccm_mac,
184 186 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
185 187 {
186 188 uint64_t counter;
187 189 uint8_t *counterp, *mac_buf;
188 190 int i;
189 191
190 192 mac_buf = (uint8_t *)ctx->ccm_mac_buf;
191 193
192 194 /* first counter block starts with index 0 */
193 195 counter = 0;
194 196 ctx->ccm_cb[1] = (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
195 197
196 198 counterp = (uint8_t *)ctx->ccm_tmp;
197 199 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
198 200
(136 lines elided)
199 201 /* calculate XOR of MAC with first counter block */
200 202 for (i = 0; i < ctx->ccm_mac_len; i++) {
201 203 ccm_mac[i] = mac_buf[i] ^ counterp[i];
202 204 }
203 205 }
204 206
205 207 /* ARGSUSED */
206 208 int
207 209 ccm_encrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
208 210 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
209 - void (*xor_block)(uint8_t *, uint8_t *))
211 + void (*xor_block)(const uint8_t *, uint8_t *))
210 212 {
211 213 uint8_t *lastp, *mac_buf, *ccm_mac_p, *macp;
212 214 void *iov_or_mp;
213 215 offset_t offset;
214 216 uint8_t *out_data_1;
215 217 uint8_t *out_data_2;
216 218 size_t out_data_1_len;
217 219 int i;
218 220
219 221 if (out->cd_length < (ctx->ccm_remainder_len + ctx->ccm_mac_len)) {
220 222 return (CRYPTO_DATA_LEN_RANGE);
221 223 }
222 224
223 225 /*
224 226 * When we get here, the number of bytes of payload processed
225 227 * plus whatever data remains, if any,
226 228 * should be the same as the number of bytes that was
227 229 * passed in as an argument at init time.
228 230 */
229 231 if ((ctx->ccm_processed_data_len + ctx->ccm_remainder_len)
230 232 != (ctx->ccm_data_len)) {
231 233 return (CRYPTO_DATA_LEN_RANGE);
232 234 }
233 235
234 236 mac_buf = (uint8_t *)ctx->ccm_mac_buf;
235 237
236 238 if (ctx->ccm_remainder_len > 0) {
237 239
238 240 /* ccm_mac_input_buf is not used for encryption */
239 241 macp = (uint8_t *)ctx->ccm_mac_input_buf;
240 242 bzero(macp, block_size);
241 243
242 244 /* copy remainder to temporary buffer */
243 245 bcopy(ctx->ccm_remainder, macp, ctx->ccm_remainder_len);
244 246
245 247 /* calculate the CBC MAC */
246 248 xor_block(macp, mac_buf);
247 249 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
248 250
249 251 /* calculate the counter mode */
250 252 lastp = (uint8_t *)ctx->ccm_tmp;
251 253 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, lastp);
252 254
253 255 /* XOR with counter block */
254 256 for (i = 0; i < ctx->ccm_remainder_len; i++) {
255 257 macp[i] ^= lastp[i];
256 258 }
257 259 ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
258 260 }
259 261
260 262 /* Calculate the CCM MAC */
261 263 ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
262 264 calculate_ccm_mac(ctx, ccm_mac_p, encrypt_block);
263 265
264 266 crypto_init_ptrs(out, &iov_or_mp, &offset);
265 267 crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
266 268 &out_data_1_len, &out_data_2,
267 269 ctx->ccm_remainder_len + ctx->ccm_mac_len);
268 270
269 271 if (ctx->ccm_remainder_len > 0) {
270 272
271 273 /* copy temporary block to where it belongs */
272 274 if (out_data_2 == NULL) {
273 275 /* everything will fit in out_data_1 */
274 276 bcopy(macp, out_data_1, ctx->ccm_remainder_len);
275 277 bcopy(ccm_mac_p, out_data_1 + ctx->ccm_remainder_len,
276 278 ctx->ccm_mac_len);
277 279 } else {
278 280
279 281 if (out_data_1_len < ctx->ccm_remainder_len) {
280 282
281 283 size_t data_2_len_used;
282 284
283 285 bcopy(macp, out_data_1, out_data_1_len);
284 286
285 287 data_2_len_used = ctx->ccm_remainder_len
286 288 - out_data_1_len;
287 289
288 290 bcopy((uint8_t *)macp + out_data_1_len,
289 291 out_data_2, data_2_len_used);
290 292 bcopy(ccm_mac_p, out_data_2 + data_2_len_used,
291 293 ctx->ccm_mac_len);
292 294 } else {
293 295 bcopy(macp, out_data_1, out_data_1_len);
294 296 if (out_data_1_len == ctx->ccm_remainder_len) {
295 297 /* mac will be in out_data_2 */
296 298 bcopy(ccm_mac_p, out_data_2,
297 299 ctx->ccm_mac_len);
298 300 } else {
299 301 size_t len_not_used = out_data_1_len -
300 302 ctx->ccm_remainder_len;
301 303 /*
302 304 * part of the mac will be in
303 305 * out_data_1, part of the mac will be
304 306 * in out_data_2
305 307 */
306 308 bcopy(ccm_mac_p,
307 309 out_data_1 + ctx->ccm_remainder_len,
308 310 len_not_used);
309 311 bcopy(ccm_mac_p + len_not_used,
310 312 out_data_2,
311 313 ctx->ccm_mac_len - len_not_used);
312 314
313 315 }
314 316 }
315 317 }
316 318 } else {
317 319 /* copy block to where it belongs */
318 320 bcopy(ccm_mac_p, out_data_1, out_data_1_len);
319 321 if (out_data_2 != NULL) {
320 322 bcopy(ccm_mac_p + out_data_1_len, out_data_2,
321 323 block_size - out_data_1_len);
322 324 }
323 325 }
324 326 out->cd_offset += ctx->ccm_remainder_len + ctx->ccm_mac_len;
325 327 ctx->ccm_remainder_len = 0;
326 328 return (CRYPTO_SUCCESS);
327 329 }
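
As calculate_ccm_mac() above shows, the CCM tag is the final CBC-MAC value XORed with the encryption of counter block 0 (the counter field reset to zero), truncated to ccm_mac_len bytes. A compact sketch of that last step, with a hypothetical aes_encrypt(key, in, out) primitive standing in for encrypt_block():

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical single-block primitive standing in for encrypt_block(). */
    void aes_encrypt(const void *key, const uint8_t in[16], uint8_t out[16]);

    /* tag = MSB_mac_len(E(K, Ctr_0) XOR CBC-MAC) */
    static void
    ccm_make_tag(const void *key, const uint8_t ctr0[16],
        const uint8_t cbc_mac[16], uint8_t *tag, size_t mac_len)
    {
            uint8_t s0[16];
            size_t i;

            aes_encrypt(key, ctr0, s0);             /* S_0 = E(K, Ctr_0) */
            for (i = 0; i < mac_len; i++)
                    tag[i] = cbc_mac[i] ^ s0[i];    /* truncate to mac_len bytes */
    }
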
328 330
329 331 /*
330 332 * This will only deal with decrypting the last block of the input that
331 333 * might not be a multiple of block length.
332 334 */
333 335 void
334 336 ccm_decrypt_incomplete_block(ccm_ctx_t *ctx,
335 337 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *))
336 338 {
337 339 uint8_t *datap, *outp, *counterp;
338 340 int i;
339 341
340 342 datap = (uint8_t *)ctx->ccm_remainder;
341 343 outp = &((ctx->ccm_pt_buf)[ctx->ccm_processed_data_len]);
342 344
343 345 counterp = (uint8_t *)ctx->ccm_tmp;
344 346 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, counterp);
345 347
346 348 /* XOR with counter block */
347 349 for (i = 0; i < ctx->ccm_remainder_len; i++) {
348 350 outp[i] = datap[i] ^ counterp[i];
349 351 }
350 352 }
351 353
(132 lines elided)
352 354 /*
353 355 * This will decrypt the cipher text. However, the plaintext won't be
354 356 * returned to the caller. It will be returned when decrypt_final() is
355 357 * called, if the MAC matches.
356 358 */
357 359 /* ARGSUSED */
358 360 int
359 361 ccm_mode_decrypt_contiguous_blocks(ccm_ctx_t *ctx, char *data, size_t length,
360 362 crypto_data_t *out, size_t block_size,
361 363 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
362 - void (*copy_block)(uint8_t *, uint8_t *),
363 - void (*xor_block)(uint8_t *, uint8_t *))
364 + void (*copy_block)(const uint8_t *, uint8_t *),
365 + void (*xor_block)(const uint8_t *, uint8_t *))
364 366 {
365 367 size_t remainder = length;
366 368 size_t need;
367 369 uint8_t *datap = (uint8_t *)data;
368 370 uint8_t *blockp;
369 371 uint8_t *cbp;
370 372 uint64_t counter;
371 373 size_t pt_len, total_decrypted_len, mac_len, pm_len, pd_len;
372 374 uint8_t *resultp;
373 375
374 376
375 377 pm_len = ctx->ccm_processed_mac_len;
376 378
377 379 if (pm_len > 0) {
378 380 uint8_t *tmp;
379 381 /*
380 382 * all ciphertext has been processed, just waiting for
381 383 * the rest of the mac value
382 384 */
383 385 if ((pm_len + length) > ctx->ccm_mac_len) {
384 386 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
385 387 }
386 388 tmp = (uint8_t *)ctx->ccm_mac_input_buf;
387 389
388 390 bcopy(datap, tmp + pm_len, length);
389 391
390 392 ctx->ccm_processed_mac_len += length;
391 393 return (CRYPTO_SUCCESS);
392 394 }
393 395
394 396 /*
395 397 * If we decrypt the given data, what total amount of data would
396 398 * have been decrypted?
397 399 */
398 400 pd_len = ctx->ccm_processed_data_len;
399 401 total_decrypted_len = pd_len + length + ctx->ccm_remainder_len;
400 402
401 403 if (total_decrypted_len >
402 404 (ctx->ccm_data_len + ctx->ccm_mac_len)) {
403 405 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
404 406 }
405 407
406 408 pt_len = ctx->ccm_data_len;
407 409
408 410 if (total_decrypted_len > pt_len) {
409 411 /*
410 412 * part of the input will be the MAC, need to isolate that
411 413 * to be dealt with later. The left-over data in
412 414 * ccm_remainder_len from last time will not be part of the
413 415 * MAC. Otherwise, it would have already been taken out
414 416 * during the previous call.
415 417 */
416 418 size_t pt_part = pt_len - pd_len - ctx->ccm_remainder_len;
417 419
418 420 mac_len = length - pt_part;
419 421
420 422 ctx->ccm_processed_mac_len = mac_len;
421 423 bcopy(data + pt_part, ctx->ccm_mac_input_buf, mac_len);
422 424
423 425 if (pt_part + ctx->ccm_remainder_len < block_size) {
424 426 /*
425 427 * since this is the last of the ciphertext,
426 428 * just decrypt it here
427 429 */
428 430 bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
429 431 [ctx->ccm_remainder_len], pt_part);
430 432 ctx->ccm_remainder_len += pt_part;
431 433 ccm_decrypt_incomplete_block(ctx, encrypt_block);
432 434 ctx->ccm_processed_data_len += ctx->ccm_remainder_len;
433 435 ctx->ccm_remainder_len = 0;
434 436 return (CRYPTO_SUCCESS);
435 437 } else {
436 438 /* let rest of the code handle this */
437 439 length = pt_part;
438 440 }
439 441 } else if (length + ctx->ccm_remainder_len < block_size) {
440 442 /* accumulate bytes here and return */
441 443 bcopy(datap,
442 444 (uint8_t *)ctx->ccm_remainder + ctx->ccm_remainder_len,
443 445 length);
444 446 ctx->ccm_remainder_len += length;
445 447 ctx->ccm_copy_to = datap;
446 448 return (CRYPTO_SUCCESS);
447 449 }
448 450
449 451 do {
450 452 /* Unprocessed data from last call. */
451 453 if (ctx->ccm_remainder_len > 0) {
452 454 need = block_size - ctx->ccm_remainder_len;
453 455
454 456 if (need > remainder)
455 457 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
456 458
457 459 bcopy(datap, &((uint8_t *)ctx->ccm_remainder)
458 460 [ctx->ccm_remainder_len], need);
459 461
460 462 blockp = (uint8_t *)ctx->ccm_remainder;
461 463 } else {
462 464 blockp = datap;
463 465 }
464 466
465 467 /* Calculate the counter mode, ccm_cb is the counter block */
466 468 cbp = (uint8_t *)ctx->ccm_tmp;
467 469 encrypt_block(ctx->ccm_keysched, (uint8_t *)ctx->ccm_cb, cbp);
468 470
469 471 /*
470 472 * Increment counter.
471 473 * Counter bits are confined to the bottom 64 bits
472 474 */
473 475 counter = ntohll(ctx->ccm_cb[1] & ctx->ccm_counter_mask);
474 476 counter = htonll(counter + 1);
475 477 counter &= ctx->ccm_counter_mask;
476 478 ctx->ccm_cb[1] =
477 479 (ctx->ccm_cb[1] & ~(ctx->ccm_counter_mask)) | counter;
478 480
479 481 /* XOR with the ciphertext */
480 482 xor_block(blockp, cbp);
481 483
482 484 /* Copy the plaintext to the "holding buffer" */
483 485 resultp = (uint8_t *)ctx->ccm_pt_buf +
484 486 ctx->ccm_processed_data_len;
485 487 copy_block(cbp, resultp);
486 488
487 489 ctx->ccm_processed_data_len += block_size;
488 490
489 491 ctx->ccm_lastp = blockp;
490 492
491 493 /* Update pointer to next block of data to be processed. */
492 494 if (ctx->ccm_remainder_len != 0) {
493 495 datap += need;
494 496 ctx->ccm_remainder_len = 0;
495 497 } else {
496 498 datap += block_size;
497 499 }
498 500
499 501 remainder = (size_t)&data[length] - (size_t)datap;
500 502
501 503 /* Incomplete last block */
502 504 if (remainder > 0 && remainder < block_size) {
503 505 bcopy(datap, ctx->ccm_remainder, remainder);
504 506 ctx->ccm_remainder_len = remainder;
505 507 ctx->ccm_copy_to = datap;
506 508 if (ctx->ccm_processed_mac_len > 0) {
507 509 /*
508 510 * not expecting any more ciphertext, just
509 511 * compute plaintext for the remaining input
510 512 */
511 513 ccm_decrypt_incomplete_block(ctx,
512 514 encrypt_block);
513 515 ctx->ccm_processed_data_len += remainder;
514 516 ctx->ccm_remainder_len = 0;
515 517 }
516 518 goto out;
517 519 }
518 520 ctx->ccm_copy_to = NULL;
(145 lines elided)
519 521
520 522 } while (remainder > 0);
521 523
522 524 out:
523 525 return (CRYPTO_SUCCESS);
524 526 }
525 527
526 528 int
527 529 ccm_decrypt_final(ccm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
528 530 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
529 - void (*copy_block)(uint8_t *, uint8_t *),
530 - void (*xor_block)(uint8_t *, uint8_t *))
531 + void (*copy_block)(const uint8_t *, uint8_t *),
532 + void (*xor_block)(const uint8_t *, uint8_t *))
531 533 {
532 534 size_t mac_remain, pt_len;
533 535 uint8_t *pt, *mac_buf, *macp, *ccm_mac_p;
534 536 int rv;
535 537
536 538 pt_len = ctx->ccm_data_len;
537 539
538 540 /* Make sure output buffer can fit all of the plaintext */
539 541 if (out->cd_length < pt_len) {
540 542 return (CRYPTO_DATA_LEN_RANGE);
541 543 }
542 544
543 545 pt = ctx->ccm_pt_buf;
544 546 mac_remain = ctx->ccm_processed_data_len;
545 547 mac_buf = (uint8_t *)ctx->ccm_mac_buf;
546 548
547 549 macp = (uint8_t *)ctx->ccm_tmp;
548 550
549 551 while (mac_remain > 0) {
550 552
551 553 if (mac_remain < block_size) {
552 554 bzero(macp, block_size);
553 555 bcopy(pt, macp, mac_remain);
554 556 mac_remain = 0;
555 557 } else {
556 558 copy_block(pt, macp);
557 559 mac_remain -= block_size;
558 560 pt += block_size;
559 561 }
560 562
561 563 /* calculate the CBC MAC */
562 564 xor_block(macp, mac_buf);
563 565 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
564 566 }
565 567
566 568 /* Calculate the CCM MAC */
567 569 ccm_mac_p = (uint8_t *)ctx->ccm_tmp;
568 570 calculate_ccm_mac((ccm_ctx_t *)ctx, ccm_mac_p, encrypt_block);
569 571
570 572 /* compare the input CCM MAC value with what we calculated */
571 573 if (bcmp(ctx->ccm_mac_input_buf, ccm_mac_p, ctx->ccm_mac_len)) {
572 574 /* They don't match */
573 575 return (CRYPTO_INVALID_MAC);
574 576 } else {
575 577 rv = crypto_put_output_data(ctx->ccm_pt_buf, out, pt_len);
576 578 if (rv != CRYPTO_SUCCESS)
577 579 return (rv);
578 580 out->cd_offset += pt_len;
579 581 }
580 582 return (CRYPTO_SUCCESS);
581 583 }
582 584
583 585 int
584 586 ccm_validate_args(CK_AES_CCM_PARAMS *ccm_param, boolean_t is_encrypt_init)
585 587 {
586 588 size_t macSize, nonceSize;
587 589 uint8_t q;
588 590 uint64_t maxValue;
589 591
590 592 /*
591 593 * Check the length of the MAC. The only valid
592 594 * lengths for the MAC are: 4, 6, 8, 10, 12, 14, 16
593 595 */
594 596 macSize = ccm_param->ulMACSize;
595 597 if ((macSize < 4) || (macSize > 16) || ((macSize % 2) != 0)) {
596 598 return (CRYPTO_MECHANISM_PARAM_INVALID);
597 599 }
598 600
599 601 /* Check the nonce length. Valid values are 7, 8, 9, 10, 11, 12, 13 */
600 602 nonceSize = ccm_param->ulNonceSize;
601 603 if ((nonceSize < 7) || (nonceSize > 13)) {
602 604 return (CRYPTO_MECHANISM_PARAM_INVALID);
603 605 }
604 606
605 607 /* q is the length of the field storing the length, in bytes */
606 608 q = (uint8_t)((15 - nonceSize) & 0xFF);
607 609
608 610
609 611 /*
610 612 * For decryption, make sure the size of the ciphertext is at least
611 613 * as large as the MAC length
612 614 */
613 615 if ((!is_encrypt_init) && (ccm_param->ulDataSize < macSize)) {
614 616 return (CRYPTO_MECHANISM_PARAM_INVALID);
615 617 }
616 618
617 619 /*
618 620 * Check to make sure the length of the payload is within the
619 621 * range of values allowed by q
620 622 */
621 623 if (q < 8) {
622 624 maxValue = (1ULL << (q * 8)) - 1;
623 625 } else {
624 626 maxValue = ULONG_MAX;
625 627 }
626 628
627 629 if (ccm_param->ulDataSize > maxValue) {
628 630 return (CRYPTO_MECHANISM_PARAM_INVALID);
629 631 }
630 632 return (CRYPTO_SUCCESS);
631 633 }
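
For reference, a hedged sketch of CCM parameters that would pass the checks above for an encrypt init. The field names are the ones used by ccm_init_ctx() further down; the buffers and lengths are placeholders supplied by the caller:

    /* Assumes the headers this file already includes for CK_AES_CCM_PARAMS. */
    static void
    ccm_fill_params(CK_AES_CCM_PARAMS *p, uchar_t *nonce_buf, uchar_t *aad_buf,
        ulong_t payload_len, ulong_t aad_len)
    {
            p->ulMACSize = 16;             /* even value in [4, 16] */
            p->ulNonceSize = 12;           /* in [7, 13], so q = 15 - 12 = 3 */
            p->ulDataSize = payload_len;   /* must fit in q bytes: < 2^24 here */
            p->ulAuthDataSize = aad_len;
            p->nonce = nonce_buf;
            p->authData = aad_buf;
            /* ccm_validate_args(p, B_TRUE) should then return CRYPTO_SUCCESS. */
    }
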
632 634
633 635 /*
634 636 * Format the first block used in CBC-MAC (B0) and the initial counter
635 637 * block based on formatting functions and counter generation functions
636 638 * specified in RFC 3610 and NIST publication 800-38C, appendix A
637 639 *
638 640 * b0 is the first block used in CBC-MAC
639 641 * cb0 is the first counter block
640 642 *
641 643 * It's assumed that the arguments b0 and cb0 are preallocated AES blocks
642 644 *
643 645 */
644 646 static void
645 647 ccm_format_initial_blocks(uchar_t *nonce, ulong_t nonceSize,
646 648 ulong_t authDataSize, uint8_t *b0, ccm_ctx_t *aes_ctx)
647 649 {
648 650 uint64_t payloadSize;
649 651 uint8_t t, q, have_adata = 0;
650 652 size_t limit;
651 653 int i, j, k;
652 654 uint64_t mask = 0;
653 655 uint8_t *cb;
654 656
655 657 q = (uint8_t)((15 - nonceSize) & 0xFF);
656 658 t = (uint8_t)((aes_ctx->ccm_mac_len) & 0xFF);
657 659
658 660 /* Construct the first octet of b0 */
659 661 if (authDataSize > 0) {
660 662 have_adata = 1;
661 663 }
662 664 b0[0] = (have_adata << 6) | (((t - 2) / 2) << 3) | (q - 1);
663 665
664 666 /* copy the nonce value into b0 */
665 667 bcopy(nonce, &(b0[1]), nonceSize);
666 668
667 669 /* store the length of the payload into b0 */
668 670 bzero(&(b0[1+nonceSize]), q);
669 671
670 672 payloadSize = aes_ctx->ccm_data_len;
671 673 limit = 8 < q ? 8 : q;
672 674
673 675 for (i = 0, j = 0, k = 15; i < limit; i++, j += 8, k--) {
674 676 b0[k] = (uint8_t)((payloadSize >> j) & 0xFF);
675 677 }
676 678
677 679 /* format the counter block */
678 680
679 681 cb = (uint8_t *)aes_ctx->ccm_cb;
680 682
681 683 cb[0] = 0x07 & (q-1); /* first byte */
682 684
683 685 /* copy the nonce value into the counter block */
684 686 bcopy(nonce, &(cb[1]), nonceSize);
685 687
686 688 bzero(&(cb[1+nonceSize]), q);
687 689
688 690 /* Create the mask for the counter field based on the size of nonce */
689 691 q <<= 3;
690 692 while (q-- > 0) {
691 693 mask |= (1ULL << q);
692 694 }
693 695
694 696 aes_ctx->ccm_counter_mask = htonll(mask);
695 697
696 698 /*
697 699 * During calculation we start with counter block 1, so we
698 700 * set it up right here.
699 701 * We can just set the last byte to have the value 1, because
700 702 * even with the biggest nonce of 13, the last byte of the
701 703 * counter block will be used for the counter value.
702 704 */
703 705 cb[15] = 0x01;
704 706 }
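
The first octet of b0 constructed above packs three fields: the Adata flag, the encoded MAC length t, and the size q of the payload-length field. An illustrative helper (not part of this change) plus a worked value:

    #include <stdint.h>

    /* Flags octet of B0: Adata bit, encoded MAC length t, length-field size q. */
    static uint8_t
    ccm_b0_flags(int have_adata, uint8_t t, uint8_t q)
    {
            return (((have_adata ? 1 : 0) << 6) | (((t - 2) / 2) << 3) | (q - 1));
    }

    /*
     * Example: a 12-byte nonce gives q = 15 - 12 = 3; with a 16-byte MAC and
     * adata present, ccm_b0_flags(1, 16, 3) == 0x40 | 0x38 | 0x02 == 0x7a.
     */
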
705 707
706 708 /*
707 709 * Encode the length of the associated data as
708 710 * specified in RFC 3610 and NIST publication 800-38C, appendix A
709 711 */
710 712 static void
711 713 encode_adata_len(ulong_t auth_data_len, uint8_t *encoded, size_t *encoded_len)
712 714 {
713 715 #ifdef UNALIGNED_POINTERS_PERMITTED
714 716 uint32_t *lencoded_ptr;
715 717 #ifdef _LP64
716 718 uint64_t *llencoded_ptr;
717 719 #endif
718 720 #endif /* UNALIGNED_POINTERS_PERMITTED */
719 721
720 722 if (auth_data_len < ((1ULL<<16) - (1ULL<<8))) {
721 723 /* 0 < a < (2^16-2^8) */
722 724 *encoded_len = 2;
723 725 encoded[0] = (auth_data_len & 0xff00) >> 8;
724 726 encoded[1] = auth_data_len & 0xff;
725 727
726 728 } else if ((auth_data_len >= ((1ULL<<16) - (1ULL<<8))) &&
727 729 (auth_data_len < (1ULL << 31))) {
728 730 /* (2^16-2^8) <= a < 2^32 */
729 731 *encoded_len = 6;
730 732 encoded[0] = 0xff;
731 733 encoded[1] = 0xfe;
732 734 #ifdef UNALIGNED_POINTERS_PERMITTED
733 735 lencoded_ptr = (uint32_t *)(void *)&encoded[2];
734 736 *lencoded_ptr = htonl(auth_data_len);
735 737 #else
736 738 encoded[2] = (auth_data_len & 0xff000000) >> 24;
737 739 encoded[3] = (auth_data_len & 0xff0000) >> 16;
738 740 encoded[4] = (auth_data_len & 0xff00) >> 8;
739 741 encoded[5] = auth_data_len & 0xff;
740 742 #endif /* UNALIGNED_POINTERS_PERMITTED */
741 743
742 744 #ifdef _LP64
743 745 } else {
744 746 /* 2^32 <= a < 2^64 */
745 747 *encoded_len = 10;
746 748 encoded[0] = 0xff;
747 749 encoded[1] = 0xff;
748 750 #ifdef UNALIGNED_POINTERS_PERMITTED
749 751 llencoded_ptr = (uint64_t *)(void *)&encoded[2];
750 752 *llencoded_ptr = htonl(auth_data_len);
751 753 #else
752 754 encoded[2] = (auth_data_len & 0xff00000000000000) >> 56;
753 755 encoded[3] = (auth_data_len & 0xff000000000000) >> 48;
754 756 encoded[4] = (auth_data_len & 0xff0000000000) >> 40;
755 757 encoded[5] = (auth_data_len & 0xff00000000) >> 32;
756 758 encoded[6] = (auth_data_len & 0xff000000) >> 24;
757 759 encoded[7] = (auth_data_len & 0xff0000) >> 16;
758 760 encoded[8] = (auth_data_len & 0xff00) >> 8;
759 761 encoded[9] = auth_data_len & 0xff;
760 762 #endif /* UNALIGNED_POINTERS_PERMITTED */
761 763 #endif /* _LP64 */
762 764 }
(222 lines elided)
763 765 }
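
Per RFC 3610 and SP 800-38C appendix A, the function above encodes the associated-data length a in one of three forms, matching the branches of the code; a quick reference with a worked value (illustrative only):

    0 < a < 2^16 - 2^8        ->  2 bytes: the length itself, big-endian
    2^16 - 2^8 <= a < 2^32    ->  6 bytes: 0xff 0xfe followed by a 32-bit length
    2^32 <= a < 2^64          -> 10 bytes: 0xff 0xff followed by a 64-bit length

    e.g. a = 70000 (0x11170) encodes as ff fe 00 01 11 70.
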
764 766
765 767 /*
766 768 * The following function should be called at encrypt or decrypt init time
767 769 * for AES CCM mode.
768 770 */
769 771 int
770 772 ccm_init(ccm_ctx_t *ctx, unsigned char *nonce, size_t nonce_len,
771 773 unsigned char *auth_data, size_t auth_data_len, size_t block_size,
772 774 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
773 - void (*xor_block)(uint8_t *, uint8_t *))
775 + void (*xor_block)(const uint8_t *, uint8_t *))
774 776 {
775 777 uint8_t *mac_buf, *datap, *ivp, *authp;
776 778 size_t remainder, processed;
777 779 uint8_t encoded_a[10]; /* max encoded auth data length is 10 octets */
778 780 size_t encoded_a_len = 0;
779 781
780 782 mac_buf = (uint8_t *)&(ctx->ccm_mac_buf);
781 783
782 784 /*
783 785 * Format the 1st block for CBC-MAC and construct the
784 786 * 1st counter block.
785 787 *
786 788 * aes_ctx->ccm_iv is used for storing the counter block
787 789 * mac_buf will store b0 at this time.
788 790 */
789 791 ccm_format_initial_blocks(nonce, nonce_len,
790 792 auth_data_len, mac_buf, ctx);
791 793
792 794 /* The IV for CBC MAC for AES CCM mode is always zero */
793 795 ivp = (uint8_t *)ctx->ccm_tmp;
794 796 bzero(ivp, block_size);
795 797
796 798 xor_block(ivp, mac_buf);
797 799
798 800 /* encrypt the nonce */
799 801 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
800 802
801 803 /* take care of the associated data, if any */
802 804 if (auth_data_len == 0) {
803 805 return (CRYPTO_SUCCESS);
804 806 }
805 807
806 808 encode_adata_len(auth_data_len, encoded_a, &encoded_a_len);
807 809
808 810 remainder = auth_data_len;
809 811
810 812 /* 1st block: it contains the encoded adata length, and some adata */
811 813 authp = (uint8_t *)ctx->ccm_tmp;
812 814 bzero(authp, block_size);
813 815 bcopy(encoded_a, authp, encoded_a_len);
814 816 processed = block_size - encoded_a_len;
815 817 if (processed > auth_data_len) {
816 818 /* in case auth_data is very small */
817 819 processed = auth_data_len;
818 820 }
819 821 bcopy(auth_data, authp+encoded_a_len, processed);
820 822 /* xor with previous buffer */
821 823 xor_block(authp, mac_buf);
822 824 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
823 825 remainder -= processed;
824 826 if (remainder == 0) {
825 827 /* a small amount of associated data, it's all done now */
826 828 return (CRYPTO_SUCCESS);
827 829 }
828 830
829 831 do {
830 832 if (remainder < block_size) {
831 833 /*
832 834 * There's not a full block of data, pad the rest of the
833 835 * buffer with zeros
834 836 */
835 837 bzero(authp, block_size);
836 838 bcopy(&(auth_data[processed]), authp, remainder);
837 839 datap = (uint8_t *)authp;
838 840 remainder = 0;
839 841 } else {
840 842 datap = (uint8_t *)(&(auth_data[processed]));
841 843 processed += block_size;
842 844 remainder -= block_size;
843 845 }
844 846
845 847 xor_block(datap, mac_buf);
846 848 encrypt_block(ctx->ccm_keysched, mac_buf, mac_buf);
(63 lines elided)
847 849
848 850 } while (remainder > 0);
849 851
850 852 return (CRYPTO_SUCCESS);
851 853 }
852 854
853 855 int
854 856 ccm_init_ctx(ccm_ctx_t *ccm_ctx, char *param, int kmflag,
855 857 boolean_t is_encrypt_init, size_t block_size,
856 858 int (*encrypt_block)(const void *, const uint8_t *, uint8_t *),
857 - void (*xor_block)(uint8_t *, uint8_t *))
859 + void (*xor_block)(const uint8_t *, uint8_t *))
858 860 {
859 861 int rv;
860 862 CK_AES_CCM_PARAMS *ccm_param;
861 863
862 864 if (param != NULL) {
863 865 ccm_param = (CK_AES_CCM_PARAMS *)(void *)param;
864 866
865 867 if ((rv = ccm_validate_args(ccm_param,
866 868 is_encrypt_init)) != 0) {
867 869 return (rv);
868 870 }
869 871
870 872 ccm_ctx->ccm_mac_len = ccm_param->ulMACSize;
871 873 if (is_encrypt_init) {
872 874 ccm_ctx->ccm_data_len = ccm_param->ulDataSize;
873 875 } else {
874 876 ccm_ctx->ccm_data_len =
875 877 ccm_param->ulDataSize - ccm_ctx->ccm_mac_len;
876 878 ccm_ctx->ccm_processed_mac_len = 0;
877 879 }
878 880 ccm_ctx->ccm_processed_data_len = 0;
879 881
880 882 ccm_ctx->ccm_flags |= CCM_MODE;
881 883 } else {
882 884 rv = CRYPTO_MECHANISM_PARAM_INVALID;
883 885 goto out;
884 886 }
885 887
886 888 if (ccm_init(ccm_ctx, ccm_param->nonce, ccm_param->ulNonceSize,
887 889 ccm_param->authData, ccm_param->ulAuthDataSize, block_size,
888 890 encrypt_block, xor_block) != 0) {
889 891 rv = CRYPTO_MECHANISM_PARAM_INVALID;
890 892 goto out;
891 893 }
892 894 if (!is_encrypt_init) {
893 895 /* allocate buffer for storing decrypted plaintext */
894 896 #ifdef _KERNEL
895 897 ccm_ctx->ccm_pt_buf = kmem_alloc(ccm_ctx->ccm_data_len,
896 898 kmflag);
897 899 #else
898 900 ccm_ctx->ccm_pt_buf = malloc(ccm_ctx->ccm_data_len);
899 901 #endif
900 902 if (ccm_ctx->ccm_pt_buf == NULL) {
901 903 rv = CRYPTO_HOST_MEMORY;
902 904 }
903 905 }
904 906 out:
905 907 return (rv);
906 908 }
907 909
908 910 void *
909 911 ccm_alloc_ctx(int kmflag)
910 912 {
911 913 ccm_ctx_t *ccm_ctx;
912 914
913 915 #ifdef _KERNEL
914 916 if ((ccm_ctx = kmem_zalloc(sizeof (ccm_ctx_t), kmflag)) == NULL)
915 917 #else
916 918 if ((ccm_ctx = calloc(1, sizeof (ccm_ctx_t))) == NULL)
917 919 #endif
918 920 return (NULL);
919 921
920 922 ccm_ctx->ccm_flags = CCM_MODE;
921 923 return (ccm_ctx);
922 924 }
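
Taken together, a caller drives CCM encryption through the functions in this file roughly as follows. This is only a hedged sketch of the call order: error handling, the crypto_data_t output setup and the AES key schedule in ccm_keysched are omitted, and aes_encrypt_block/aes_copy_block/aes_xor_block/AES_BLOCK_LEN name the AES callbacks and block size that real callers pass in:

    ccm_ctx_t *ctx = ccm_alloc_ctx(KM_SLEEP);

    /* ctx->ccm_keysched must be set up by the caller before init (not shown). */
    (void) ccm_init_ctx(ctx, (char *)&ccm_params, KM_SLEEP, B_TRUE,
        AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);

    (void) ccm_mode_encrypt_contiguous_blocks(ctx, (char *)plaintext, pt_len,
        &out, AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block, aes_xor_block);

    (void) ccm_encrypt_final(ctx, &out, AES_BLOCK_LEN, aes_encrypt_block,
        aes_xor_block);
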
(55 lines elided)