Print this page
4896 Performance improvements for KCF AES modes
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/crypto/io/aes.c
+++ new/usr/src/uts/common/crypto/io/aes.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright 2015 by Saso Kiselkov. All rights reserved.
23 24 */
24 25
25 26 /*
26 27 * AES provider for the Kernel Cryptographic Framework (KCF)
27 28 */
28 29
29 30 #include <sys/types.h>
30 31 #include <sys/systm.h>
31 32 #include <sys/modctl.h>
32 33 #include <sys/cmn_err.h>
33 34 #include <sys/ddi.h>
34 35 #include <sys/crypto/common.h>
35 36 #include <sys/crypto/impl.h>
36 37 #include <sys/crypto/spi.h>
37 38 #include <sys/sysmacros.h>
38 39 #include <sys/strsun.h>
39 40 #include <modes/modes.h>
40 41 #define _AES_IMPL
41 42 #include <aes/aes_impl.h>
42 43
43 44 extern struct mod_ops mod_cryptoops;
44 45
45 46 /*
46 47 * Module linkage information for the kernel.
47 48 */
/* Cryptographic module descriptor, hooked into the kernel via mod_cryptoops. */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"AES Kernel SW Provider"
};

/* Single-linkage list handed to mod_install()/mod_remove() in _init/_fini. */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcrypto,
	NULL
};
58 59
59 60 /*
60 61 * Mechanism info structure passed to KCF during registration.
61 62 */
/*
 * One entry per supported AES mechanism.  Key sizes are expressed in
 * bytes (CRYPTO_KEYSIZE_UNIT_IN_BYTES).  All mechanisms support both
 * streaming and atomic encrypt/decrypt; GMAC additionally advertises
 * MAC, sign and verify capability.
 */
static crypto_mech_info_t aes_mech_info_tab[] = {
	/* AES_ECB */
	{SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_CBC */
	{SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_CTR */
	{SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_CCM */
	{SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_GCM */
	{SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* AES_GMAC */
	{SUN_CKM_AES_GMAC, AES_GMAC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
	    CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC |
	    CRYPTO_FG_SIGN | CRYPTO_FG_SIGN_ATOMIC |
	    CRYPTO_FG_VERIFY | CRYPTO_FG_VERIFY_ATOMIC,
	    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
97 98
/*
 * Operations are performed in-place when the output buffer is NULL:
 * alias the output to the input.  Wrapped in do/while (0) so the macro
 * expands to exactly one statement; the previous bare-if form could
 * capture a following `else' (dangling-else hazard) when used inside
 * an if/else chain.  Call sites keep their trailing semicolon.
 */
#define	AES_ARG_INPLACE(input, output)				\
	do {							\
		if ((output) == NULL)				\
			(output) = (input);			\
	} while (0)
102 103
static void aes_provider_status(crypto_provider_handle_t, uint_t *);

/* Control ops vector: only the provider-status entry point is implemented. */
static crypto_control_ops_t aes_control_ops = {
	aes_provider_status
};

/* Forward declarations for the cipher entry points registered below. */
static int aes_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t, boolean_t);
static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *,
    crypto_mechanism_t *, crypto_key_t *, int, boolean_t);
static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);

static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

/* Cipher ops vector: full streaming and atomic encrypt/decrypt support. */
static crypto_cipher_ops_t aes_cipher_ops = {
	aes_encrypt_init,
	aes_encrypt,
	aes_encrypt_update,
	aes_encrypt_final,
	aes_encrypt_atomic,
	aes_decrypt_init,
	aes_decrypt,
	aes_decrypt_update,
	aes_decrypt_final,
	aes_decrypt_atomic
};

static int aes_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);

/*
 * MAC ops vector: only the atomic entry points are provided (streaming
 * MAC slots are NULL).
 */
static crypto_mac_ops_t aes_mac_ops = {
	NULL,
	NULL,
	NULL,
	NULL,
	aes_mac_atomic,
	aes_mac_verify_atomic
};

static int aes_create_ctx_template(crypto_provider_handle_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
    size_t *, crypto_req_handle_t);
static int aes_free_context(crypto_ctx_t *);

/* Context ops: key-schedule template creation and context teardown. */
static crypto_ctx_ops_t aes_ctx_ops = {
	aes_create_ctx_template,
	aes_free_context
};

/*
 * Top-level SPI ops table.  Only control, cipher, MAC and context ops
 * are supplied; the remaining op classes are unimplemented (NULL).
 */
static crypto_ops_t aes_crypto_ops = {
	&aes_control_ops,
	NULL,
	&aes_cipher_ops,
	&aes_mac_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	&aes_ctx_ops,
	NULL,
	NULL,
	NULL,
};

/* Provider description handed to KCF by crypto_register_provider(). */
static crypto_provider_info_t aes_prov_info = {
	CRYPTO_SPI_VERSION_4,
	"AES Software Provider",
	CRYPTO_SW_PROVIDER,
	{&modlinkage},
	NULL,
	&aes_crypto_ops,
	sizeof (aes_mech_info_tab)/sizeof (crypto_mech_info_t),
	aes_mech_info_tab
};

/* Non-NULL while the provider is registered with KCF (see _init/_fini). */
static crypto_kcf_provider_handle_t aes_prov_handle = NULL;
static crypto_data_t null_crypto_data = { CRYPTO_DATA_RAW };
210 211
211 212 int
212 213 _init(void)
213 214 {
214 215 int ret;
215 216
216 217 if ((ret = mod_install(&modlinkage)) != 0)
217 218 return (ret);
218 219
219 220 /* Register with KCF. If the registration fails, remove the module. */
220 221 if (crypto_register_provider(&aes_prov_info, &aes_prov_handle)) {
221 222 (void) mod_remove(&modlinkage);
222 223 return (EACCES);
223 224 }
224 225
225 226 return (0);
226 227 }
227 228
228 229 int
229 230 _fini(void)
230 231 {
231 232 /* Unregister from KCF if module is registered */
232 233 if (aes_prov_handle != NULL) {
233 234 if (crypto_unregister_provider(aes_prov_handle))
234 235 return (EBUSY);
235 236
236 237 aes_prov_handle = NULL;
237 238 }
238 239
239 240 return (mod_remove(&modlinkage));
240 241 }
241 242
/* Module information entry point: delegate to mod_info(). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
247 248
248 249
/*
 * Validate the mechanism parameter length for the requested AES mode
 * and, when `ctx' is non-NULL, allocate the mode-specific context via
 * that mode's allocator.  Returns CRYPTO_MECHANISM_INVALID for an
 * unknown mechanism type, CRYPTO_MECHANISM_PARAM_INVALID for a bad
 * parameter length, CRYPTO_SUCCESS otherwise.
 */
static int
aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx, int kmflag)
{
	void *p = NULL;
	boolean_t param_required = B_TRUE;
	size_t param_len;
	void *(*alloc_fun)(int);
	int rv = CRYPTO_SUCCESS;

	switch (mechanism->cm_type) {
	case AES_ECB_MECH_INFO_TYPE:
		/* ECB is the only mode here that takes no parameter */
		param_required = B_FALSE;
		alloc_fun = ecb_alloc_ctx;
		break;
	case AES_CBC_MECH_INFO_TYPE:
		param_len = AES_BLOCK_LEN;
		alloc_fun = cbc_alloc_ctx;
		break;
	case AES_CTR_MECH_INFO_TYPE:
		param_len = sizeof (CK_AES_CTR_PARAMS);
		alloc_fun = ctr_alloc_ctx;
		break;
	case AES_CCM_MECH_INFO_TYPE:
		param_len = sizeof (CK_AES_CCM_PARAMS);
		alloc_fun = ccm_alloc_ctx;
		break;
	case AES_GCM_MECH_INFO_TYPE:
		param_len = sizeof (CK_AES_GCM_PARAMS);
		alloc_fun = gcm_alloc_ctx;
		break;
	case AES_GMAC_MECH_INFO_TYPE:
		param_len = sizeof (CK_AES_GMAC_PARAMS);
		alloc_fun = gmac_alloc_ctx;
		break;
	default:
		rv = CRYPTO_MECHANISM_INVALID;
		return (rv);
	}
	/*
	 * NOTE(review): a required parameter that is absent
	 * (cm_param == NULL) is not rejected here; only a present
	 * parameter of the wrong length is.  Confirm that a NULL
	 * parameter is caught downstream in aes_common_init_ctx().
	 */
	if (param_required && mechanism->cm_param != NULL &&
	    mechanism->cm_param_len != param_len) {
		rv = CRYPTO_MECHANISM_PARAM_INVALID;
	}
	/* Allocation happens even on a failed length check; caller frees. */
	if (ctx != NULL) {
		p = (alloc_fun)(kmflag);
		*ctx = p;
	}
	return (rv);
}
297 298
298 299 /*
299 300 * Initialize key schedules for AES
300 301 */
301 302 static int
302 303 init_keysched(crypto_key_t *key, void *newbie)
303 304 {
304 305 /*
305 306 * Only keys by value are supported by this module.
306 307 */
307 308 switch (key->ck_format) {
308 309 case CRYPTO_KEY_RAW:
309 310 if (key->ck_length < AES_MINBITS ||
310 311 key->ck_length > AES_MAXBITS) {
311 312 return (CRYPTO_KEY_SIZE_RANGE);
312 313 }
313 314
314 315 /* key length must be either 128, 192, or 256 */
315 316 if ((key->ck_length & 63) != 0)
316 317 return (CRYPTO_KEY_SIZE_RANGE);
317 318 break;
318 319 default:
319 320 return (CRYPTO_KEY_TYPE_INCONSISTENT);
320 321 }
321 322
322 323 aes_init_keysched(key->ck_data, key->ck_length, newbie);
323 324 return (CRYPTO_SUCCESS);
324 325 }
325 326
326 327 /*
327 328 * KCF software provider control entry points.
328 329 */
/* ARGSUSED */
/*
 * KCF control entry point: this software provider is always ready.
 */
static void
aes_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}
335 336
336 337 static int
337 338 aes_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
338 339 crypto_key_t *key, crypto_spi_ctx_template_t template,
339 340 crypto_req_handle_t req) {
340 341 return (aes_common_init(ctx, mechanism, key, template, req, B_TRUE));
341 342 }
342 343
343 344 static int
344 345 aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
345 346 crypto_key_t *key, crypto_spi_ctx_template_t template,
346 347 crypto_req_handle_t req) {
347 348 return (aes_common_init(ctx, mechanism, key, template, req, B_FALSE));
348 349 }
349 350
350 351
351 352
352 353 /*
353 354 * KCF software provider encrypt entry points.
354 355 */
/*
 * Common init path for both encrypt and decrypt: validate the key
 * format, allocate a mode-specific context, initialize it, and stash
 * it in the crypto context.  On any failure after allocation the mode
 * context is freed here; the caller owns nothing.
 */
static int
aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t template,
    crypto_req_handle_t req, boolean_t is_encrypt_init)
{
	aes_ctx_t *aes_ctx;
	int rv;
	int kmflag;

	/*
	 * Only keys by value are supported by this module.
	 */
	if (key->ck_format != CRYPTO_KEY_RAW) {
		return (CRYPTO_KEY_TYPE_INCONSISTENT);
	}

	/* sleep/nosleep allocation behavior depends on the request */
	kmflag = crypto_kmflag(req);
	if ((rv = aes_check_mech_param(mechanism, &aes_ctx, kmflag))
	    != CRYPTO_SUCCESS)
		return (rv);

	rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, kmflag,
	    is_encrypt_init);
	if (rv != CRYPTO_SUCCESS) {
		crypto_free_mode_ctx(aes_ctx);
		return (rv);
	}

	ctx->cc_provider_private = aes_ctx;

	return (CRYPTO_SUCCESS);
}
387 388
/*
 * Copy one 16-byte AES block from `in' into the two 64-bit words at
 * `out'.  Uses the word-wise fast path when `in' is 64-bit aligned,
 * otherwise falls back to the byte-wise unaligned copy macro.
 */
static void
aes_copy_block64(const uint8_t *in, uint64_t *out)
{
	if (IS_P2ALIGNED(in, sizeof (uint64_t))) {
		/* LINTED: pointer alignment */
		out[0] = *(uint64_t *)&in[0];
		/* LINTED: pointer alignment */
		out[1] = *(uint64_t *)&in[8];
	} else {
		uint8_t *iv8 = (uint8_t *)&out[0];

		AES_COPY_BLOCK_UNALIGNED(in, iv8);
	}
}
402 403
403 404
/*
 * Single-part encrypt entry point.  Verifies output capacity for the
 * chosen mode (CCM/GCM append a MAC/tag after the ciphertext; GMAC
 * produces only a tag), runs the bulk update, then the mode-specific
 * final step.  On success the context is freed - a single-part
 * operation consumes the context.
 */
static int
aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	int ret = CRYPTO_FAILED;

	aes_ctx_t *aes_ctx;
	size_t saved_length, saved_offset, length_needed;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	/*
	 * For block ciphers, plaintext must be a multiple of AES block size.
	 * This test is only valid for ciphers whose blocksize is a power of 2.
	 */
	if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
	    == 0) && (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
		return (CRYPTO_DATA_LEN_RANGE);

	AES_ARG_INPLACE(plaintext, ciphertext);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following case.
	 */
	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
	case CCM_MODE:
		length_needed = plaintext->cd_length + aes_ctx->ac_mac_len;
		break;
	case GCM_MODE:
		length_needed = plaintext->cd_length + aes_ctx->ac_tag_len;
		break;
	case GMAC_MODE:
		/* GMAC authenticates AAD only; no plaintext is accepted */
		if (plaintext->cd_length != 0)
			return (CRYPTO_ARGUMENTS_BAD);

		length_needed = aes_ctx->ac_tag_len;
		break;
	default:
		length_needed = plaintext->cd_length;
	}

	if (ciphertext->cd_length < length_needed) {
		/* convention: report the required size in cd_length */
		ciphertext->cd_length = length_needed;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_length = ciphertext->cd_length;
	saved_offset = ciphertext->cd_offset;

	/*
	 * Do an update on the specified input data.
	 */
	ret = aes_encrypt_update(ctx, plaintext, ciphertext, req);
	if (ret != CRYPTO_SUCCESS) {
		return (ret);
	}

	/*
	 * For CCM mode, aes_ccm_encrypt_final() will take care of any
	 * left-over unprocessed data, and compute the MAC
	 */
	if (aes_ctx->ac_flags & CCM_MODE) {
		/*
		 * ccm_encrypt_final() will compute the MAC and append
		 * it to existing ciphertext. So, need to adjust the left over
		 * length value accordingly
		 */

		/* order of following 2 lines MUST not be reversed */
		ciphertext->cd_offset = ciphertext->cd_length;
		ciphertext->cd_length = saved_length - ciphertext->cd_length;
		ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, ciphertext,
		    AES_BLOCK_LEN, aes_encrypt_block, AES_XOR_BLOCK);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}

		if (plaintext != ciphertext) {
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
		}
		ciphertext->cd_offset = saved_offset;
	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
		/*
		 * gcm_encrypt_final() will compute the MAC and append
		 * it to existing ciphertext. So, need to adjust the left over
		 * length value accordingly
		 */

		/* order of following 2 lines MUST not be reversed */
		ciphertext->cd_offset = ciphertext->cd_length;
		ciphertext->cd_length = saved_length - ciphertext->cd_length;
		ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, ciphertext,
		    AES_BLOCK_LEN, aes_encrypt_block, AES_COPY_BLOCK,
		    AES_XOR_BLOCK);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}

		if (plaintext != ciphertext) {
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
		}
		ciphertext->cd_offset = saved_offset;
	}

	ASSERT(aes_ctx->ac_remainder_len == 0);
	(void) aes_free_context(ctx);

	return (ret);
}
517 518
518 519
/*
 * Single-part decrypt entry point.  Mirrors aes_encrypt(): checks the
 * plaintext buffer capacity for the chosen mode (CCM/GCM strip a
 * MAC/tag, GMAC yields no plaintext), runs the bulk update, then the
 * mode-specific verify-and-final step.  The context is always freed on
 * exit (see the cleanup label).
 */
static int
aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	int ret = CRYPTO_FAILED;

	aes_ctx_t *aes_ctx;
	off_t saved_offset;
	size_t saved_length, length_needed;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	/*
	 * For block ciphers, plaintext must be a multiple of AES block size.
	 * This test is only valid for ciphers whose blocksize is a power of 2.
	 */
	if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
	    == 0) && (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0) {
		return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
	}

	AES_ARG_INPLACE(ciphertext, plaintext);

	/*
	 * Return length needed to store the output.
	 * Do not destroy context when plaintext buffer is too small.
	 *
	 * CCM:  plaintext is MAC len smaller than cipher text
	 * GCM:  plaintext is TAG len smaller than cipher text
	 * GMAC: plaintext length must be zero
	 */
	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
	case CCM_MODE:
		length_needed = aes_ctx->ac_processed_data_len;
		break;
	case GCM_MODE:
		/*
		 * NOTE(review): if cd_length < ac_tag_len this size_t
		 * subtraction wraps to a huge value; confirm short
		 * ciphertexts are rejected before this point.
		 */
		length_needed = ciphertext->cd_length - aes_ctx->ac_tag_len;
		break;
	case GMAC_MODE:
		if (plaintext->cd_length != 0)
			return (CRYPTO_ARGUMENTS_BAD);

		length_needed = 0;
		break;
	default:
		length_needed = ciphertext->cd_length;
	}

	if (plaintext->cd_length < length_needed) {
		plaintext->cd_length = length_needed;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	/*
	 * Do an update on the specified input data.
	 */
	ret = aes_decrypt_update(ctx, ciphertext, plaintext, req);
	if (ret != CRYPTO_SUCCESS) {
		goto cleanup;
	}

	if (aes_ctx->ac_flags & CCM_MODE) {
		ASSERT(aes_ctx->ac_processed_data_len == aes_ctx->ac_data_len);
		ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);

		/* order of following 2 lines MUST not be reversed */
		plaintext->cd_offset = plaintext->cd_length;
		plaintext->cd_length = saved_length - plaintext->cd_length;

		ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, plaintext,
		    AES_BLOCK_LEN, aes_encrypt_block, AES_COPY_BLOCK,
		    AES_XOR_BLOCK);
		if (ret == CRYPTO_SUCCESS) {
			if (plaintext != ciphertext) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			}
		} else {
			/* verification failed: report no plaintext produced */
			plaintext->cd_length = saved_length;
		}

		plaintext->cd_offset = saved_offset;
	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
		/* order of following 2 lines MUST not be reversed */
		plaintext->cd_offset = plaintext->cd_length;
		plaintext->cd_length = saved_length - plaintext->cd_length;

		ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, plaintext,
		    AES_BLOCK_LEN, aes_encrypt_block, AES_XOR_BLOCK,
		    AES_COPY_BLOCK, aes_ctr_mode);
		if (ret == CRYPTO_SUCCESS) {
			if (plaintext != ciphertext) {
				plaintext->cd_length =
				    plaintext->cd_offset - saved_offset;
			}
		} else {
			plaintext->cd_length = saved_length;
		}

		plaintext->cd_offset = saved_offset;
	}

	ASSERT(aes_ctx->ac_remainder_len == 0);

cleanup:
	(void) aes_free_context(ctx);

	return (ret);
}
631 633
632 634
/* ARGSUSED */
/*
 * Multi-part encrypt update.  Dispatches to the iovec/uio/mblk bulk
 * helper for the input's data format; only whole AES blocks are
 * emitted (the sub-block remainder stays buffered in the context),
 * except for CTR mode, where ctr_mode_final() flushes the remainder
 * since CTR is a stream cipher.
 */
static int
aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
    crypto_data_t *ciphertext, crypto_req_handle_t req)
{
	off_t saved_offset;
	size_t saved_length, out_len;
	int ret = CRYPTO_SUCCESS;
	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	AES_ARG_INPLACE(plaintext, ciphertext);

	/* compute number of bytes that will hold the ciphertext */
	out_len = aes_ctx->ac_remainder_len;
	out_len += plaintext->cd_length;
	out_len &= ~(AES_BLOCK_LEN - 1);

	/* return length needed to store the output */
	if (ciphertext->cd_length < out_len) {
		ciphertext->cd_length = out_len;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	saved_offset = ciphertext->cd_offset;
	saved_length = ciphertext->cd_length;

	/*
	 * Do the AES update on the specified input data.
	 */
	switch (plaintext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	case CRYPTO_DATA_MBLK:
		ret = crypto_update_mp(ctx->cc_provider_private,
		    plaintext, ciphertext, aes_encrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/*
	 * Since AES counter mode is a stream cipher, we call
	 * ctr_mode_final() to pick up any remaining bytes.
	 * It is an internal function that does not destroy
	 * the context like *normal* final routines.
	 *
	 * NOTE(review): this overwrites any error `ret' from the
	 * switch above whenever a CTR remainder exists - confirm the
	 * update helpers cannot fail while leaving a remainder.
	 */
	if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
		ret = ctr_mode_final((ctr_ctx_t *)aes_ctx,
		    ciphertext, aes_encrypt_block);
	}

	if (ret == CRYPTO_SUCCESS) {
		if (plaintext != ciphertext)
			ciphertext->cd_length =
			    ciphertext->cd_offset - saved_offset;
	} else {
		ciphertext->cd_length = saved_length;
	}
	ciphertext->cd_offset = saved_offset;

	return (ret);
}
707 709
708 710
/*
 * Multi-part decrypt update.  Mirrors aes_encrypt_update(), with the
 * additional twist that GCM withholds a tag-sized tail of the input
 * (it may be the tag rather than ciphertext), and CCM/GMAC produce no
 * plaintext at all until final.
 */
static int
aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
    crypto_data_t *plaintext, crypto_req_handle_t req)
{
	off_t saved_offset;
	size_t saved_length, out_len;
	int ret = CRYPTO_SUCCESS;
	aes_ctx_t *aes_ctx;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	AES_ARG_INPLACE(ciphertext, plaintext);

	/*
	 * Compute number of bytes that will hold the plaintext.
	 * This is not necessary for CCM and GMAC since these
	 * mechanisms never return plaintext for update operations.
	 */
	if ((aes_ctx->ac_flags & (CCM_MODE|GMAC_MODE)) == 0) {
		out_len = aes_ctx->ac_remainder_len;
		out_len += ciphertext->cd_length;
		out_len &= ~(AES_BLOCK_LEN - 1);
		/*
		 * GCM retains a tag-sized tail unprocessed.
		 * NOTE(review): if out_len < gcm_tag_len this size_t
		 * subtraction wraps; confirm small early updates
		 * cannot reach this path with a short accumulation.
		 */
		if (aes_ctx->ac_flags & GCM_MODE)
			out_len -= ((gcm_ctx_t *)aes_ctx)->gcm_tag_len;

		/* return length needed to store the output */
		if (plaintext->cd_length < out_len) {
			plaintext->cd_length = out_len;
			return (CRYPTO_BUFFER_TOO_SMALL);
		}
	}

	saved_offset = plaintext->cd_offset;
	saved_length = plaintext->cd_length;

	if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE))
		gcm_set_kmflag((gcm_ctx_t *)aes_ctx, crypto_kmflag(req));

	/*
	 * Do the AES update on the specified input data.
	 */
	switch (ciphertext->cd_format) {
	case CRYPTO_DATA_RAW:
		ret = crypto_update_iov(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	case CRYPTO_DATA_UIO:
		ret = crypto_update_uio(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	case CRYPTO_DATA_MBLK:
		ret = crypto_update_mp(ctx->cc_provider_private,
		    ciphertext, plaintext, aes_decrypt_contiguous_blocks,
		    aes_copy_block64);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/*
	 * Since AES counter mode is a stream cipher, we call
	 * ctr_mode_final() to pick up any remaining bytes.
	 * It is an internal function that does not destroy
	 * the context like *normal* final routines.
	 */
	if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
		ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, plaintext,
		    aes_encrypt_block);
		/* decrypt-side error code for a bad trailing length */
		if (ret == CRYPTO_DATA_LEN_RANGE)
			ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
	}

	if (ret == CRYPTO_SUCCESS) {
		if (ciphertext != plaintext)
			plaintext->cd_length =
			    plaintext->cd_offset - saved_offset;
	} else {
		plaintext->cd_length = saved_length;
	}
	plaintext->cd_offset = saved_offset;


	return (ret);
}
794 798
/* ARGSUSED */
/*
 * Multi-part encrypt final.  CTR flushes its buffered remainder;
 * CCM/GCM/GMAC emit the MAC/tag; plain block modes require that no
 * partial block remains.  Frees the context on success.
 */
static int
aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	aes_ctx_t *aes_ctx;
	int ret;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	if (data->cd_format != CRYPTO_DATA_RAW &&
	    data->cd_format != CRYPTO_DATA_UIO &&
	    data->cd_format != CRYPTO_DATA_MBLK) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	if (aes_ctx->ac_flags & CTR_MODE) {
		if (aes_ctx->ac_remainder_len > 0) {
			ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
			    aes_encrypt_block);
			if (ret != CRYPTO_SUCCESS)
				return (ret);
		}
	} else if (aes_ctx->ac_flags & CCM_MODE) {
		ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, AES_XOR_BLOCK);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
		size_t saved_offset = data->cd_offset;

		ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, AES_COPY_BLOCK,
		    AES_XOR_BLOCK);
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
		data->cd_length = data->cd_offset - saved_offset;
		data->cd_offset = saved_offset;
	} else {
		/*
		 * There must be no unprocessed plaintext.
		 * This happens if the length of the last data is
		 * not a multiple of the AES block length.
		 */
		if (aes_ctx->ac_remainder_len > 0) {
			return (CRYPTO_DATA_LEN_RANGE);
		}
		data->cd_length = 0;
	}

	(void) aes_free_context(ctx);

	return (CRYPTO_SUCCESS);
}
852 856
/* ARGSUSED */
/*
 * Multi-part decrypt final.  CTR flushes its remainder; CCM releases
 * all buffered plaintext after MAC verification; GCM/GMAC verify the
 * tag and release the withheld plaintext tail.  Frees the context on
 * success.
 */
static int
aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	aes_ctx_t *aes_ctx;
	int ret;
	off_t saved_offset;
	size_t saved_length;

	ASSERT(ctx->cc_provider_private != NULL);
	aes_ctx = ctx->cc_provider_private;

	if (data->cd_format != CRYPTO_DATA_RAW &&
	    data->cd_format != CRYPTO_DATA_UIO &&
	    data->cd_format != CRYPTO_DATA_MBLK) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	/*
	 * There must be no unprocessed ciphertext.
	 * This happens if the length of the last ciphertext is
	 * not a multiple of the AES block length.
	 */
	if (aes_ctx->ac_remainder_len > 0) {
		if ((aes_ctx->ac_flags & CTR_MODE) == 0)
			return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
		else {
			ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
			    aes_encrypt_block);
			if (ret == CRYPTO_DATA_LEN_RANGE)
				ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
			if (ret != CRYPTO_SUCCESS)
				return (ret);
		}
	}

	if (aes_ctx->ac_flags & CCM_MODE) {
		/*
		 * This is where all the plaintext is returned, make sure
		 * the plaintext buffer is big enough
		 */
		size_t pt_len = aes_ctx->ac_data_len;
		if (data->cd_length < pt_len) {
			data->cd_length = pt_len;
			return (CRYPTO_BUFFER_TOO_SMALL);
		}

		ASSERT(aes_ctx->ac_processed_data_len == pt_len);
		ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
		saved_offset = data->cd_offset;
		saved_length = data->cd_length;
		ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, AES_COPY_BLOCK,
		    AES_XOR_BLOCK);
		if (ret == CRYPTO_SUCCESS) {
			data->cd_length = data->cd_offset - saved_offset;
		} else {
			data->cd_length = saved_length;
		}

		data->cd_offset = saved_offset;
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
	} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
		/*
		 * Check to make sure there is enough space for remaining
		 * plaintext.
		 *
		 * NOTE(review): the local `ctx' below shadows the
		 * crypto_ctx_t parameter within this branch; the
		 * aes_free_context(ctx) at the bottom of the function
		 * still sees the parameter.  Consider renaming.
		 */
		gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx;
		size_t pt_len = ctx->gcm_last_input_fill - ctx->gcm_tag_len;

		if (data->cd_length < pt_len) {
			data->cd_length = pt_len;
			return (CRYPTO_BUFFER_TOO_SMALL);
		}
		saved_offset = data->cd_offset;
		saved_length = data->cd_length;
		ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, data,
		    AES_BLOCK_LEN, aes_encrypt_block, AES_COPY_BLOCK,
		    AES_XOR_BLOCK, aes_ctr_mode);
		if (ret == CRYPTO_SUCCESS) {
			data->cd_length = data->cd_offset - saved_offset;
		} else {
			data->cd_length = saved_length;
		}

		data->cd_offset = saved_offset;
		if (ret != CRYPTO_SUCCESS) {
			return (ret);
		}
	}

	if ((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
		data->cd_length = 0;
	}

	(void) aes_free_context(ctx);

	return (CRYPTO_SUCCESS);
}
956 959
/*
 * NOTE(review): single-part (atomic) AES encrypt entry point of the KCF
 * software provider. Builds an aes_ctx_t on the stack, computes the
 * required output size per mode (CCM appends ac_mac_len, GCM/GMAC append
 * ac_tag_len), runs the update + mode-specific final step, and scrubs the
 * provider-owned key schedule at "out:". The +/- hunks in this function
 * only swap the aes_xor_block/aes_copy_block callbacks for the
 * AES_XOR_BLOCK/AES_COPY_BLOCK macro forms (performance change).
 */
 957  960 /* ARGSUSED */
 958  961 static int
 959  962 aes_encrypt_atomic(crypto_provider_handle_t provider,
 960  963     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
 961  964     crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
 962  965     crypto_spi_ctx_template_t template, crypto_req_handle_t req)
 963  966 {
 964  967 	aes_ctx_t aes_ctx;	/* on the stack */
 965  968 	off_t saved_offset;
 966  969 	size_t saved_length;
 967  970 	size_t length_needed;
 968  971 	int ret;
 969  972 
 970  973 	AES_ARG_INPLACE(plaintext, ciphertext);
 971  974 
 972  975 	/*
 973  976 	 * CTR, CCM, GCM, and GMAC modes do not require that plaintext
 974  977 	 * be a multiple of AES block size.
 975  978 	 */
 976  979 	switch (mechanism->cm_type) {
 977  980 	case AES_CTR_MECH_INFO_TYPE:
 978  981 	case AES_CCM_MECH_INFO_TYPE:
 979  982 	case AES_GCM_MECH_INFO_TYPE:
 980  983 	case AES_GMAC_MECH_INFO_TYPE:
 981  984 		break;
 982  985 	default:
 983  986 		if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
 984  987 			return (CRYPTO_DATA_LEN_RANGE);
 985  988 	}
 986  989 
 987  990 	if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
 988  991 		return (ret);
 989  992 
 990  993 	bzero(&aes_ctx, sizeof (aes_ctx_t));
 991  994 
 992  995 	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
 993  996 	    crypto_kmflag(req), B_TRUE);
 994  997 	if (ret != CRYPTO_SUCCESS)
 995  998 		return (ret);
 996  999 
 997 1000 	switch (mechanism->cm_type) {
 998 1001 	case AES_CCM_MECH_INFO_TYPE:
 999 1002 		length_needed = plaintext->cd_length + aes_ctx.ac_mac_len;
1000 1003 		break;
1001 1004 	case AES_GMAC_MECH_INFO_TYPE:
1002 1005 		if (plaintext->cd_length != 0)
/*
 * NOTE(review): this return executes after aes_common_init_ctx() has
 * already succeeded, so it bypasses the "out:" cleanup below and leaks
 * (without zeroing) the key schedule whenever template == NULL set
 * PROVIDER_OWNS_KEY_SCHEDULE. Should be "ret = CRYPTO_ARGUMENTS_BAD;
 * goto out;". Pre-existing issue, not introduced by this diff.
 */
1003 1006 			return (CRYPTO_ARGUMENTS_BAD);
1004 1007 		/* FALLTHRU */
1005 1008 	case AES_GCM_MECH_INFO_TYPE:
1006 1009 		length_needed = plaintext->cd_length + aes_ctx.ac_tag_len;
1007 1010 		break;
1008 1011 	default:
1009 1012 		length_needed = plaintext->cd_length;
1010 1013 	}
1011 1014 
1012 1015 	/* return size of buffer needed to store output */
1013 1016 	if (ciphertext->cd_length < length_needed) {
1014 1017 		ciphertext->cd_length = length_needed;
1015 1018 		ret = CRYPTO_BUFFER_TOO_SMALL;
1016 1019 		goto out;
1017 1020 	}
1018 1021 
1019 1022 	saved_offset = ciphertext->cd_offset;
1020 1023 	saved_length = ciphertext->cd_length;
1021 1024 
1022 1025 	/*
1023 1026 	 * Do an update on the specified input data.
1024 1027 	 */
1025 1028 	switch (plaintext->cd_format) {
1026 1029 	case CRYPTO_DATA_RAW:
1027 1030 		ret = crypto_update_iov(&aes_ctx, plaintext, ciphertext,
1028 1031 		    aes_encrypt_contiguous_blocks, aes_copy_block64);
1029 1032 		break;
1030 1033 	case CRYPTO_DATA_UIO:
1031 1034 		ret = crypto_update_uio(&aes_ctx, plaintext, ciphertext,
1032 1035 		    aes_encrypt_contiguous_blocks, aes_copy_block64);
1033 1036 		break;
1034 1037 	case CRYPTO_DATA_MBLK:
1035 1038 		ret = crypto_update_mp(&aes_ctx, plaintext, ciphertext,
↓ open down ↓ |
78 lines elided |
↑ open up ↑ |
1036 1039 		    aes_encrypt_contiguous_blocks, aes_copy_block64);
1037 1040 		break;
1038 1041 	default:
1039 1042 		ret = CRYPTO_ARGUMENTS_BAD;
1040 1043 	}
1041 1044 
1042 1045 	if (ret == CRYPTO_SUCCESS) {
1043 1046 		if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
1044 1047 			ret = ccm_encrypt_final((ccm_ctx_t *)&aes_ctx,
1045 1048 			    ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
1046      			    aes_xor_block);
1049 +			    AES_XOR_BLOCK);
1047 1050 			if (ret != CRYPTO_SUCCESS)
1048 1051 				goto out;
1049 1052 			ASSERT(aes_ctx.ac_remainder_len == 0);
1050 1053 		} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
1051 1054 		    mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
1052 1055 			ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
1053 1056 			    ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
1054      			    aes_copy_block, aes_xor_block);
1057 +			    AES_COPY_BLOCK, AES_XOR_BLOCK);
1055 1058 			if (ret != CRYPTO_SUCCESS)
1056 1059 				goto out;
1057 1060 			ASSERT(aes_ctx.ac_remainder_len == 0);
1058 1061 		} else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
1059 1062 			if (aes_ctx.ac_remainder_len > 0) {
1060 1063 				ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
1061 1064 				    ciphertext, aes_encrypt_block);
1062 1065 				if (ret != CRYPTO_SUCCESS)
1063 1066 					goto out;
1064 1067 			}
1065 1068 		} else {
1066 1069 			ASSERT(aes_ctx.ac_remainder_len == 0);
1067 1070 		}
1068 1071 
1069 1072 		if (plaintext != ciphertext) {
1070 1073 			ciphertext->cd_length =
1071 1074 			    ciphertext->cd_offset - saved_offset;
1072 1075 		}
1073 1076 	} else {
1074 1077 		ciphertext->cd_length = saved_length;
1075 1078 	}
1076 1079 	ciphertext->cd_offset = saved_offset;
1077 1080 
1078 1081 out:
/* Scrub and release the key schedule if this call allocated it. */
1079 1082 	if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
1080 1083 		bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
1081 1084 		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
1082 1085 	}
1083 1086 
1084 1087 	return (ret);
1085 1088 }
1086 1089
/*
 * NOTE(review): single-part (atomic) AES decrypt entry point of the KCF
 * software provider. Mirrors aes_encrypt_atomic(): stack aes_ctx_t,
 * per-mode output sizing (CCM -> ac_data_len, GCM -> ciphertext minus
 * ac_tag_len, GMAC -> 0), update + mode final, key-schedule scrub at
 * "out:". The diff swaps block-op callbacks for AES_COPY_BLOCK/
 * AES_XOR_BLOCK macros and passes aes_ctr_mode to gcm_decrypt_final().
 */
1087 1090 /* ARGSUSED */
1088 1091 static int
1089 1092 aes_decrypt_atomic(crypto_provider_handle_t provider,
1090 1093     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1091 1094     crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
1092 1095     crypto_spi_ctx_template_t template, crypto_req_handle_t req)
1093 1096 {
1094 1097 	aes_ctx_t aes_ctx;	/* on the stack */
1095 1098 	off_t saved_offset;
1096 1099 	size_t saved_length;
1097 1100 	size_t length_needed;
1098 1101 	int ret;
1099 1102 
1100 1103 	AES_ARG_INPLACE(ciphertext, plaintext);
1101 1104 
1102 1105 	/*
1103 1106 	 * CCM, GCM, CTR, and GMAC modes do not require that ciphertext
1104 1107 	 * be a multiple of AES block size.
1105 1108 	 */
1106 1109 	switch (mechanism->cm_type) {
1107 1110 	case AES_CTR_MECH_INFO_TYPE:
1108 1111 	case AES_CCM_MECH_INFO_TYPE:
1109 1112 	case AES_GCM_MECH_INFO_TYPE:
1110 1113 	case AES_GMAC_MECH_INFO_TYPE:
1111 1114 		break;
1112 1115 	default:
1113 1116 		if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
1114 1117 			return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
1115 1118 	}
1116 1119 
1117 1120 	if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
1118 1121 		return (ret);
1119 1122 
1120 1123 	bzero(&aes_ctx, sizeof (aes_ctx_t));
1121 1124 
1122 1125 	ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
1123 1126 	    crypto_kmflag(req), B_FALSE);
1124 1127 	if (ret != CRYPTO_SUCCESS)
1125 1128 		return (ret);
1126 1129 
1127 1130 	switch (mechanism->cm_type) {
1128 1131 	case AES_CCM_MECH_INFO_TYPE:
1129 1132 		length_needed = aes_ctx.ac_data_len;
1130 1133 		break;
1131 1134 	case AES_GCM_MECH_INFO_TYPE:
1132 1135 		length_needed = ciphertext->cd_length - aes_ctx.ac_tag_len;
1133 1136 		break;
1134 1137 	case AES_GMAC_MECH_INFO_TYPE:
1135 1138 		if (plaintext->cd_length != 0)
/*
 * NOTE(review): same leak as in aes_encrypt_atomic() — this return
 * happens after aes_common_init_ctx() succeeded and skips the "out:"
 * cleanup, leaking the provider-owned key schedule. Should be
 * "ret = CRYPTO_ARGUMENTS_BAD; goto out;". Pre-existing issue.
 */
1136 1139 			return (CRYPTO_ARGUMENTS_BAD);
1137 1140 		length_needed = 0;
1138 1141 		break;
1139 1142 	default:
1140 1143 		length_needed = ciphertext->cd_length;
1141 1144 	}
1142 1145 
1143 1146 	/* return size of buffer needed to store output */
1144 1147 	if (plaintext->cd_length < length_needed) {
1145 1148 		plaintext->cd_length = length_needed;
1146 1149 		ret = CRYPTO_BUFFER_TOO_SMALL;
1147 1150 		goto out;
1148 1151 	}
1149 1152 
1150 1153 	saved_offset = plaintext->cd_offset;
1151 1154 	saved_length = plaintext->cd_length;
1152 1155 
1153 1156 	if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
1154 1157 	    mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE)
1155 1158 		gcm_set_kmflag((gcm_ctx_t *)&aes_ctx, crypto_kmflag(req));
1156 1159 
1157 1160 	/*
1158 1161 	 * Do an update on the specified input data.
1159 1162 	 */
1160 1163 	switch (ciphertext->cd_format) {
1161 1164 	case CRYPTO_DATA_RAW:
1162 1165 		ret = crypto_update_iov(&aes_ctx, ciphertext, plaintext,
1163 1166 		    aes_decrypt_contiguous_blocks, aes_copy_block64);
1164 1167 		break;
1165 1168 	case CRYPTO_DATA_UIO:
1166 1169 		ret = crypto_update_uio(&aes_ctx, ciphertext, plaintext,
1167 1170 		    aes_decrypt_contiguous_blocks, aes_copy_block64);
1168 1171 		break;
1169 1172 	case CRYPTO_DATA_MBLK:
1170 1173 		ret = crypto_update_mp(&aes_ctx, ciphertext, plaintext,
1171 1174 		    aes_decrypt_contiguous_blocks, aes_copy_block64);
1172 1175 		break;
1173 1176 	default:
1174 1177 		ret = CRYPTO_ARGUMENTS_BAD;
↓ open down ↓ |
110 lines elided |
↑ open up ↑ |
1175 1178 	}
1176 1179 
1177 1180 	if (ret == CRYPTO_SUCCESS) {
1178 1181 		if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
1179 1182 			ASSERT(aes_ctx.ac_processed_data_len
1180 1183 			    == aes_ctx.ac_data_len);
1181 1184 			ASSERT(aes_ctx.ac_processed_mac_len
1182 1185 			    == aes_ctx.ac_mac_len);
1183 1186 			ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
1184 1187 			    plaintext, AES_BLOCK_LEN, aes_encrypt_block,
1185      			    aes_copy_block, aes_xor_block);
1188 +			    AES_COPY_BLOCK, AES_XOR_BLOCK);
1186 1189 			ASSERT(aes_ctx.ac_remainder_len == 0);
1187 1190 			if ((ret == CRYPTO_SUCCESS) &&
1188 1191 			    (ciphertext != plaintext)) {
1189 1192 				plaintext->cd_length =
1190 1193 				    plaintext->cd_offset - saved_offset;
1191 1194 			} else {
1192 1195 				plaintext->cd_length = saved_length;
1193 1196 			}
1194 1197 		} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
1195 1198 		    mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
1196 1199 			ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
1197 1200 			    plaintext, AES_BLOCK_LEN, aes_encrypt_block,
1198      			    aes_xor_block);
1201 +			    AES_COPY_BLOCK, AES_XOR_BLOCK, aes_ctr_mode);
1199 1202 			ASSERT(aes_ctx.ac_remainder_len == 0);
1200 1203 			if ((ret == CRYPTO_SUCCESS) &&
1201 1204 			    (ciphertext != plaintext)) {
1202 1205 				plaintext->cd_length =
1203 1206 				    plaintext->cd_offset - saved_offset;
1204 1207 			} else {
1205 1208 				plaintext->cd_length = saved_length;
1206 1209 			}
1207 1210 		} else if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
1208 1211 			ASSERT(aes_ctx.ac_remainder_len == 0);
1209 1212 			if (ciphertext != plaintext)
1210 1213 				plaintext->cd_length =
1211 1214 				    plaintext->cd_offset - saved_offset;
1212 1215 		} else {
1213 1216 			if (aes_ctx.ac_remainder_len > 0) {
1214 1217 				ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
1215 1218 				    plaintext, aes_encrypt_block);
1216 1219 				if (ret == CRYPTO_DATA_LEN_RANGE)
1217 1220 					ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
1218 1221 				if (ret != CRYPTO_SUCCESS)
1219 1222 					goto out;
1220 1223 			}
1221 1224 			if (ciphertext != plaintext)
1222 1225 				plaintext->cd_length =
1223 1226 				    plaintext->cd_offset - saved_offset;
1224 1227 		}
1225 1228 	} else {
1226 1229 		plaintext->cd_length = saved_length;
1227 1230 	}
1228 1231 	plaintext->cd_offset = saved_offset;
1229 1232 
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
1230 1233 out:
1231 1234 	if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
1232 1235 		bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
1233 1236 		kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
1234 1237 	}
1235 1238 
1236 1239 	if (aes_ctx.ac_flags & CCM_MODE) {
1237 1240 		if (aes_ctx.ac_pt_buf != NULL) {
1238 1241 			kmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len);
1239 1242 		}
/*
 * NOTE(review): the diff deletes the GCM/GMAC gcm_pt_buf free that used
 * to follow here — presumably the reworked GCM code no longer allocates
 * gcm_pt_buf in this path; verify against the new modes/gcm.c to rule
 * out a leak.
 */
1240      	} else if (aes_ctx.ac_flags & (GCM_MODE|GMAC_MODE)) {
1241      		if (((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf != NULL) {
1242      			kmem_free(((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf,
1243      			    ((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf_len);
1244      		}
1245 1243 	}
1246 1244 
1247 1245 	return (ret);
1248 1246 }
1249 1247
1250 1248 /*
1251 1249  * KCF software provider context template entry points.
1252 1250  */
/*
 * NOTE(review): pre-expands an AES key schedule so repeated operations
 * with the same key can skip init_keysched(). On success the caller
 * owns *tmpl (size *tmpl_size) and must free it; on init failure the
 * schedule is zeroed before kmem_free — correct here (unlike the early
 * returns in aes_common_init_ctx).
 */
1253 1251 /* ARGSUSED */
1254 1252 static int
1255 1253 aes_create_ctx_template(crypto_provider_handle_t provider,
1256 1254     crypto_mechanism_t *mechanism, crypto_key_t *key,
1257 1255     crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size, crypto_req_handle_t req)
1258 1256 {
1259 1257 	void *keysched;
1260 1258 	size_t size;
1261 1259 	int rv;
1262 1260 
1263 1261 	if (mechanism->cm_type != AES_ECB_MECH_INFO_TYPE &&
1264 1262 	    mechanism->cm_type != AES_CBC_MECH_INFO_TYPE &&
1265 1263 	    mechanism->cm_type != AES_CTR_MECH_INFO_TYPE &&
1266 1264 	    mechanism->cm_type != AES_CCM_MECH_INFO_TYPE &&
1267 1265 	    mechanism->cm_type != AES_GCM_MECH_INFO_TYPE &&
1268 1266 	    mechanism->cm_type != AES_GMAC_MECH_INFO_TYPE)
1269 1267 		return (CRYPTO_MECHANISM_INVALID);
1270 1268 
1271 1269 	if ((keysched = aes_alloc_keysched(&size,
1272 1270 	    crypto_kmflag(req))) == NULL) {
1273 1271 		return (CRYPTO_HOST_MEMORY);
1274 1272 	}
1275 1273 
1276 1274 	/*
1277 1275 	 * Initialize key schedule.  Key length information is stored
1278 1276 	 * in the key.
1279 1277 	 */
1280 1278 	if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
1281 1279 		bzero(keysched, size);
1282 1280 		kmem_free(keysched, size);
1283 1281 		return (rv);
1284 1282 	}
1285 1283 
1286 1284 	*tmpl = keysched;
1287 1285 	*tmpl_size = size;
1288 1286 
1289 1287 	return (CRYPTO_SUCCESS);
1290 1288 }
1291 1289
1292 1290
/*
 * NOTE(review): releases a per-operation AES context: zeroes and frees
 * the key schedule only when this provider allocated it (template-based
 * schedules are owned by the template), then frees the mode context and
 * clears cc_provider_private. Always returns CRYPTO_SUCCESS.
 */
1293 1291 static int
1294 1292 aes_free_context(crypto_ctx_t *ctx)
1295 1293 {
1296 1294 	aes_ctx_t *aes_ctx = ctx->cc_provider_private;
1297 1295 
1298 1296 	if (aes_ctx != NULL) {
1299 1297 		if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
1300 1298 			ASSERT(aes_ctx->ac_keysched_len != 0);
1301 1299 			bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
1302 1300 			kmem_free(aes_ctx->ac_keysched,
1303 1301 			    aes_ctx->ac_keysched_len);
1304 1302 		}
1305 1303 		crypto_free_mode_ctx(aes_ctx);
1306 1304 		ctx->cc_provider_private = NULL;
1307 1305 	}
1308 1306 
1309 1307 	return (CRYPTO_SUCCESS);
1310 1308 }
1311 1309
1312 1310
/*
 * NOTE(review): shared context-initialization helper for all AES entry
 * points. Installs the key schedule (from the template, or freshly
 * allocated — then PROVIDER_OWNS_KEY_SCHEDULE is set) and dispatches to
 * the per-mode init routine. The diff swaps aes_copy_block/aes_xor_block
 * for the AES_COPY_BLOCK/AES_XOR_BLOCK macro forms.
 */
1313 1311 static int
1314 1312 aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
1315 1313     crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag,
1316 1314     boolean_t is_encrypt_init)
1317 1315 {
1318 1316 	int rv = CRYPTO_SUCCESS;
1319 1317 	void *keysched;
1320 1318 	size_t size;
1321 1319 
1322 1320 	if (template == NULL) {
1323 1321 		if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
1324 1322 			return (CRYPTO_HOST_MEMORY);
1325 1323 		/*
1326 1324 		 * Initialize key schedule.
1327 1325 		 * Key length is stored in the key.
1328 1326 		 */
1329 1327 		if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
1330 1328 			kmem_free(keysched, size);
1331 1329 			return (rv);
1332 1330 		}
1333 1331 
1334 1332 		aes_ctx->ac_flags |= PROVIDER_OWNS_KEY_SCHEDULE;
1335 1333 		aes_ctx->ac_keysched_len = size;
1336 1334 	} else {
1337 1335 		keysched = template;
1338 1336 	}
1339 1337 	aes_ctx->ac_keysched = keysched;
1340 1338 
/*
 * NOTE(review): the bare CRYPTO_MECHANISM_PARAM_INVALID returns in the
 * CTR/CCM/GCM/GMAC cases below bypass the failure cleanup at the bottom
 * of this function, leaking the just-allocated keysched whenever
 * template == NULL. They should set rv and break instead. Pre-existing
 * issue, not introduced by this diff.
 */
1341 1339 	switch (mechanism->cm_type) {
1342 1340 	case AES_CBC_MECH_INFO_TYPE:
1343 1341 		rv = cbc_init_ctx((cbc_ctx_t *)aes_ctx, mechanism->cm_param,
1344 1342 		    mechanism->cm_param_len, AES_BLOCK_LEN, aes_copy_block64);
↓ open down ↓ |
90 lines elided |
↑ open up ↑ |
1345 1343 		break;
1346 1344 	case AES_CTR_MECH_INFO_TYPE: {
1347 1345 		CK_AES_CTR_PARAMS *pp;
1348 1346 
1349 1347 		if (mechanism->cm_param == NULL ||
1350 1348 		    mechanism->cm_param_len != sizeof (CK_AES_CTR_PARAMS)) {
1351 1349 			return (CRYPTO_MECHANISM_PARAM_INVALID);
1352 1350 		}
1353 1351 		pp = (CK_AES_CTR_PARAMS *)(void *)mechanism->cm_param;
1354 1352 		rv = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
1355      		    pp->cb, aes_copy_block);
1353 +		    pp->cb, AES_COPY_BLOCK);
1356 1354 		break;
1357 1355 	}
1358 1356 	case AES_CCM_MECH_INFO_TYPE:
1359 1357 		if (mechanism->cm_param == NULL ||
1360 1358 		    mechanism->cm_param_len != sizeof (CK_AES_CCM_PARAMS)) {
1361 1359 			return (CRYPTO_MECHANISM_PARAM_INVALID);
1362 1360 		}
1363 1361 		rv = ccm_init_ctx((ccm_ctx_t *)aes_ctx, mechanism->cm_param,
1364 1362 		    kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block,
1365      		    aes_xor_block);
1363 +		    AES_XOR_BLOCK);
1366 1364 		break;
1367 1365 	case AES_GCM_MECH_INFO_TYPE:
1368 1366 		if (mechanism->cm_param == NULL ||
1369 1367 		    mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) {
1370 1368 			return (CRYPTO_MECHANISM_PARAM_INVALID);
1371 1369 		}
1372 1370 		rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
1373      		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
1374      		    aes_xor_block);
1371 +		    AES_BLOCK_LEN, aes_encrypt_block, AES_COPY_BLOCK,
1372 +		    AES_XOR_BLOCK);
1375 1373 		break;
1376 1374 	case AES_GMAC_MECH_INFO_TYPE:
1377 1375 		if (mechanism->cm_param == NULL ||
1378 1376 		    mechanism->cm_param_len != sizeof (CK_AES_GMAC_PARAMS)) {
1379 1377 			return (CRYPTO_MECHANISM_PARAM_INVALID);
1380 1378 		}
1381 1379 		rv = gmac_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
1382      		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
1383      		    aes_xor_block);
1380 +		    AES_BLOCK_LEN, aes_encrypt_block, AES_COPY_BLOCK,
1381 +		    AES_XOR_BLOCK);
1384 1382 		break;
1385 1383 	case AES_ECB_MECH_INFO_TYPE:
1386 1384 		aes_ctx->ac_flags |= ECB_MODE;
1387 1385 	}
1388 1386 
1389 1387 	if (rv != CRYPTO_SUCCESS) {
1390 1388 		if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
1391 1389 			bzero(keysched, size);
1392 1390 			kmem_free(keysched, size);
1393 1391 		}
1394 1392 	}
1395 1393 
1396 1394 	return (rv);
1397 1395 }
1398 1396
/*
 * NOTE(review): translates a CK_AES_GMAC_PARAMS mechanism into the
 * CK_AES_GCM_PARAMS form used by the GCM code path: caller's IV is
 * reused with the fixed AES_GMAC_IV_LEN/AES_GMAC_TAG_BITS, and the
 * entire data buffer (CRYPTO_DATA_RAW only) becomes the AAD. gcm_params
 * aliases the caller's pIv/iov_base — no copies are made.
 */
1399 1397 static int
1400 1398 process_gmac_mech(crypto_mechanism_t *mech, crypto_data_t *data,
1401 1399     CK_AES_GCM_PARAMS *gcm_params)
1402 1400 {
1403 1401 	/* LINTED: pointer alignment */
1404 1402 	CK_AES_GMAC_PARAMS *params = (CK_AES_GMAC_PARAMS *)mech->cm_param;
1405 1403 
1406 1404 	if (mech->cm_type != AES_GMAC_MECH_INFO_TYPE)
1407 1405 		return (CRYPTO_MECHANISM_INVALID);
1408 1406 
1409 1407 	if (mech->cm_param_len != sizeof (CK_AES_GMAC_PARAMS))
1410 1408 		return (CRYPTO_MECHANISM_PARAM_INVALID);
1411 1409 
1412 1410 	if (params->pIv == NULL)
1413 1411 		return (CRYPTO_MECHANISM_PARAM_INVALID);
1414 1412 
1415 1413 	gcm_params->pIv = params->pIv;
1416 1414 	gcm_params->ulIvLen = AES_GMAC_IV_LEN;
1417 1415 	gcm_params->ulTagBits = AES_GMAC_TAG_BITS;
1418 1416 
1419 1417 	if (data == NULL)
1420 1418 		return (CRYPTO_SUCCESS);
1421 1419 
1422 1420 	if (data->cd_format != CRYPTO_DATA_RAW)
1423 1421 		return (CRYPTO_ARGUMENTS_BAD);
1424 1422 
1425 1423 	gcm_params->pAAD = (uchar_t *)data->cd_raw.iov_base;
1426 1424 	gcm_params->ulAADLen = data->cd_length;
1427 1425 	return (CRYPTO_SUCCESS);
1428 1426 }
1429 1427
/*
 * NOTE(review): atomic AES-GMAC MAC generation. Rewrites the GMAC
 * mechanism as AES-GCM with zero plaintext (null_crypto_data) and the
 * data as AAD, then delegates to aes_encrypt_atomic(); the GCM tag is
 * the MAC.
 */
1430 1428 static int
1431 1429 aes_mac_atomic(crypto_provider_handle_t provider,
1432 1430     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1433 1431     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1434 1432     crypto_spi_ctx_template_t template, crypto_req_handle_t req)
1435 1433 {
1436 1434 	CK_AES_GCM_PARAMS gcm_params;
1437 1435 	crypto_mechanism_t gcm_mech;
1438 1436 	int rv;
1439 1437 
1440 1438 	if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
1441 1439 	    != CRYPTO_SUCCESS)
1442 1440 		return (rv);
1443 1441 
1444 1442 	gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
1445 1443 	gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
1446 1444 	gcm_mech.cm_param = (char *)&gcm_params;
1447 1445 
1448 1446 	return (aes_encrypt_atomic(provider, session_id, &gcm_mech,
1449 1447 	    key, &null_crypto_data, mac, template, req));
1450 1448 }
1451 1449
/*
 * NOTE(review): atomic AES-GMAC MAC verification — mirror image of
 * aes_mac_atomic(). The supplied mac (ciphertext+tag) is decrypted via
 * aes_decrypt_atomic() with the data as AAD; GCM tag verification
 * inside the decrypt path performs the actual MAC check.
 */
1452 1450 static int
1453 1451 aes_mac_verify_atomic(crypto_provider_handle_t provider,
1454 1452     crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1455 1453     crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1456 1454     crypto_spi_ctx_template_t template, crypto_req_handle_t req)
1457 1455 {
1458 1456 	CK_AES_GCM_PARAMS gcm_params;
1459 1457 	crypto_mechanism_t gcm_mech;
1460 1458 	int rv;
1461 1459 
1462 1460 	if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
1463 1461 	    != CRYPTO_SUCCESS)
1464 1462 		return (rv);
1465 1463 
1466 1464 	gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
1467 1465 	gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
1468 1466 	gcm_mech.cm_param = (char *)&gcm_params;
1469 1467 
1470 1468 	return (aes_decrypt_atomic(provider, session_id, &gcm_mech,
1471 1469 	    key, mac, &null_crypto_data, template, req));
1472 1470 }
↓ open down ↓ |
79 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX