fixup .text where possible
7127 remove -Wno-missing-braces from Makefile.uts
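Background for the change, as a minimal standalone sketch (the structure names are hypothetical stand-ins for modlinkage and crypto_ops_t, not the real KCF definitions): gcc's -Wmissing-braces fires when a nested aggregate is initialized without its own braces. The fix is either to brace the nested initializer explicitly, as the diff below does for modlinkage, or to switch to C99 designated initializers, as it does for sha1_crypto_ops; designated initializers also let the long runs of NULL entries be dropped, since unnamed members are zero-initialized. The extra braces added around sha1_prov_info reflect further nesting inside crypto_provider_info_t.

/*
 * Hypothetical stand-ins; compile with "gcc -c -Wmissing-braces sketch.c"
 * to see the warning on 'bad' and clean passes for the other two.
 */
static int dummy_digest, dummy_mac;

struct ops {
	void	*co_control_ops;
	void	*co_digest_ops;
	void	*co_mac_ops;
	void	*co_ctx_ops;
};

struct linkage {
	int	ml_rev;
	void	*ml_linkage[3];		/* nested aggregate */
};

/* warns: the initializers for ml_linkage[] lack their own braces */
struct linkage bad = {
	1, &dummy_digest, &dummy_mac, (void *)0
};

/* clean: brace the nested aggregate explicitly */
struct linkage good = {
	1, { &dummy_digest, &dummy_mac, (void *)0 }
};

/* clean: designated initializers; omitted members are zero-filled */
struct ops also_good = {
	.co_digest_ops = &dummy_digest,
	.co_mac_ops = &dummy_mac
};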
--- old/usr/src/uts/common/crypto/io/sha1_mod.c
+++ new/usr/src/uts/common/crypto/io/sha1_mod.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <sys/modctl.h>
28 28 #include <sys/cmn_err.h>
29 29 #include <sys/note.h>
30 30 #include <sys/crypto/common.h>
31 31 #include <sys/crypto/spi.h>
32 32 #include <sys/strsun.h>
33 33 #include <sys/systm.h>
34 34 #include <sys/sysmacros.h>
35 35
36 36 #include <sys/sha1.h>
37 37 #include <sha1/sha1_impl.h>
38 38
39 39 /*
40 40 * The sha1 module is created with two modlinkages:
41 41 * - a modlmisc that allows consumers to directly call the entry points
42 42 * SHA1Init, SHA1Update, and SHA1Final.
43 43 * - a modlcrypto that allows the module to register with the Kernel
44 44 * Cryptographic Framework (KCF) as a software provider for the SHA1
45 45 * mechanisms.
46 46 */
47 47
48 48 static struct modlmisc modlmisc = {
49 49 &mod_miscops,
50 50 "SHA1 Message-Digest Algorithm"
51 51 };
52 52
53 53 static struct modlcrypto modlcrypto = {
54 54 &mod_cryptoops,
55 55 "SHA1 Kernel SW Provider 1.1"
56 56 };
57 57
58 58 static struct modlinkage modlinkage = {
59 - MODREV_1, &modlmisc, &modlcrypto, NULL
59 + MODREV_1, { &modlmisc, &modlcrypto, NULL }
60 60 };
61 61
62 62
63 63 /*
64 64 * Macros to access the SHA1 or SHA1-HMAC contexts from a context passed
65 65 * by KCF to one of the entry points.
66 66 */
67 67
68 68 #define PROV_SHA1_CTX(ctx) ((sha1_ctx_t *)(ctx)->cc_provider_private)
69 69 #define PROV_SHA1_HMAC_CTX(ctx) ((sha1_hmac_ctx_t *)(ctx)->cc_provider_private)
70 70
71 71 /* to extract the digest length passed as mechanism parameter */
72 72 #define PROV_SHA1_GET_DIGEST_LEN(m, len) { \
73 73 if (IS_P2ALIGNED((m)->cm_param, sizeof (ulong_t))) \
74 74 (len) = (uint32_t)*((ulong_t *)(void *)mechanism->cm_param); \
75 75 else { \
76 76 ulong_t tmp_ulong; \
77 77 bcopy((m)->cm_param, &tmp_ulong, sizeof (ulong_t)); \
78 78 (len) = (uint32_t)tmp_ulong; \
79 79 } \
80 80 }
81 81
82 82 #define PROV_SHA1_DIGEST_KEY(ctx, key, len, digest) { \
83 83 SHA1Init(ctx); \
84 84 SHA1Update(ctx, key, len); \
85 85 SHA1Final(digest, ctx); \
86 86 }
87 87
88 88 /*
89 89 * Mechanism info structure passed to KCF during registration.
90 90 */
91 91 static crypto_mech_info_t sha1_mech_info_tab[] = {
92 92 /* SHA1 */
93 93 {SUN_CKM_SHA1, SHA1_MECH_INFO_TYPE,
94 94 CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
95 95 0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
96 96 /* SHA1-HMAC */
97 97 {SUN_CKM_SHA1_HMAC, SHA1_HMAC_MECH_INFO_TYPE,
98 98 CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
99 99 SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
100 100 CRYPTO_KEYSIZE_UNIT_IN_BYTES},
101 101 /* SHA1-HMAC GENERAL */
102 102 {SUN_CKM_SHA1_HMAC_GENERAL, SHA1_HMAC_GEN_MECH_INFO_TYPE,
103 103 CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC,
104 104 SHA1_HMAC_MIN_KEY_LEN, SHA1_HMAC_MAX_KEY_LEN,
105 105 CRYPTO_KEYSIZE_UNIT_IN_BYTES}
106 106 };
107 107
108 108 static void sha1_provider_status(crypto_provider_handle_t, uint_t *);
109 109
110 110 static crypto_control_ops_t sha1_control_ops = {
111 111 sha1_provider_status
112 112 };
113 113
114 114 static int sha1_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
115 115 crypto_req_handle_t);
116 116 static int sha1_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
117 117 crypto_req_handle_t);
118 118 static int sha1_digest_update(crypto_ctx_t *, crypto_data_t *,
119 119 crypto_req_handle_t);
120 120 static int sha1_digest_final(crypto_ctx_t *, crypto_data_t *,
121 121 crypto_req_handle_t);
122 122 static int sha1_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
123 123 crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
124 124 crypto_req_handle_t);
125 125
126 126 static crypto_digest_ops_t sha1_digest_ops = {
127 127 sha1_digest_init,
128 128 sha1_digest,
129 129 sha1_digest_update,
130 130 NULL,
131 131 sha1_digest_final,
132 132 sha1_digest_atomic
133 133 };
134 134
135 135 static int sha1_mac_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
136 136 crypto_spi_ctx_template_t, crypto_req_handle_t);
137 137 static int sha1_mac_update(crypto_ctx_t *, crypto_data_t *,
138 138 crypto_req_handle_t);
139 139 static int sha1_mac_final(crypto_ctx_t *, crypto_data_t *, crypto_req_handle_t);
140 140 static int sha1_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
141 141 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
142 142 crypto_spi_ctx_template_t, crypto_req_handle_t);
143 143 static int sha1_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
144 144 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
145 145 crypto_spi_ctx_template_t, crypto_req_handle_t);
146 146
147 147 static crypto_mac_ops_t sha1_mac_ops = {
148 148 sha1_mac_init,
149 149 NULL,
150 150 sha1_mac_update,
151 151 sha1_mac_final,
152 152 sha1_mac_atomic,
153 153 sha1_mac_verify_atomic
154 154 };
155 155
156 156 static int sha1_create_ctx_template(crypto_provider_handle_t,
157 157 crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
158 158 size_t *, crypto_req_handle_t);
159 159 static int sha1_free_context(crypto_ctx_t *);
160 160
161 161 static crypto_ctx_ops_t sha1_ctx_ops = {
162 162 sha1_create_ctx_template,
163 163 sha1_free_context
164 164 };
165 165
166 166 static crypto_ops_t sha1_crypto_ops = {
167 - &sha1_control_ops,
168 - &sha1_digest_ops,
169 - NULL,
170 - &sha1_mac_ops,
171 - NULL,
172 - NULL,
173 - NULL,
174 - NULL,
175 - NULL,
176 - NULL,
177 - NULL,
178 - NULL,
179 - NULL,
180 - &sha1_ctx_ops,
181 - NULL,
182 - NULL,
183 - NULL,
167 + .co_control_ops = &sha1_control_ops,
168 + .co_digest_ops = &sha1_digest_ops,
169 + .co_mac_ops = &sha1_mac_ops,
170 + .co_ctx_ops = &sha1_ctx_ops
184 171 };
185 172
186 -static crypto_provider_info_t sha1_prov_info = {
173 +static crypto_provider_info_t sha1_prov_info = {{{{
187 174 CRYPTO_SPI_VERSION_4,
188 175 "SHA1 Software Provider",
189 176 CRYPTO_SW_PROVIDER,
190 177 {&modlinkage},
191 178 NULL,
192 179 &sha1_crypto_ops,
193 180 sizeof (sha1_mech_info_tab)/sizeof (crypto_mech_info_t),
194 181 sha1_mech_info_tab
195 -};
182 +}}}};
196 183
197 184 static crypto_kcf_provider_handle_t sha1_prov_handle = NULL;
198 185
199 186 int
200 187 _init()
201 188 {
202 189 int ret;
203 190
204 191 if ((ret = mod_install(&modlinkage)) != 0)
205 192 return (ret);
206 193
207 194 /*
208 195 * Register with KCF. If the registration fails, log do not uninstall
209 196 * the module, since the functionality provided by misc/sha1 should
210 197 * still be available.
211 198 */
212 199 (void) crypto_register_provider(&sha1_prov_info, &sha1_prov_handle);
213 200
214 201 return (0);
215 202 }
216 203
217 204 int
218 205 _info(struct modinfo *modinfop)
219 206 {
220 207 return (mod_info(&modlinkage, modinfop));
221 208 }
222 209
223 210 /*
224 211 * KCF software provider control entry points.
225 212 */
226 213 /* ARGSUSED */
227 214 static void
228 215 sha1_provider_status(crypto_provider_handle_t provider, uint_t *status)
229 216 {
230 217 *status = CRYPTO_PROVIDER_READY;
231 218 }
232 219
233 220 /*
234 221 * KCF software provider digest entry points.
235 222 */
236 223
237 224 static int
238 225 sha1_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
239 226 crypto_req_handle_t req)
240 227 {
241 228 if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
242 229 return (CRYPTO_MECHANISM_INVALID);
243 230
244 231 /*
245 232 * Allocate and initialize SHA1 context.
246 233 */
247 234 ctx->cc_provider_private = kmem_alloc(sizeof (sha1_ctx_t),
248 235 crypto_kmflag(req));
249 236 if (ctx->cc_provider_private == NULL)
250 237 return (CRYPTO_HOST_MEMORY);
251 238
252 239 PROV_SHA1_CTX(ctx)->sc_mech_type = SHA1_MECH_INFO_TYPE;
253 240 SHA1Init(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
254 241
255 242 return (CRYPTO_SUCCESS);
256 243 }
257 244
258 245 /*
259 246 * Helper SHA1 digest update function for uio data.
260 247 */
261 248 static int
262 249 sha1_digest_update_uio(SHA1_CTX *sha1_ctx, crypto_data_t *data)
263 250 {
264 251 off_t offset = data->cd_offset;
265 252 size_t length = data->cd_length;
266 253 uint_t vec_idx;
267 254 size_t cur_len;
268 255
269 256 /* we support only kernel buffer */
270 257 if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
271 258 return (CRYPTO_ARGUMENTS_BAD);
272 259
273 260 /*
274 261 * Jump to the first iovec containing data to be
275 262 * digested.
276 263 */
277 264 for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
278 265 offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
279 266 offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
280 267 ;
281 268 if (vec_idx == data->cd_uio->uio_iovcnt) {
282 269 /*
283 270 * The caller specified an offset that is larger than the
284 271 * total size of the buffers it provided.
285 272 */
286 273 return (CRYPTO_DATA_LEN_RANGE);
287 274 }
288 275
289 276 /*
290 277 * Now do the digesting on the iovecs.
291 278 */
292 279 while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
293 280 cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
294 281 offset, length);
295 282
296 283 SHA1Update(sha1_ctx,
297 284 (uint8_t *)data->cd_uio->uio_iov[vec_idx].iov_base + offset,
298 285 cur_len);
299 286
300 287 length -= cur_len;
301 288 vec_idx++;
302 289 offset = 0;
303 290 }
304 291
305 292 if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
306 293 /*
307 294 * The end of the specified iovec's was reached but
308 295 * the length requested could not be processed, i.e.
309 296 * The caller requested to digest more data than it provided.
310 297 */
311 298 return (CRYPTO_DATA_LEN_RANGE);
312 299 }
313 300
314 301 return (CRYPTO_SUCCESS);
315 302 }
316 303
317 304 /*
318 305 * Helper SHA1 digest final function for uio data.
319 306 * digest_len is the length of the desired digest. If digest_len
320 307 * is smaller than the default SHA1 digest length, the caller
321 308 * must pass a scratch buffer, digest_scratch, which must
322 309 * be at least SHA1_DIGEST_LENGTH bytes.
323 310 */
324 311 static int
325 312 sha1_digest_final_uio(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
326 313 ulong_t digest_len, uchar_t *digest_scratch)
327 314 {
328 315 off_t offset = digest->cd_offset;
329 316 uint_t vec_idx;
330 317
331 318 /* we support only kernel buffer */
332 319 if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
333 320 return (CRYPTO_ARGUMENTS_BAD);
334 321
335 322 /*
336 323 * Jump to the first iovec containing ptr to the digest to
337 324 * be returned.
338 325 */
339 326 for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
340 327 vec_idx < digest->cd_uio->uio_iovcnt;
341 328 offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
342 329 ;
343 330 if (vec_idx == digest->cd_uio->uio_iovcnt) {
344 331 /*
345 332 * The caller specified an offset that is
346 333 * larger than the total size of the buffers
347 334 * it provided.
348 335 */
349 336 return (CRYPTO_DATA_LEN_RANGE);
350 337 }
351 338
352 339 if (offset + digest_len <=
353 340 digest->cd_uio->uio_iov[vec_idx].iov_len) {
354 341 /*
355 342 * The computed SHA1 digest will fit in the current
356 343 * iovec.
357 344 */
358 345 if (digest_len != SHA1_DIGEST_LENGTH) {
359 346 /*
360 347 * The caller requested a short digest. Digest
361 348 * into a scratch buffer and return to
362 349 * the user only what was requested.
363 350 */
364 351 SHA1Final(digest_scratch, sha1_ctx);
365 352 bcopy(digest_scratch, (uchar_t *)digest->
366 353 cd_uio->uio_iov[vec_idx].iov_base + offset,
367 354 digest_len);
368 355 } else {
369 356 SHA1Final((uchar_t *)digest->
370 357 cd_uio->uio_iov[vec_idx].iov_base + offset,
371 358 sha1_ctx);
372 359 }
373 360 } else {
374 361 /*
375 362 * The computed digest will be crossing one or more iovec's.
376 363 * This is bad performance-wise but we need to support it.
377 364 * Allocate a small scratch buffer on the stack and
378 365 * copy it piece meal to the specified digest iovec's.
379 366 */
380 367 uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
381 368 off_t scratch_offset = 0;
382 369 size_t length = digest_len;
383 370 size_t cur_len;
384 371
385 372 SHA1Final(digest_tmp, sha1_ctx);
386 373
387 374 while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
388 375 cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
389 376 offset, length);
390 377 bcopy(digest_tmp + scratch_offset,
391 378 digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
392 379 cur_len);
393 380
394 381 length -= cur_len;
395 382 vec_idx++;
396 383 scratch_offset += cur_len;
397 384 offset = 0;
398 385 }
399 386
400 387 if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
401 388 /*
402 389 * The end of the specified iovec's was reached but
403 390 * the length requested could not be processed, i.e.
404 391 * The caller requested to digest more data than it
405 392 * provided.
406 393 */
407 394 return (CRYPTO_DATA_LEN_RANGE);
408 395 }
409 396 }
410 397
411 398 return (CRYPTO_SUCCESS);
412 399 }
413 400
414 401 /*
415 402 * Helper SHA1 digest update for mblk's.
416 403 */
417 404 static int
418 405 sha1_digest_update_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *data)
419 406 {
420 407 off_t offset = data->cd_offset;
421 408 size_t length = data->cd_length;
422 409 mblk_t *mp;
423 410 size_t cur_len;
424 411
425 412 /*
426 413 * Jump to the first mblk_t containing data to be digested.
427 414 */
428 415 for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
429 416 offset -= MBLKL(mp), mp = mp->b_cont)
430 417 ;
431 418 if (mp == NULL) {
432 419 /*
433 420 * The caller specified an offset that is larger than the
434 421 * total size of the buffers it provided.
435 422 */
436 423 return (CRYPTO_DATA_LEN_RANGE);
437 424 }
438 425
439 426 /*
440 427 * Now do the digesting on the mblk chain.
441 428 */
442 429 while (mp != NULL && length > 0) {
443 430 cur_len = MIN(MBLKL(mp) - offset, length);
444 431 SHA1Update(sha1_ctx, mp->b_rptr + offset, cur_len);
445 432 length -= cur_len;
446 433 offset = 0;
447 434 mp = mp->b_cont;
448 435 }
449 436
450 437 if (mp == NULL && length > 0) {
451 438 /*
452 439 * The end of the mblk was reached but the length requested
453 440 * could not be processed, i.e. The caller requested
454 441 * to digest more data than it provided.
455 442 */
456 443 return (CRYPTO_DATA_LEN_RANGE);
457 444 }
458 445
459 446 return (CRYPTO_SUCCESS);
460 447 }
461 448
462 449 /*
463 450 * Helper SHA1 digest final for mblk's.
464 451 * digest_len is the length of the desired digest. If digest_len
465 452 * is smaller than the default SHA1 digest length, the caller
466 453 * must pass a scratch buffer, digest_scratch, which must
467 454 * be at least SHA1_DIGEST_LENGTH bytes.
468 455 */
469 456 static int
470 457 sha1_digest_final_mblk(SHA1_CTX *sha1_ctx, crypto_data_t *digest,
471 458 ulong_t digest_len, uchar_t *digest_scratch)
472 459 {
473 460 off_t offset = digest->cd_offset;
474 461 mblk_t *mp;
475 462
476 463 /*
477 464 * Jump to the first mblk_t that will be used to store the digest.
478 465 */
479 466 for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
480 467 offset -= MBLKL(mp), mp = mp->b_cont)
481 468 ;
482 469 if (mp == NULL) {
483 470 /*
484 471 * The caller specified an offset that is larger than the
485 472 * total size of the buffers it provided.
486 473 */
487 474 return (CRYPTO_DATA_LEN_RANGE);
488 475 }
489 476
490 477 if (offset + digest_len <= MBLKL(mp)) {
491 478 /*
492 479 * The computed SHA1 digest will fit in the current mblk.
493 480 * Do the SHA1Final() in-place.
494 481 */
495 482 if (digest_len != SHA1_DIGEST_LENGTH) {
496 483 /*
497 484 * The caller requested a short digest. Digest
498 485 * into a scratch buffer and return to
499 486 * the user only what was requested.
500 487 */
501 488 SHA1Final(digest_scratch, sha1_ctx);
502 489 bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
503 490 } else {
504 491 SHA1Final(mp->b_rptr + offset, sha1_ctx);
505 492 }
506 493 } else {
507 494 /*
508 495 * The computed digest will be crossing one or more mblk's.
509 496 * This is bad performance-wise but we need to support it.
510 497 * Allocate a small scratch buffer on the stack and
511 498 * copy it piece meal to the specified digest iovec's.
512 499 */
513 500 uchar_t digest_tmp[SHA1_DIGEST_LENGTH];
514 501 off_t scratch_offset = 0;
515 502 size_t length = digest_len;
516 503 size_t cur_len;
517 504
518 505 SHA1Final(digest_tmp, sha1_ctx);
519 506
520 507 while (mp != NULL && length > 0) {
521 508 cur_len = MIN(MBLKL(mp) - offset, length);
522 509 bcopy(digest_tmp + scratch_offset,
523 510 mp->b_rptr + offset, cur_len);
524 511
525 512 length -= cur_len;
526 513 mp = mp->b_cont;
527 514 scratch_offset += cur_len;
528 515 offset = 0;
529 516 }
530 517
531 518 if (mp == NULL && length > 0) {
532 519 /*
533 520 * The end of the specified mblk was reached but
534 521 * the length requested could not be processed, i.e.
535 522 * The caller requested to digest more data than it
536 523 * provided.
537 524 */
538 525 return (CRYPTO_DATA_LEN_RANGE);
539 526 }
540 527 }
541 528
542 529 return (CRYPTO_SUCCESS);
543 530 }
544 531
545 532 /* ARGSUSED */
546 533 static int
547 534 sha1_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
548 535 crypto_req_handle_t req)
549 536 {
550 537 int ret = CRYPTO_SUCCESS;
551 538
552 539 ASSERT(ctx->cc_provider_private != NULL);
553 540
554 541 /*
555 542 * We need to just return the length needed to store the output.
556 543 * We should not destroy the context for the following cases.
557 544 */
558 545 if ((digest->cd_length == 0) ||
559 546 (digest->cd_length < SHA1_DIGEST_LENGTH)) {
560 547 digest->cd_length = SHA1_DIGEST_LENGTH;
561 548 return (CRYPTO_BUFFER_TOO_SMALL);
562 549 }
563 550
564 551 /*
565 552 * Do the SHA1 update on the specified input data.
566 553 */
567 554 switch (data->cd_format) {
568 555 case CRYPTO_DATA_RAW:
569 556 SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
570 557 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
571 558 data->cd_length);
572 559 break;
573 560 case CRYPTO_DATA_UIO:
574 561 ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
575 562 data);
576 563 break;
577 564 case CRYPTO_DATA_MBLK:
578 565 ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
579 566 data);
580 567 break;
581 568 default:
582 569 ret = CRYPTO_ARGUMENTS_BAD;
583 570 }
584 571
585 572 if (ret != CRYPTO_SUCCESS) {
586 573 /* the update failed, free context and bail */
587 574 kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
588 575 ctx->cc_provider_private = NULL;
589 576 digest->cd_length = 0;
590 577 return (ret);
591 578 }
592 579
593 580 /*
594 581 * Do a SHA1 final, must be done separately since the digest
595 582 * type can be different than the input data type.
596 583 */
597 584 switch (digest->cd_format) {
598 585 case CRYPTO_DATA_RAW:
599 586 SHA1Final((unsigned char *)digest->cd_raw.iov_base +
600 587 digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
601 588 break;
602 589 case CRYPTO_DATA_UIO:
603 590 ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
604 591 digest, SHA1_DIGEST_LENGTH, NULL);
605 592 break;
606 593 case CRYPTO_DATA_MBLK:
607 594 ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
608 595 digest, SHA1_DIGEST_LENGTH, NULL);
609 596 break;
610 597 default:
611 598 ret = CRYPTO_ARGUMENTS_BAD;
612 599 }
613 600
614 601 /* all done, free context and return */
615 602
616 603 if (ret == CRYPTO_SUCCESS) {
617 604 digest->cd_length = SHA1_DIGEST_LENGTH;
618 605 } else {
619 606 digest->cd_length = 0;
620 607 }
621 608
622 609 kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
623 610 ctx->cc_provider_private = NULL;
624 611 return (ret);
625 612 }
626 613
627 614 /* ARGSUSED */
628 615 static int
629 616 sha1_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
630 617 crypto_req_handle_t req)
631 618 {
632 619 int ret = CRYPTO_SUCCESS;
633 620
634 621 ASSERT(ctx->cc_provider_private != NULL);
635 622
636 623 /*
637 624 * Do the SHA1 update on the specified input data.
638 625 */
639 626 switch (data->cd_format) {
640 627 case CRYPTO_DATA_RAW:
641 628 SHA1Update(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
642 629 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
643 630 data->cd_length);
644 631 break;
645 632 case CRYPTO_DATA_UIO:
646 633 ret = sha1_digest_update_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
647 634 data);
648 635 break;
649 636 case CRYPTO_DATA_MBLK:
650 637 ret = sha1_digest_update_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
651 638 data);
652 639 break;
653 640 default:
654 641 ret = CRYPTO_ARGUMENTS_BAD;
655 642 }
656 643
657 644 return (ret);
658 645 }
659 646
660 647 /* ARGSUSED */
661 648 static int
662 649 sha1_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
663 650 crypto_req_handle_t req)
664 651 {
665 652 int ret = CRYPTO_SUCCESS;
666 653
667 654 ASSERT(ctx->cc_provider_private != NULL);
668 655
669 656 /*
670 657 * We need to just return the length needed to store the output.
671 658 * We should not destroy the context for the following cases.
672 659 */
673 660 if ((digest->cd_length == 0) ||
674 661 (digest->cd_length < SHA1_DIGEST_LENGTH)) {
675 662 digest->cd_length = SHA1_DIGEST_LENGTH;
676 663 return (CRYPTO_BUFFER_TOO_SMALL);
677 664 }
678 665
679 666 /*
680 667 * Do a SHA1 final.
681 668 */
682 669 switch (digest->cd_format) {
683 670 case CRYPTO_DATA_RAW:
684 671 SHA1Final((unsigned char *)digest->cd_raw.iov_base +
685 672 digest->cd_offset, &PROV_SHA1_CTX(ctx)->sc_sha1_ctx);
686 673 break;
687 674 case CRYPTO_DATA_UIO:
688 675 ret = sha1_digest_final_uio(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
689 676 digest, SHA1_DIGEST_LENGTH, NULL);
690 677 break;
691 678 case CRYPTO_DATA_MBLK:
692 679 ret = sha1_digest_final_mblk(&PROV_SHA1_CTX(ctx)->sc_sha1_ctx,
693 680 digest, SHA1_DIGEST_LENGTH, NULL);
694 681 break;
695 682 default:
696 683 ret = CRYPTO_ARGUMENTS_BAD;
697 684 }
698 685
699 686 /* all done, free context and return */
700 687
701 688 if (ret == CRYPTO_SUCCESS) {
702 689 digest->cd_length = SHA1_DIGEST_LENGTH;
703 690 } else {
704 691 digest->cd_length = 0;
705 692 }
706 693
707 694 kmem_free(ctx->cc_provider_private, sizeof (sha1_ctx_t));
708 695 ctx->cc_provider_private = NULL;
709 696
710 697 return (ret);
711 698 }
712 699
713 700 /* ARGSUSED */
714 701 static int
715 702 sha1_digest_atomic(crypto_provider_handle_t provider,
716 703 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
717 704 crypto_data_t *data, crypto_data_t *digest,
718 705 crypto_req_handle_t req)
719 706 {
720 707 int ret = CRYPTO_SUCCESS;
721 708 SHA1_CTX sha1_ctx;
722 709
723 710 if (mechanism->cm_type != SHA1_MECH_INFO_TYPE)
724 711 return (CRYPTO_MECHANISM_INVALID);
725 712
726 713 /*
727 714 * Do the SHA1 init.
728 715 */
729 716 SHA1Init(&sha1_ctx);
730 717
731 718 /*
732 719 * Do the SHA1 update on the specified input data.
733 720 */
734 721 switch (data->cd_format) {
735 722 case CRYPTO_DATA_RAW:
736 723 SHA1Update(&sha1_ctx,
737 724 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
738 725 data->cd_length);
739 726 break;
740 727 case CRYPTO_DATA_UIO:
741 728 ret = sha1_digest_update_uio(&sha1_ctx, data);
742 729 break;
743 730 case CRYPTO_DATA_MBLK:
744 731 ret = sha1_digest_update_mblk(&sha1_ctx, data);
745 732 break;
746 733 default:
747 734 ret = CRYPTO_ARGUMENTS_BAD;
748 735 }
749 736
750 737 if (ret != CRYPTO_SUCCESS) {
751 738 /* the update failed, bail */
752 739 digest->cd_length = 0;
753 740 return (ret);
754 741 }
755 742
756 743 /*
757 744 * Do a SHA1 final, must be done separately since the digest
758 745 * type can be different than the input data type.
759 746 */
760 747 switch (digest->cd_format) {
761 748 case CRYPTO_DATA_RAW:
762 749 SHA1Final((unsigned char *)digest->cd_raw.iov_base +
763 750 digest->cd_offset, &sha1_ctx);
764 751 break;
765 752 case CRYPTO_DATA_UIO:
766 753 ret = sha1_digest_final_uio(&sha1_ctx, digest,
767 754 SHA1_DIGEST_LENGTH, NULL);
768 755 break;
769 756 case CRYPTO_DATA_MBLK:
770 757 ret = sha1_digest_final_mblk(&sha1_ctx, digest,
771 758 SHA1_DIGEST_LENGTH, NULL);
772 759 break;
773 760 default:
774 761 ret = CRYPTO_ARGUMENTS_BAD;
775 762 }
776 763
777 764 if (ret == CRYPTO_SUCCESS) {
778 765 digest->cd_length = SHA1_DIGEST_LENGTH;
779 766 } else {
780 767 digest->cd_length = 0;
781 768 }
782 769
783 770 return (ret);
784 771 }
785 772
786 773 /*
787 774 * KCF software provider mac entry points.
788 775 *
789 776 * SHA1 HMAC is: SHA1(key XOR opad, SHA1(key XOR ipad, text))
790 777 *
791 778 * Init:
792 779 * The initialization routine initializes what we denote
793 780 * as the inner and outer contexts by doing
794 781 * - for inner context: SHA1(key XOR ipad)
795 782 * - for outer context: SHA1(key XOR opad)
796 783 *
797 784 * Update:
798 785 * Each subsequent SHA1 HMAC update will result in an
799 786 * update of the inner context with the specified data.
800 787 *
801 788 * Final:
802 789 * The SHA1 HMAC final will do a SHA1 final operation on the
803 790 * inner context, and the resulting digest will be used
804 791 * as the data for an update on the outer context. Last
805 792 * but not least, a SHA1 final on the outer context will
806 793 * be performed to obtain the SHA1 HMAC digest to return
807 794 * to the user.
808 795 */
809 796
810 797 /*
811 798 * Initialize a SHA1-HMAC context.
812 799 */
813 800 static void
814 801 sha1_mac_init_ctx(sha1_hmac_ctx_t *ctx, void *keyval, uint_t length_in_bytes)
815 802 {
816 803 uint32_t ipad[SHA1_HMAC_INTS_PER_BLOCK];
817 804 uint32_t opad[SHA1_HMAC_INTS_PER_BLOCK];
818 805 uint_t i;
819 806
820 807 bzero(ipad, SHA1_HMAC_BLOCK_SIZE);
821 808 bzero(opad, SHA1_HMAC_BLOCK_SIZE);
822 809
823 810 bcopy(keyval, ipad, length_in_bytes);
824 811 bcopy(keyval, opad, length_in_bytes);
825 812
826 813 /* XOR key with ipad (0x36) and opad (0x5c) */
827 814 for (i = 0; i < SHA1_HMAC_INTS_PER_BLOCK; i++) {
828 815 ipad[i] ^= 0x36363636;
829 816 opad[i] ^= 0x5c5c5c5c;
830 817 }
831 818
832 819 /* perform SHA1 on ipad */
833 820 SHA1Init(&ctx->hc_icontext);
834 821 SHA1Update(&ctx->hc_icontext, (uint8_t *)ipad, SHA1_HMAC_BLOCK_SIZE);
835 822
836 823 /* perform SHA1 on opad */
837 824 SHA1Init(&ctx->hc_ocontext);
838 825 SHA1Update(&ctx->hc_ocontext, (uint8_t *)opad, SHA1_HMAC_BLOCK_SIZE);
839 826 }
840 827
841 828 /*
842 829 */
843 830 static int
844 831 sha1_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
845 832 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
846 833 crypto_req_handle_t req)
847 834 {
848 835 int ret = CRYPTO_SUCCESS;
849 836 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
850 837
851 838 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
852 839 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
853 840 return (CRYPTO_MECHANISM_INVALID);
854 841
855 842 /* Add support for key by attributes (RFE 4706552) */
856 843 if (key->ck_format != CRYPTO_KEY_RAW)
857 844 return (CRYPTO_ARGUMENTS_BAD);
858 845
859 846 ctx->cc_provider_private = kmem_alloc(sizeof (sha1_hmac_ctx_t),
860 847 crypto_kmflag(req));
861 848 if (ctx->cc_provider_private == NULL)
862 849 return (CRYPTO_HOST_MEMORY);
863 850
864 851 if (ctx_template != NULL) {
865 852 /* reuse context template */
866 853 bcopy(ctx_template, PROV_SHA1_HMAC_CTX(ctx),
867 854 sizeof (sha1_hmac_ctx_t));
868 855 } else {
869 856 /* no context template, compute context */
870 857 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
871 858 uchar_t digested_key[SHA1_DIGEST_LENGTH];
872 859 sha1_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;
873 860
874 861 /*
875 862 * Hash the passed-in key to get a smaller key.
876 863 * The inner context is used since it hasn't been
877 864 * initialized yet.
878 865 */
879 866 PROV_SHA1_DIGEST_KEY(&hmac_ctx->hc_icontext,
880 867 key->ck_data, keylen_in_bytes, digested_key);
881 868 sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
882 869 digested_key, SHA1_DIGEST_LENGTH);
883 870 } else {
884 871 sha1_mac_init_ctx(PROV_SHA1_HMAC_CTX(ctx),
885 872 key->ck_data, keylen_in_bytes);
886 873 }
887 874 }
888 875
889 876 /*
890 877 * Get the mechanism parameters, if applicable.
891 878 */
892 879 PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
893 880 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
894 881 if (mechanism->cm_param == NULL ||
895 882 mechanism->cm_param_len != sizeof (ulong_t))
896 883 ret = CRYPTO_MECHANISM_PARAM_INVALID;
897 884 PROV_SHA1_GET_DIGEST_LEN(mechanism,
898 885 PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len);
899 886 if (PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len >
900 887 SHA1_DIGEST_LENGTH)
901 888 ret = CRYPTO_MECHANISM_PARAM_INVALID;
902 889 }
903 890
904 891 if (ret != CRYPTO_SUCCESS) {
905 892 bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
906 893 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
907 894 ctx->cc_provider_private = NULL;
908 895 }
909 896
910 897 return (ret);
911 898 }
912 899
913 900 /* ARGSUSED */
914 901 static int
915 902 sha1_mac_update(crypto_ctx_t *ctx, crypto_data_t *data, crypto_req_handle_t req)
916 903 {
917 904 int ret = CRYPTO_SUCCESS;
918 905
919 906 ASSERT(ctx->cc_provider_private != NULL);
920 907
921 908 /*
922 909 * Do a SHA1 update of the inner context using the specified
923 910 * data.
924 911 */
925 912 switch (data->cd_format) {
926 913 case CRYPTO_DATA_RAW:
927 914 SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_icontext,
928 915 (uint8_t *)data->cd_raw.iov_base + data->cd_offset,
929 916 data->cd_length);
930 917 break;
931 918 case CRYPTO_DATA_UIO:
932 919 ret = sha1_digest_update_uio(
933 920 &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
934 921 break;
935 922 case CRYPTO_DATA_MBLK:
936 923 ret = sha1_digest_update_mblk(
937 924 &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext, data);
938 925 break;
939 926 default:
940 927 ret = CRYPTO_ARGUMENTS_BAD;
941 928 }
942 929
943 930 return (ret);
944 931 }
945 932
946 933 /* ARGSUSED */
947 934 static int
948 935 sha1_mac_final(crypto_ctx_t *ctx, crypto_data_t *mac, crypto_req_handle_t req)
949 936 {
950 937 int ret = CRYPTO_SUCCESS;
951 938 uchar_t digest[SHA1_DIGEST_LENGTH];
952 939 uint32_t digest_len = SHA1_DIGEST_LENGTH;
953 940
954 941 ASSERT(ctx->cc_provider_private != NULL);
955 942
956 943 if (PROV_SHA1_HMAC_CTX(ctx)->hc_mech_type ==
957 944 SHA1_HMAC_GEN_MECH_INFO_TYPE)
958 945 digest_len = PROV_SHA1_HMAC_CTX(ctx)->hc_digest_len;
959 946
960 947 /*
961 948 * We need to just return the length needed to store the output.
962 949 * We should not destroy the context for the following cases.
963 950 */
964 951 if ((mac->cd_length == 0) || (mac->cd_length < digest_len)) {
965 952 mac->cd_length = digest_len;
966 953 return (CRYPTO_BUFFER_TOO_SMALL);
967 954 }
968 955
969 956 /*
970 957 * Do a SHA1 final on the inner context.
971 958 */
972 959 SHA1Final(digest, &PROV_SHA1_HMAC_CTX(ctx)->hc_icontext);
973 960
974 961 /*
975 962 * Do a SHA1 update on the outer context, feeding the inner
976 963 * digest as data.
977 964 */
978 965 SHA1Update(&PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, digest,
979 966 SHA1_DIGEST_LENGTH);
980 967
981 968 /*
982 969 	 * Do a SHA1 final on the outer context, storing the computed
983 970 * digest in the users buffer.
984 971 */
985 972 switch (mac->cd_format) {
986 973 case CRYPTO_DATA_RAW:
987 974 if (digest_len != SHA1_DIGEST_LENGTH) {
988 975 /*
989 976 * The caller requested a short digest. Digest
990 977 * into a scratch buffer and return to
991 978 * the user only what was requested.
992 979 */
993 980 SHA1Final(digest,
994 981 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
995 982 bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
996 983 mac->cd_offset, digest_len);
997 984 } else {
998 985 SHA1Final((unsigned char *)mac->cd_raw.iov_base +
999 986 mac->cd_offset,
1000 987 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext);
1001 988 }
1002 989 break;
1003 990 case CRYPTO_DATA_UIO:
1004 991 ret = sha1_digest_final_uio(
1005 992 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1006 993 digest_len, digest);
1007 994 break;
1008 995 case CRYPTO_DATA_MBLK:
1009 996 ret = sha1_digest_final_mblk(
1010 997 &PROV_SHA1_HMAC_CTX(ctx)->hc_ocontext, mac,
1011 998 digest_len, digest);
1012 999 break;
1013 1000 default:
1014 1001 ret = CRYPTO_ARGUMENTS_BAD;
1015 1002 }
1016 1003
1017 1004 if (ret == CRYPTO_SUCCESS) {
1018 1005 mac->cd_length = digest_len;
1019 1006 } else {
1020 1007 mac->cd_length = 0;
1021 1008 }
1022 1009
1023 1010 bzero(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1024 1011 kmem_free(ctx->cc_provider_private, sizeof (sha1_hmac_ctx_t));
1025 1012 ctx->cc_provider_private = NULL;
1026 1013
1027 1014 return (ret);
1028 1015 }
1029 1016
1030 1017 #define SHA1_MAC_UPDATE(data, ctx, ret) { \
1031 1018 switch (data->cd_format) { \
1032 1019 case CRYPTO_DATA_RAW: \
1033 1020 SHA1Update(&(ctx).hc_icontext, \
1034 1021 (uint8_t *)data->cd_raw.iov_base + \
1035 1022 data->cd_offset, data->cd_length); \
1036 1023 break; \
1037 1024 case CRYPTO_DATA_UIO: \
1038 1025 ret = sha1_digest_update_uio(&(ctx).hc_icontext, data); \
1039 1026 break; \
1040 1027 case CRYPTO_DATA_MBLK: \
1041 1028 ret = sha1_digest_update_mblk(&(ctx).hc_icontext, \
1042 1029 data); \
1043 1030 break; \
1044 1031 default: \
1045 1032 ret = CRYPTO_ARGUMENTS_BAD; \
1046 1033 } \
1047 1034 }
1048 1035
1049 1036 /* ARGSUSED */
1050 1037 static int
1051 1038 sha1_mac_atomic(crypto_provider_handle_t provider,
1052 1039 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1053 1040 crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1054 1041 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1055 1042 {
1056 1043 int ret = CRYPTO_SUCCESS;
1057 1044 uchar_t digest[SHA1_DIGEST_LENGTH];
1058 1045 sha1_hmac_ctx_t sha1_hmac_ctx;
1059 1046 uint32_t digest_len = SHA1_DIGEST_LENGTH;
1060 1047 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1061 1048
1062 1049 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1063 1050 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1064 1051 return (CRYPTO_MECHANISM_INVALID);
1065 1052
1066 1053 /* Add support for key by attributes (RFE 4706552) */
1067 1054 if (key->ck_format != CRYPTO_KEY_RAW)
1068 1055 return (CRYPTO_ARGUMENTS_BAD);
1069 1056
1070 1057 if (ctx_template != NULL) {
1071 1058 /* reuse context template */
1072 1059 bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1073 1060 } else {
1074 1061 /* no context template, initialize context */
1075 1062 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1076 1063 /*
1077 1064 * Hash the passed-in key to get a smaller key.
1078 1065 * The inner context is used since it hasn't been
1079 1066 * initialized yet.
1080 1067 */
1081 1068 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1082 1069 key->ck_data, keylen_in_bytes, digest);
1083 1070 sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1084 1071 SHA1_DIGEST_LENGTH);
1085 1072 } else {
1086 1073 sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1087 1074 keylen_in_bytes);
1088 1075 }
1089 1076 }
1090 1077
1091 1078 /* get the mechanism parameters, if applicable */
1092 1079 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1093 1080 if (mechanism->cm_param == NULL ||
1094 1081 mechanism->cm_param_len != sizeof (ulong_t)) {
1095 1082 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1096 1083 goto bail;
1097 1084 }
1098 1085 PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1099 1086 if (digest_len > SHA1_DIGEST_LENGTH) {
1100 1087 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1101 1088 goto bail;
1102 1089 }
1103 1090 }
1104 1091
1105 1092 /* do a SHA1 update of the inner context using the specified data */
1106 1093 SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1107 1094 if (ret != CRYPTO_SUCCESS)
1108 1095 /* the update failed, free context and bail */
1109 1096 goto bail;
1110 1097
1111 1098 /*
1112 1099 * Do a SHA1 final on the inner context.
1113 1100 */
1114 1101 SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1115 1102
1116 1103 /*
1117 1104 * Do an SHA1 update on the outer context, feeding the inner
1118 1105 * digest as data.
1119 1106 */
1120 1107 SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1121 1108
1122 1109 /*
1123 1110 * Do a SHA1 final on the outer context, storing the computed
1124 1111 * digest in the users buffer.
1125 1112 */
1126 1113 switch (mac->cd_format) {
1127 1114 case CRYPTO_DATA_RAW:
1128 1115 if (digest_len != SHA1_DIGEST_LENGTH) {
1129 1116 /*
1130 1117 * The caller requested a short digest. Digest
1131 1118 * into a scratch buffer and return to
1132 1119 * the user only what was requested.
1133 1120 */
1134 1121 SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1135 1122 bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
1136 1123 mac->cd_offset, digest_len);
1137 1124 } else {
1138 1125 SHA1Final((unsigned char *)mac->cd_raw.iov_base +
1139 1126 mac->cd_offset, &sha1_hmac_ctx.hc_ocontext);
1140 1127 }
1141 1128 break;
1142 1129 case CRYPTO_DATA_UIO:
1143 1130 ret = sha1_digest_final_uio(&sha1_hmac_ctx.hc_ocontext, mac,
1144 1131 digest_len, digest);
1145 1132 break;
1146 1133 case CRYPTO_DATA_MBLK:
1147 1134 ret = sha1_digest_final_mblk(&sha1_hmac_ctx.hc_ocontext, mac,
1148 1135 digest_len, digest);
1149 1136 break;
1150 1137 default:
1151 1138 ret = CRYPTO_ARGUMENTS_BAD;
1152 1139 }
1153 1140
1154 1141 if (ret == CRYPTO_SUCCESS) {
1155 1142 mac->cd_length = digest_len;
1156 1143 } else {
1157 1144 mac->cd_length = 0;
1158 1145 }
1159 1146 /* Extra paranoia: zeroize the context on the stack */
1160 1147 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1161 1148
1162 1149 return (ret);
1163 1150 bail:
1164 1151 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1165 1152 mac->cd_length = 0;
1166 1153 return (ret);
1167 1154 }
1168 1155
1169 1156 /* ARGSUSED */
1170 1157 static int
1171 1158 sha1_mac_verify_atomic(crypto_provider_handle_t provider,
1172 1159 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
1173 1160 crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
1174 1161 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
1175 1162 {
1176 1163 int ret = CRYPTO_SUCCESS;
1177 1164 uchar_t digest[SHA1_DIGEST_LENGTH];
1178 1165 sha1_hmac_ctx_t sha1_hmac_ctx;
1179 1166 uint32_t digest_len = SHA1_DIGEST_LENGTH;
1180 1167 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1181 1168
1182 1169 if (mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE &&
1183 1170 mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)
1184 1171 return (CRYPTO_MECHANISM_INVALID);
1185 1172
1186 1173 /* Add support for key by attributes (RFE 4706552) */
1187 1174 if (key->ck_format != CRYPTO_KEY_RAW)
1188 1175 return (CRYPTO_ARGUMENTS_BAD);
1189 1176
1190 1177 if (ctx_template != NULL) {
1191 1178 /* reuse context template */
1192 1179 bcopy(ctx_template, &sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1193 1180 } else {
1194 1181 /* no context template, initialize context */
1195 1182 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1196 1183 /*
1197 1184 * Hash the passed-in key to get a smaller key.
1198 1185 * The inner context is used since it hasn't been
1199 1186 * initialized yet.
1200 1187 */
1201 1188 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx.hc_icontext,
1202 1189 key->ck_data, keylen_in_bytes, digest);
1203 1190 sha1_mac_init_ctx(&sha1_hmac_ctx, digest,
1204 1191 SHA1_DIGEST_LENGTH);
1205 1192 } else {
1206 1193 sha1_mac_init_ctx(&sha1_hmac_ctx, key->ck_data,
1207 1194 keylen_in_bytes);
1208 1195 }
1209 1196 }
1210 1197
1211 1198 /* get the mechanism parameters, if applicable */
1212 1199 if (mechanism->cm_type == SHA1_HMAC_GEN_MECH_INFO_TYPE) {
1213 1200 if (mechanism->cm_param == NULL ||
1214 1201 mechanism->cm_param_len != sizeof (ulong_t)) {
1215 1202 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1216 1203 goto bail;
1217 1204 }
1218 1205 PROV_SHA1_GET_DIGEST_LEN(mechanism, digest_len);
1219 1206 if (digest_len > SHA1_DIGEST_LENGTH) {
1220 1207 ret = CRYPTO_MECHANISM_PARAM_INVALID;
1221 1208 goto bail;
1222 1209 }
1223 1210 }
1224 1211
1225 1212 if (mac->cd_length != digest_len) {
1226 1213 ret = CRYPTO_INVALID_MAC;
1227 1214 goto bail;
1228 1215 }
1229 1216
1230 1217 /* do a SHA1 update of the inner context using the specified data */
1231 1218 SHA1_MAC_UPDATE(data, sha1_hmac_ctx, ret);
1232 1219 if (ret != CRYPTO_SUCCESS)
1233 1220 /* the update failed, free context and bail */
1234 1221 goto bail;
1235 1222
1236 1223 /* do a SHA1 final on the inner context */
1237 1224 SHA1Final(digest, &sha1_hmac_ctx.hc_icontext);
1238 1225
1239 1226 /*
1240 1227 * Do an SHA1 update on the outer context, feeding the inner
1241 1228 * digest as data.
1242 1229 */
1243 1230 SHA1Update(&sha1_hmac_ctx.hc_ocontext, digest, SHA1_DIGEST_LENGTH);
1244 1231
1245 1232 /*
1246 1233 * Do a SHA1 final on the outer context, storing the computed
1247 1234 * digest in the users buffer.
1248 1235 */
1249 1236 SHA1Final(digest, &sha1_hmac_ctx.hc_ocontext);
1250 1237
1251 1238 /*
1252 1239 * Compare the computed digest against the expected digest passed
1253 1240 * as argument.
1254 1241 */
1255 1242
1256 1243 switch (mac->cd_format) {
1257 1244
1258 1245 case CRYPTO_DATA_RAW:
1259 1246 if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base +
1260 1247 mac->cd_offset, digest_len) != 0)
1261 1248 ret = CRYPTO_INVALID_MAC;
1262 1249 break;
1263 1250
1264 1251 case CRYPTO_DATA_UIO: {
1265 1252 off_t offset = mac->cd_offset;
1266 1253 uint_t vec_idx;
1267 1254 off_t scratch_offset = 0;
1268 1255 size_t length = digest_len;
1269 1256 size_t cur_len;
1270 1257
1271 1258 /* we support only kernel buffer */
1272 1259 if (mac->cd_uio->uio_segflg != UIO_SYSSPACE)
1273 1260 return (CRYPTO_ARGUMENTS_BAD);
1274 1261
1275 1262 /* jump to the first iovec containing the expected digest */
1276 1263 for (vec_idx = 0;
1277 1264 offset >= mac->cd_uio->uio_iov[vec_idx].iov_len &&
1278 1265 vec_idx < mac->cd_uio->uio_iovcnt;
1279 1266 offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len)
1280 1267 ;
1281 1268 if (vec_idx == mac->cd_uio->uio_iovcnt) {
1282 1269 /*
1283 1270 * The caller specified an offset that is
1284 1271 * larger than the total size of the buffers
1285 1272 * it provided.
1286 1273 */
1287 1274 ret = CRYPTO_DATA_LEN_RANGE;
1288 1275 break;
1289 1276 }
1290 1277
1291 1278 /* do the comparison of computed digest vs specified one */
1292 1279 while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) {
1293 1280 cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len -
1294 1281 offset, length);
1295 1282
1296 1283 if (bcmp(digest + scratch_offset,
1297 1284 mac->cd_uio->uio_iov[vec_idx].iov_base + offset,
1298 1285 cur_len) != 0) {
1299 1286 ret = CRYPTO_INVALID_MAC;
1300 1287 break;
1301 1288 }
1302 1289
1303 1290 length -= cur_len;
1304 1291 vec_idx++;
1305 1292 scratch_offset += cur_len;
1306 1293 offset = 0;
1307 1294 }
1308 1295 break;
1309 1296 }
1310 1297
1311 1298 case CRYPTO_DATA_MBLK: {
1312 1299 off_t offset = mac->cd_offset;
1313 1300 mblk_t *mp;
1314 1301 off_t scratch_offset = 0;
1315 1302 size_t length = digest_len;
1316 1303 size_t cur_len;
1317 1304
1318 1305 /* jump to the first mblk_t containing the expected digest */
1319 1306 for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp);
1320 1307 offset -= MBLKL(mp), mp = mp->b_cont)
1321 1308 ;
1322 1309 if (mp == NULL) {
1323 1310 /*
1324 1311 * The caller specified an offset that is larger than
1325 1312 * the total size of the buffers it provided.
1326 1313 */
1327 1314 ret = CRYPTO_DATA_LEN_RANGE;
1328 1315 break;
1329 1316 }
1330 1317
1331 1318 while (mp != NULL && length > 0) {
1332 1319 cur_len = MIN(MBLKL(mp) - offset, length);
1333 1320 if (bcmp(digest + scratch_offset,
1334 1321 mp->b_rptr + offset, cur_len) != 0) {
1335 1322 ret = CRYPTO_INVALID_MAC;
1336 1323 break;
1337 1324 }
1338 1325
1339 1326 length -= cur_len;
1340 1327 mp = mp->b_cont;
1341 1328 scratch_offset += cur_len;
1342 1329 offset = 0;
1343 1330 }
1344 1331 break;
1345 1332 }
1346 1333
1347 1334 default:
1348 1335 ret = CRYPTO_ARGUMENTS_BAD;
1349 1336 }
1350 1337
1351 1338 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1352 1339 return (ret);
1353 1340 bail:
1354 1341 bzero(&sha1_hmac_ctx, sizeof (sha1_hmac_ctx_t));
1355 1342 mac->cd_length = 0;
1356 1343 return (ret);
1357 1344 }
1358 1345
1359 1346 /*
1360 1347 * KCF software provider context management entry points.
1361 1348 */
1362 1349
1363 1350 /* ARGSUSED */
1364 1351 static int
1365 1352 sha1_create_ctx_template(crypto_provider_handle_t provider,
1366 1353 crypto_mechanism_t *mechanism, crypto_key_t *key,
1367 1354 crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size,
1368 1355 crypto_req_handle_t req)
1369 1356 {
1370 1357 sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl;
1371 1358 uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
1372 1359
1373 1360 if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) &&
1374 1361 (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) {
1375 1362 return (CRYPTO_MECHANISM_INVALID);
1376 1363 }
1377 1364
1378 1365 /* Add support for key by attributes (RFE 4706552) */
1379 1366 if (key->ck_format != CRYPTO_KEY_RAW)
1380 1367 return (CRYPTO_ARGUMENTS_BAD);
1381 1368
1382 1369 /*
1383 1370 * Allocate and initialize SHA1 context.
1384 1371 */
1385 1372 sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t),
1386 1373 crypto_kmflag(req));
1387 1374 if (sha1_hmac_ctx_tmpl == NULL)
1388 1375 return (CRYPTO_HOST_MEMORY);
1389 1376
1390 1377 if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) {
1391 1378 uchar_t digested_key[SHA1_DIGEST_LENGTH];
1392 1379
1393 1380 /*
1394 1381 * Hash the passed-in key to get a smaller key.
1395 1382 * The inner context is used since it hasn't been
1396 1383 * initialized yet.
1397 1384 */
1398 1385 PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext,
1399 1386 key->ck_data, keylen_in_bytes, digested_key);
1400 1387 sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key,
1401 1388 SHA1_DIGEST_LENGTH);
1402 1389 } else {
1403 1390 sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data,
1404 1391 keylen_in_bytes);
1405 1392 }
1406 1393
1407 1394 sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type;
1408 1395 *ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl;
1409 1396 *ctx_template_size = sizeof (sha1_hmac_ctx_t);
1410 1397
1411 1398
1412 1399 return (CRYPTO_SUCCESS);
1413 1400 }
1414 1401
1415 1402 static int
1416 1403 sha1_free_context(crypto_ctx_t *ctx)
1417 1404 {
1418 1405 uint_t ctx_len;
1419 1406 sha1_mech_type_t mech_type;
1420 1407
1421 1408 if (ctx->cc_provider_private == NULL)
1422 1409 return (CRYPTO_SUCCESS);
1423 1410
1424 1411 /*
1425 1412 * We have to free either SHA1 or SHA1-HMAC contexts, which
1426 1413 * have different lengths.
1427 1414 */
1428 1415
1429 1416 mech_type = PROV_SHA1_CTX(ctx)->sc_mech_type;
1430 1417 if (mech_type == SHA1_MECH_INFO_TYPE)
1431 1418 ctx_len = sizeof (sha1_ctx_t);
1432 1419 else {
1433 1420 ASSERT(mech_type == SHA1_HMAC_MECH_INFO_TYPE ||
1434 1421 mech_type == SHA1_HMAC_GEN_MECH_INFO_TYPE);
1435 1422 ctx_len = sizeof (sha1_hmac_ctx_t);
1436 1423 }
1437 1424
1438 1425 bzero(ctx->cc_provider_private, ctx_len);
1439 1426 kmem_free(ctx->cc_provider_private, ctx_len);
1440 1427 ctx->cc_provider_private = NULL;
1441 1428
1442 1429 return (CRYPTO_SUCCESS);
1443 1430 }
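For reference, a kernel-style sketch of the two-pass HMAC flow that sha1_mac_init_ctx(), sha1_mac_update() and sha1_mac_final() implement piecewise: SHA1(key XOR opad, SHA1(key XOR ipad, text)). Assumptions: only the SHA1Init/SHA1Update/SHA1Final interfaces from <sys/sha1.h> that this module already uses; the block and digest sizes below mirror SHA1_HMAC_BLOCK_SIZE and SHA1_DIGEST_LENGTH; the key is assumed to already fit in one block, which sha1_mac_init() guarantees by pre-digesting oversized keys.

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/sha1.h>

#define	HMAC_BLOCK	64	/* mirrors SHA1_HMAC_BLOCK_SIZE */
#define	HMAC_DIGEST	20	/* mirrors SHA1_DIGEST_LENGTH */

static void
hmac_sha1_sketch(const uint8_t *key, size_t keylen,
    const uint8_t *text, size_t textlen, uint8_t digest[HMAC_DIGEST])
{
	uint8_t ipad[HMAC_BLOCK], opad[HMAC_BLOCK];
	uint8_t inner[HMAC_DIGEST];
	SHA1_CTX icontext, ocontext;
	uint_t i;

	/* pad the key to one block and XOR with the ipad/opad constants */
	bzero(ipad, sizeof (ipad));
	bzero(opad, sizeof (opad));
	bcopy(key, ipad, keylen);
	bcopy(key, opad, keylen);
	for (i = 0; i < HMAC_BLOCK; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	/* inner digest: SHA1(key XOR ipad, text) */
	SHA1Init(&icontext);
	SHA1Update(&icontext, ipad, sizeof (ipad));
	SHA1Update(&icontext, text, textlen);
	SHA1Final(inner, &icontext);

	/* outer digest: SHA1(key XOR opad, inner digest) */
	SHA1Init(&ocontext);
	SHA1Update(&ocontext, opad, sizeof (opad));
	SHA1Update(&ocontext, inner, sizeof (inner));
	SHA1Final(digest, &ocontext);
}

The real entry points additionally stream input through sha1_digest_update_uio()/sha1_digest_update_mblk() and, for the SHA1_HMAC_GENERAL mechanism, truncate the result to the requested digest_len.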