fixup .text where possible
7127 remove -Wno-missing-braces from Makefile.uts
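This changeset replaces partially-bracketed aggregate initializers with fully-bracketed or designated-initializer forms, so that gcc's -Wmissing-braces no longer fires and the blanket -Wno-missing-braces flag can be dropped from Makefile.uts. As a rough sketch of the warning in question (hypothetical struct names, not taken from this file):

/*
 * gcc -Wmissing-braces warns when a nested aggregate is initialized
 * through brace elision.  "struct linkage" stands in for an aggregate
 * whose member is itself an aggregate, like the ml_linkage[] pointer
 * array inside struct modlinkage.
 */
struct linkage {
	void	*l_ptrs[2];
};

struct module {
	int		m_rev;
	struct linkage	m_link;
};

/* Legal C, but relies on brace elision: -Wmissing-braces warns here. */
static struct module warns = { 1, (void *)0, (void *)0 };

/* Fully bracketed: one pair of braces per aggregate level, no warning. */
static struct module quiet = { 1, { { (void *)0, (void *)0 } } };

The modlinkage hunk below takes the fully-bracketed route: ml_linkage is an array of pointers, so its initializers gain an enclosing pair of braces.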
--- old/usr/src/uts/common/crypto/io/md4_mod.c
+++ new/usr/src/uts/common/crypto/io/md4_mod.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * In the kernel, the md4 module is created with a single modlinkage
29 29 * (unlike md5 and sha1, which also have a legacy misc variant for
30 30 * direct calls to the Init/Update/Final routines) containing:
31 31 *
32 32 * - a modlcrypto that allows the module to register with the Kernel
33 33 * Cryptographic Framework (KCF) as a software provider for the MD4
34 34 * mechanisms.
35 35 */
36 36
37 37 #include <sys/types.h>
38 38 #include <sys/systm.h>
39 39 #include <sys/modctl.h>
40 40 #include <sys/cmn_err.h>
41 41 #include <sys/ddi.h>
42 42 #include <sys/crypto/common.h>
43 43 #include <sys/crypto/spi.h>
44 44 #include <sys/sysmacros.h>
45 45 #include <sys/strsun.h>
46 46 #include <sys/note.h>
47 47 #include <sys/md4.h>
48 48
49 49 extern struct mod_ops mod_miscops;
50 50 extern struct mod_ops mod_cryptoops;
51 51
52 52 /*
53 53 * Module linkage information for the kernel.
54 54 */
55 55
56 56 static struct modlcrypto modlcrypto = {
57 57 &mod_cryptoops,
58 58 "MD4 Kernel SW Provider"
59 59 };
60 60
61 61 static struct modlinkage modlinkage = {
62 62 MODREV_1,
63 - (void *)&modlcrypto,
64 - NULL
63 + { (void *)&modlcrypto,
64 + NULL }
65 65 };
66 66
67 67 /*
68 68 * CSPI information (entry points, provider info, etc.)
69 69 */
70 70
71 71 typedef enum md4_mech_type {
72 72 MD4_MECH_INFO_TYPE, /* SUN_CKM_MD4 */
73 73 } md4_mech_type_t;
74 74
75 75 #define MD4_DIGEST_LENGTH 16 /* MD4 digest length in bytes */
76 76
77 77 /*
78 78 * Context for MD4 mechanism.
79 79 */
80 80 typedef struct md4_ctx {
81 81 md4_mech_type_t mc_mech_type; /* type of context */
82 82 MD4_CTX mc_md4_ctx; /* MD4 context */
83 83 } md4_ctx_t;
84 84
85 85 /*
86 86 * Macros to access the MD4 contexts from a context passed
87 87 * by KCF to one of the entry points.
88 88 */
89 89
90 90 #define PROV_MD4_CTX(ctx) ((md4_ctx_t *)(ctx)->cc_provider_private)
91 91
92 92 /*
93 93 * Mechanism info structure passed to KCF during registration.
94 94 */
95 95 static crypto_mech_info_t md4_mech_info_tab[] = {
96 96 /* MD4 */
97 97 {SUN_CKM_MD4, MD4_MECH_INFO_TYPE,
98 98 CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
99 99 0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
100 100 };
101 101
102 102 static void md4_provider_status(crypto_provider_handle_t, uint_t *);
103 103
104 104 static crypto_control_ops_t md4_control_ops = {
105 105 md4_provider_status
106 106 };
107 107
108 108 static int md4_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
109 109 crypto_req_handle_t);
110 110 static int md4_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
111 111 crypto_req_handle_t);
112 112 static int md4_digest_update(crypto_ctx_t *, crypto_data_t *,
113 113 crypto_req_handle_t);
114 114 static int md4_digest_final(crypto_ctx_t *, crypto_data_t *,
115 115 crypto_req_handle_t);
116 116 static int md4_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
117 117 crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
118 118 crypto_req_handle_t);
119 119
120 120 static crypto_digest_ops_t md4_digest_ops = {
121 121 md4_digest_init,
122 122 md4_digest,
123 123 md4_digest_update,
124 124 NULL,
125 125 md4_digest_final,
126 126 md4_digest_atomic
127 127 };
128 128
129 129 static crypto_ops_t md4_crypto_ops = {
130 - &md4_control_ops,
131 - &md4_digest_ops,
132 - NULL,
133 - NULL,
134 - NULL,
135 - NULL,
136 - NULL,
137 - NULL,
138 - NULL,
139 - NULL,
140 - NULL,
141 - NULL,
142 - NULL,
143 - NULL,
130 + .co_control_ops = &md4_control_ops,
131 + .co_digest_ops = &md4_digest_ops,
132 + .co_cipher_ops = NULL,
133 + .co_mac_ops = NULL,
134 + .co_sign_ops = NULL,
135 + .co_verify_ops = NULL,
136 + .co_dual_ops = NULL,
137 + .co_dual_cipher_mac_ops = NULL,
138 + .co_random_ops = NULL,
139 + .co_session_ops = NULL,
140 + .co_object_ops = NULL,
141 + .co_key_ops = NULL,
142 + .co_provider_ops = NULL,
143 + .co_ctx_ops = NULL
144 144 };
145 145
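The crypto_ops_t hunk above takes a different route: C99 designated initializers. Naming each member documents which ops vector is being set and avoids brace elision altogether, so -Wmissing-braces stays quiet even when some members are aggregates. A minimal sketch of the idiom, using made-up types rather than the real KCF definitions:

#include <stddef.h>

/* Stand-in for an ops table; the real crypto_ops_t has many more members. */
struct ops_table {
	int	(*ot_control)(void);
	int	(*ot_digest)(void);
	int	(*ot_cipher)(void);
};

static int
example_control(void)
{
	return (0);
}

/*
 * Designated initializers name each member explicitly; members left
 * out (or set to NULL, as here) are zero-filled, and no inner braces
 * are ever implied.
 */
static struct ops_table example_ops = {
	.ot_control = example_control,
	.ot_digest = NULL,
	.ot_cipher = NULL
};

int
main(void)
{
	return (example_ops.ot_control());
}

A side benefit of the designated form: if a member is ever added in the middle of crypto_ops_t, the named initializers keep landing on the right slots, whereas a positional NULL list would silently shift.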
146 -static crypto_provider_info_t md4_prov_info = {
146 +static crypto_provider_info_t md4_prov_info = {{{{
147 147 CRYPTO_SPI_VERSION_1,
148 148 "MD4 Software Provider",
149 149 CRYPTO_SW_PROVIDER,
150 150 {&modlinkage},
151 151 NULL,
152 152 &md4_crypto_ops,
153 153 sizeof (md4_mech_info_tab)/sizeof (crypto_mech_info_t),
154 154 md4_mech_info_tab
155 -};
155 +}}}};
156 156
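The quadruple braces on md4_prov_info follow the same one-pair-per-aggregate-level rule: crypto_provider_info_t evidently wraps the version-specific provider fields inside further nested aggregates (the exact layering lives in the crypto SPI headers and is not shown in this webrev). A made-up illustration of how nesting multiplies the leading braces:

/*
 * Invented types, only to show why a fully bracketed initializer can
 * open with several braces in a row.
 */
struct prov_v1 {
	int	pv_version;
	char	*pv_description;
};

union prov_union {
	struct prov_v1	pu_v1;
	long		pu_pad;
};

struct prov_info {
	union prov_union	pi_un;
};

/* struct, then union, then inner struct: three opening braces. */
static struct prov_info example_info = {{{ 1, "example provider" }}};

One more level of nesting would make it four opening braces, which is presumably what md4_prov_info's type requires here.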
157 157 static crypto_kcf_provider_handle_t md4_prov_handle = NULL;
158 158
159 159 int
160 160 _init(void)
161 161 {
162 162 int ret;
163 163
164 164 if ((ret = mod_install(&modlinkage)) != 0)
165 165 return (ret);
166 166
167 167 /* Register with KCF. If the registration fails, remove the module. */
168 168 if (crypto_register_provider(&md4_prov_info, &md4_prov_handle)) {
169 169 (void) mod_remove(&modlinkage);
170 170 return (EACCES);
171 171 }
172 172
173 173 return (0);
174 174 }
175 175
176 176 int
177 177 _fini(void)
178 178 {
179 179 /* Unregister from KCF if module is registered */
180 180 if (md4_prov_handle != NULL) {
181 181 if (crypto_unregister_provider(md4_prov_handle))
182 182 return (EBUSY);
183 183
184 184 md4_prov_handle = NULL;
185 185 }
186 186
187 187 return (mod_remove(&modlinkage));
188 188 }
189 189
190 190 int
191 191 _info(struct modinfo *modinfop)
192 192 {
193 193 return (mod_info(&modlinkage, modinfop));
194 194 }
195 195
196 196 /*
197 197 * KCF software provider control entry points.
198 198 */
199 199 /* ARGSUSED */
200 200 static void
201 201 md4_provider_status(crypto_provider_handle_t provider, uint_t *status)
202 202 {
203 203 *status = CRYPTO_PROVIDER_READY;
204 204 }
205 205
206 206 /*
207 207 * KCF software provider digest entry points.
208 208 */
209 209
210 210 static int
211 211 md4_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
212 212 crypto_req_handle_t req)
213 213 {
214 214 if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
215 215 return (CRYPTO_MECHANISM_INVALID);
216 216
217 217 /*
218 218 * Allocate and initialize MD4 context.
219 219 */
220 220 ctx->cc_provider_private = kmem_alloc(sizeof (md4_ctx_t),
221 221 crypto_kmflag(req));
222 222 if (ctx->cc_provider_private == NULL)
223 223 return (CRYPTO_HOST_MEMORY);
224 224
225 225 PROV_MD4_CTX(ctx)->mc_mech_type = MD4_MECH_INFO_TYPE;
226 226 MD4Init(&PROV_MD4_CTX(ctx)->mc_md4_ctx);
227 227
228 228 return (CRYPTO_SUCCESS);
229 229 }
230 230
231 231 /*
232 232 * Helper MD4 digest update function for uio data.
233 233 */
234 234 static int
235 235 md4_digest_update_uio(MD4_CTX *md4_ctx, crypto_data_t *data)
236 236 {
237 237 off_t offset = data->cd_offset;
238 238 size_t length = data->cd_length;
239 239 uint_t vec_idx;
240 240 size_t cur_len;
241 241
242 242 /* we support only kernel buffers */
243 243 if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
244 244 return (CRYPTO_ARGUMENTS_BAD);
245 245
246 246 /*
247 247 * Jump to the first iovec containing data to be
248 248 * digested.
249 249 */
250 250 for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
251 251 offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
252 252 offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
253 253 ;
254 254 if (vec_idx == data->cd_uio->uio_iovcnt) {
255 255 /*
256 256 * The caller specified an offset that is larger than the
257 257 * total size of the buffers it provided.
258 258 */
259 259 return (CRYPTO_DATA_LEN_RANGE);
260 260 }
261 261
262 262 /*
263 263 * Now do the digesting on the iovecs.
264 264 */
265 265 while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
266 266 cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
267 267 offset, length);
268 268
269 269 MD4Update(md4_ctx, data->cd_uio->uio_iov[vec_idx].iov_base +
270 270 offset, cur_len);
271 271
272 272 length -= cur_len;
273 273 vec_idx++;
274 274 offset = 0;
275 275 }
276 276
277 277 if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
278 278 /*
279 279 * The end of the specified iovecs was reached but
280 280 * the length requested could not be processed, i.e.
281 281 * the caller requested to digest more data than it provided.
282 282 */
283 283 return (CRYPTO_DATA_LEN_RANGE);
284 284 }
285 285
286 286 return (CRYPTO_SUCCESS);
287 287 }
288 288
289 289 /*
290 290 * Helper MD4 digest final function for uio data.
291 291 * digest_len is the length of the desired digest. If digest_len
292 292 * is smaller than the default MD4 digest length, the caller
293 293 * must pass a scratch buffer, digest_scratch, which must
294 294 * be at least MD4_DIGEST_LENGTH bytes.
295 295 */
296 296 static int
297 297 md4_digest_final_uio(MD4_CTX *md4_ctx, crypto_data_t *digest,
298 298 ulong_t digest_len, uchar_t *digest_scratch)
299 299 {
300 300 off_t offset = digest->cd_offset;
301 301 uint_t vec_idx;
302 302
303 303 /* we support only kernel buffers */
304 304 if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
305 305 return (CRYPTO_ARGUMENTS_BAD);
306 306
307 307 /*
308 308 * Jump to the first iovec containing ptr to the digest to
309 309 * be returned.
310 310 */
311 311 for (vec_idx = 0; offset >= digest->cd_uio->uio_iov[vec_idx].iov_len &&
312 312 vec_idx < digest->cd_uio->uio_iovcnt;
313 313 offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
314 314 ;
315 315 if (vec_idx == digest->cd_uio->uio_iovcnt) {
316 316 /*
317 317 * The caller specified an offset that is
318 318 * larger than the total size of the buffers
319 319 * it provided.
320 320 */
321 321 return (CRYPTO_DATA_LEN_RANGE);
322 322 }
323 323
324 324 if (offset + digest_len <=
325 325 digest->cd_uio->uio_iov[vec_idx].iov_len) {
326 326 /*
327 327 * The computed MD4 digest will fit in the current
328 328 * iovec.
329 329 */
330 330 if (digest_len != MD4_DIGEST_LENGTH) {
331 331 /*
332 332 * The caller requested a short digest. Digest
333 333 * into a scratch buffer and return to
334 334 * the user only what was requested.
335 335 */
336 336 MD4Final(digest_scratch, md4_ctx);
337 337 bcopy(digest_scratch, (uchar_t *)digest->
338 338 cd_uio->uio_iov[vec_idx].iov_base + offset,
339 339 digest_len);
340 340 } else {
341 341 MD4Final((uchar_t *)digest->
342 342 cd_uio->uio_iov[vec_idx].iov_base + offset,
343 343 md4_ctx);
344 344 }
345 345 } else {
346 346 /*
347 347 * The computed digest will cross one or more iovecs.
348 348 * This is bad performance-wise, but we need to support it.
349 349 * Allocate a small scratch buffer on the stack and
350 350 * copy it piecemeal to the specified digest iovecs.
351 351 */
352 352 uchar_t digest_tmp[MD4_DIGEST_LENGTH];
353 353 off_t scratch_offset = 0;
354 354 size_t length = digest_len;
355 355 size_t cur_len;
356 356
357 357 MD4Final(digest_tmp, md4_ctx);
358 358
359 359 while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
360 360 cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
361 361 offset, length);
362 362 bcopy(digest_tmp + scratch_offset,
363 363 digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
364 364 cur_len);
365 365
366 366 length -= cur_len;
367 367 vec_idx++;
368 368 scratch_offset += cur_len;
369 369 offset = 0;
370 370 }
371 371
372 372 if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
373 373 /*
374 374 * The end of the specified iovecs was reached but
375 375 * the length requested could not be processed, i.e.
376 376 * the caller requested to digest more data than it
377 377 * provided.
378 378 */
379 379 return (CRYPTO_DATA_LEN_RANGE);
380 380 }
381 381 }
382 382
383 383 return (CRYPTO_SUCCESS);
384 384 }
385 385
386 386 /*
387 387 * Helper MD4 digest update for mblk's.
388 388 */
389 389 static int
390 390 md4_digest_update_mblk(MD4_CTX *md4_ctx, crypto_data_t *data)
391 391 {
392 392 off_t offset = data->cd_offset;
393 393 size_t length = data->cd_length;
394 394 mblk_t *mp;
395 395 size_t cur_len;
396 396
397 397 /*
398 398 * Jump to the first mblk_t containing data to be digested.
399 399 */
400 400 for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
401 401 offset -= MBLKL(mp), mp = mp->b_cont)
402 402 ;
403 403 if (mp == NULL) {
404 404 /*
405 405 * The caller specified an offset that is larger than the
406 406 * total size of the buffers it provided.
407 407 */
408 408 return (CRYPTO_DATA_LEN_RANGE);
409 409 }
410 410
411 411 /*
412 412 * Now do the digesting on the mblk chain.
413 413 */
414 414 while (mp != NULL && length > 0) {
415 415 cur_len = MIN(MBLKL(mp) - offset, length);
416 416 MD4Update(md4_ctx, mp->b_rptr + offset, cur_len);
417 417 length -= cur_len;
418 418 offset = 0;
419 419 mp = mp->b_cont;
420 420 }
421 421
422 422 if (mp == NULL && length > 0) {
423 423 /*
424 424 * The end of the mblk was reached but the length requested
425 425 * could not be processed, i.e. the caller requested
426 426 * to digest more data than it provided.
427 427 */
428 428 return (CRYPTO_DATA_LEN_RANGE);
429 429 }
430 430
431 431 return (CRYPTO_SUCCESS);
432 432 }
433 433
434 434 /*
435 435 * Helper MD4 digest final for mblk's.
436 436 * digest_len is the length of the desired digest. If digest_len
437 437 * is smaller than the default MD4 digest length, the caller
438 438 * must pass a scratch buffer, digest_scratch, which must
439 439 * be at least MD4_DIGEST_LENGTH bytes.
440 440 */
441 441 static int
442 442 md4_digest_final_mblk(MD4_CTX *md4_ctx, crypto_data_t *digest,
443 443 ulong_t digest_len, uchar_t *digest_scratch)
444 444 {
445 445 off_t offset = digest->cd_offset;
446 446 mblk_t *mp;
447 447
448 448 /*
449 449 * Jump to the first mblk_t that will be used to store the digest.
450 450 */
451 451 for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
452 452 offset -= MBLKL(mp), mp = mp->b_cont)
453 453 ;
454 454 if (mp == NULL) {
455 455 /*
456 456 * The caller specified an offset that is larger than the
457 457 * total size of the buffers it provided.
458 458 */
459 459 return (CRYPTO_DATA_LEN_RANGE);
460 460 }
461 461
462 462 if (offset + digest_len <= MBLKL(mp)) {
463 463 /*
464 464 * The computed MD4 digest will fit in the current mblk.
465 465 * Do the MD4Final() in-place.
466 466 */
467 467 if (digest_len != MD4_DIGEST_LENGTH) {
468 468 /*
469 469 * The caller requested a short digest. Digest
470 470 * into a scratch buffer and return to
471 471 * the user only what was requested.
472 472 */
473 473 MD4Final(digest_scratch, md4_ctx);
474 474 bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
475 475 } else {
476 476 MD4Final(mp->b_rptr + offset, md4_ctx);
477 477 }
478 478 } else {
479 479 /*
480 480 * The computed digest will cross one or more mblks.
481 481 * This is bad performance-wise, but we need to support it.
482 482 * Allocate a small scratch buffer on the stack and
483 483 * copy it piecemeal to the specified digest mblks.
484 484 */
485 485 uchar_t digest_tmp[MD4_DIGEST_LENGTH];
486 486 off_t scratch_offset = 0;
487 487 size_t length = digest_len;
488 488 size_t cur_len;
489 489
490 490 MD4Final(digest_tmp, md4_ctx);
491 491
492 492 while (mp != NULL && length > 0) {
493 493 cur_len = MIN(MBLKL(mp) - offset, length);
494 494 bcopy(digest_tmp + scratch_offset,
495 495 mp->b_rptr + offset, cur_len);
496 496
497 497 length -= cur_len;
498 498 mp = mp->b_cont;
499 499 scratch_offset += cur_len;
500 500 offset = 0;
501 501 }
502 502
503 503 if (mp == NULL && length > 0) {
504 504 /*
505 505 * The end of the specified mblk was reached but
506 506 * the length requested could not be processed, i.e.
507 507 * the caller requested to digest more data than it
508 508 * provided.
509 509 */
510 510 return (CRYPTO_DATA_LEN_RANGE);
511 511 }
512 512 }
513 513
514 514 return (CRYPTO_SUCCESS);
515 515 }
516 516
517 517 /* ARGSUSED */
518 518 static int
519 519 md4_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
520 520 crypto_req_handle_t req)
521 521 {
522 522 int ret = CRYPTO_SUCCESS;
523 523
524 524 ASSERT(ctx->cc_provider_private != NULL);
525 525
526 526 /*
527 527 * We need to just return the length needed to store the output.
528 528 * We should not destroy the context for the following cases.
529 529 */
530 530 if ((digest->cd_length == 0) ||
531 531 (digest->cd_length < MD4_DIGEST_LENGTH)) {
532 532 digest->cd_length = MD4_DIGEST_LENGTH;
533 533 return (CRYPTO_BUFFER_TOO_SMALL);
534 534 }
535 535
536 536 /*
537 537 * Do the MD4 update on the specified input data.
538 538 */
539 539 switch (data->cd_format) {
540 540 case CRYPTO_DATA_RAW:
541 541 MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
542 542 data->cd_raw.iov_base + data->cd_offset,
543 543 data->cd_length);
544 544 break;
545 545 case CRYPTO_DATA_UIO:
546 546 ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
547 547 data);
548 548 break;
549 549 case CRYPTO_DATA_MBLK:
550 550 ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
551 551 data);
552 552 break;
553 553 default:
554 554 ret = CRYPTO_ARGUMENTS_BAD;
555 555 }
556 556
557 557 if (ret != CRYPTO_SUCCESS) {
558 558 /* the update failed, free context and bail */
559 559 kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
560 560 ctx->cc_provider_private = NULL;
561 561 digest->cd_length = 0;
562 562 return (ret);
563 563 }
564 564
565 565 /*
566 566 * Do an MD4 final; this must be done separately since the digest
567 567 * type can differ from the input data type.
568 568 */
569 569 switch (digest->cd_format) {
570 570 case CRYPTO_DATA_RAW:
571 571 MD4Final((unsigned char *)digest->cd_raw.iov_base +
572 572 digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
573 573 break;
574 574 case CRYPTO_DATA_UIO:
575 575 ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
576 576 digest, MD4_DIGEST_LENGTH, NULL);
577 577 break;
578 578 case CRYPTO_DATA_MBLK:
579 579 ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
580 580 digest, MD4_DIGEST_LENGTH, NULL);
581 581 break;
582 582 default:
583 583 ret = CRYPTO_ARGUMENTS_BAD;
584 584 }
585 585
586 586 /* all done, free context and return */
587 587
588 588 if (ret == CRYPTO_SUCCESS) {
589 589 digest->cd_length = MD4_DIGEST_LENGTH;
590 590 } else {
591 591 digest->cd_length = 0;
592 592 }
593 593
594 594 kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
595 595 ctx->cc_provider_private = NULL;
596 596 return (ret);
597 597 }
598 598
599 599 /* ARGSUSED */
600 600 static int
601 601 md4_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
602 602 crypto_req_handle_t req)
603 603 {
604 604 int ret = CRYPTO_SUCCESS;
605 605
606 606 ASSERT(ctx->cc_provider_private != NULL);
607 607
608 608 /*
609 609 * Do the MD4 update on the specified input data.
610 610 */
611 611 switch (data->cd_format) {
612 612 case CRYPTO_DATA_RAW:
613 613 MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
614 614 data->cd_raw.iov_base + data->cd_offset,
615 615 data->cd_length);
616 616 break;
617 617 case CRYPTO_DATA_UIO:
618 618 ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
619 619 data);
620 620 break;
621 621 case CRYPTO_DATA_MBLK:
622 622 ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
623 623 data);
624 624 break;
625 625 default:
626 626 ret = CRYPTO_ARGUMENTS_BAD;
627 627 }
628 628
629 629 return (ret);
630 630 }
631 631
632 632 /* ARGSUSED */
633 633 static int
634 634 md4_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
635 635 crypto_req_handle_t req)
636 636 {
637 637 int ret = CRYPTO_SUCCESS;
638 638
639 639 ASSERT(ctx->cc_provider_private != NULL);
640 640
641 641 /*
642 642 * We need to just return the length needed to store the output.
643 643 * We should not destroy the context for the following cases.
644 644 */
645 645 if ((digest->cd_length == 0) ||
646 646 (digest->cd_length < MD4_DIGEST_LENGTH)) {
647 647 digest->cd_length = MD4_DIGEST_LENGTH;
648 648 return (CRYPTO_BUFFER_TOO_SMALL);
649 649 }
650 650
651 651 /*
652 652 * Do an MD4 final.
653 653 */
654 654 switch (digest->cd_format) {
655 655 case CRYPTO_DATA_RAW:
656 656 MD4Final((unsigned char *)digest->cd_raw.iov_base +
657 657 digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
658 658 break;
659 659 case CRYPTO_DATA_UIO:
660 660 ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
661 661 digest, MD4_DIGEST_LENGTH, NULL);
662 662 break;
663 663 case CRYPTO_DATA_MBLK:
664 664 ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
665 665 digest, MD4_DIGEST_LENGTH, NULL);
666 666 break;
667 667 default:
668 668 ret = CRYPTO_ARGUMENTS_BAD;
669 669 }
670 670
671 671 /* all done, free context and return */
672 672
673 673 if (ret == CRYPTO_SUCCESS) {
674 674 digest->cd_length = MD4_DIGEST_LENGTH;
675 675 } else {
676 676 digest->cd_length = 0;
677 677 }
678 678
679 679 kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
680 680 ctx->cc_provider_private = NULL;
681 681
682 682 return (ret);
683 683 }
684 684
685 685 /* ARGSUSED */
686 686 static int
687 687 md4_digest_atomic(crypto_provider_handle_t provider,
688 688 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
689 689 crypto_data_t *data, crypto_data_t *digest,
690 690 crypto_req_handle_t req)
691 691 {
692 692 int ret = CRYPTO_SUCCESS;
693 693 MD4_CTX md4_ctx;
694 694
695 695 if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
696 696 return (CRYPTO_MECHANISM_INVALID);
697 697
698 698 /*
699 699 * Do the MD4 init.
700 700 */
701 701 MD4Init(&md4_ctx);
702 702
703 703 /*
704 704 * Do the MD4 update on the specified input data.
705 705 */
706 706 switch (data->cd_format) {
707 707 case CRYPTO_DATA_RAW:
708 708 MD4Update(&md4_ctx, data->cd_raw.iov_base + data->cd_offset,
709 709 data->cd_length);
710 710 break;
711 711 case CRYPTO_DATA_UIO:
712 712 ret = md4_digest_update_uio(&md4_ctx, data);
713 713 break;
714 714 case CRYPTO_DATA_MBLK:
715 715 ret = md4_digest_update_mblk(&md4_ctx, data);
716 716 break;
717 717 default:
718 718 ret = CRYPTO_ARGUMENTS_BAD;
719 719 }
720 720
721 721 if (ret != CRYPTO_SUCCESS) {
722 722 /* the update failed, bail */
723 723 digest->cd_length = 0;
724 724 return (ret);
725 725 }
726 726
727 727 /*
728 728 * Do an MD4 final; this must be done separately since the digest
729 729 * type can differ from the input data type.
730 730 */
731 731 switch (digest->cd_format) {
732 732 case CRYPTO_DATA_RAW:
733 733 MD4Final((unsigned char *)digest->cd_raw.iov_base +
734 734 digest->cd_offset, &md4_ctx);
735 735 break;
736 736 case CRYPTO_DATA_UIO:
737 737 ret = md4_digest_final_uio(&md4_ctx, digest,
738 738 MD4_DIGEST_LENGTH, NULL);
739 739 break;
740 740 case CRYPTO_DATA_MBLK:
741 741 ret = md4_digest_final_mblk(&md4_ctx, digest,
742 742 MD4_DIGEST_LENGTH, NULL);
743 743 break;
744 744 default:
745 745 ret = CRYPTO_ARGUMENTS_BAD;
746 746 }
747 747
748 748 if (ret == CRYPTO_SUCCESS) {
749 749 digest->cd_length = MD4_DIGEST_LENGTH;
750 750 } else {
751 751 digest->cd_length = 0;
752 752 }
753 753
754 754 return (ret);
755 755 }