/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * The md4 kernel module is created with a single modlinkage element;
 * this differs from the md5 and sha1 modules, which also provide a
 * legacy misc variant for direct calls to the Init/Update/Final routines.
 * The one linkage element is:
 *
 * - a modlcrypto that allows the module to register with the Kernel
 *   Cryptographic Framework (KCF) as a software provider for the MD4
 *   mechanisms.
 */
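
/*
 * Note: kernel consumers do not call into this file directly.  A client
 * typically looks up the mechanism with crypto_mech2id(SUN_CKM_MD4) and
 * then drives it through the KCF consumer API (e.g. crypto_digest()),
 * which dispatches to the digest entry points registered below.
 */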

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/sysmacros.h>
#include <sys/strsun.h>
#include <sys/note.h>
#include <sys/md4.h>

extern struct mod_ops mod_miscops;
extern struct mod_ops mod_cryptoops;

/*
 * Module linkage information for the kernel.
 */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	"MD4 Kernel SW Provider"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcrypto,
	NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */

typedef enum md4_mech_type {
	MD4_MECH_INFO_TYPE,		/* SUN_CKM_MD4 */
} md4_mech_type_t;

#define	MD4_DIGEST_LENGTH	16	/* MD4 digest length in bytes */

/*
 * Context for MD4 mechanism.
 */
typedef struct md4_ctx {
	md4_mech_type_t		mc_mech_type;	/* type of context */
	MD4_CTX			mc_md4_ctx;	/* MD4 context */
} md4_ctx_t;

/*
 * Macros to access the MD4 contexts from a context passed
 * by KCF to one of the entry points.
 */
#define	PROV_MD4_CTX(ctx)	((md4_ctx_t *)(ctx)->cc_provider_private)

/*
 * Mechanism info structure passed to KCF during registration.
 */
static crypto_mech_info_t md4_mech_info_tab[] = {
	/* MD4 */
	{SUN_CKM_MD4, MD4_MECH_INFO_TYPE,
	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS},
};
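
/*
 * MD4 is an unkeyed digest, so the minimum and maximum key sizes above
 * are zero; CRYPTO_KEYSIZE_UNIT_IN_BITS only records the unit in which
 * those (unused) sizes are expressed.
 */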

static void md4_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t md4_control_ops = {
	md4_provider_status
};

static int md4_digest_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_req_handle_t);
static int md4_digest(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int md4_digest_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);

static crypto_digest_ops_t md4_digest_ops = {
	md4_digest_init,
	md4_digest,
	md4_digest_update,
	NULL,
	md4_digest_final,
	md4_digest_atomic
};
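
/*
 * The NULL slot above is the digest_key entry point, which this
 * provider does not implement (MD4 is not a keyed digest).
 */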

static crypto_ops_t md4_crypto_ops = {
	&md4_control_ops,
	&md4_digest_ops,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};
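
/*
 * Only the control and digest operation vectors are supplied; the
 * remaining crypto_ops_t slots (cipher, MAC, sign, verify, etc.) are
 * left NULL since this provider implements digest mechanisms only.
 */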

static crypto_provider_info_t md4_prov_info = {
	CRYPTO_SPI_VERSION_1,
	"MD4 Software Provider",
	CRYPTO_SW_PROVIDER,
	{&modlinkage},
	NULL,
	&md4_crypto_ops,
	sizeof (md4_mech_info_tab)/sizeof (crypto_mech_info_t),
	md4_mech_info_tab
};
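
/*
 * Handle returned by crypto_register_provider() in _init().  A non-zero
 * value means the provider is currently registered with KCF; _fini()
 * uses it to unregister before the module is removed.
 */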
static crypto_kcf_provider_handle_t md4_prov_handle = 0;

int
_init(void)
{
	int ret;

	if ((ret = mod_install(&modlinkage)) != 0)
		return (ret);

	/* Register with KCF. If the registration fails, remove the module. */
	if (crypto_register_provider(&md4_prov_info, &md4_prov_handle)) {
		(void) mod_remove(&modlinkage);
		return (EACCES);
	}

	return (0);
}

int
_fini(void)
{
	/* Unregister from KCF if module is registered */
	if (md4_prov_handle != 0) {
		if (crypto_unregister_provider(md4_prov_handle))
			return (EBUSY);
		md4_prov_handle = 0;
	}

	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * KCF software provider control entry points.
 */
/* ARGSUSED */
static void
md4_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}

/*
 * KCF software provider digest entry points.
 */

static int
md4_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_req_handle_t req)
{
	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Allocate and initialize MD4 context.
	 */
	ctx->cc_provider_private = kmem_alloc(sizeof (md4_ctx_t),
	    crypto_kmflag(req));
	if (ctx->cc_provider_private == NULL)
		return (CRYPTO_HOST_MEMORY);

	PROV_MD4_CTX(ctx)->mc_mech_type = MD4_MECH_INFO_TYPE;
	MD4Init(&PROV_MD4_CTX(ctx)->mc_md4_ctx);

	return (CRYPTO_SUCCESS);
}

/*
 * Helper MD4 digest update function for uio data.
 */
static int
md4_digest_update_uio(MD4_CTX *md4_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	uint_t vec_idx;
	size_t cur_len;

	/* we support only kernel buffer */
	if (data->cd_uio->uio_segflg != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing data to be
	 * digested.
	 */
	for (vec_idx = 0; vec_idx < data->cd_uio->uio_iovcnt &&
	    offset >= data->cd_uio->uio_iov[vec_idx].iov_len;
	    offset -= data->cd_uio->uio_iov[vec_idx++].iov_len)
		;
	if (vec_idx == data->cd_uio->uio_iovcnt) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the iovecs.
	 */
	while (vec_idx < data->cd_uio->uio_iovcnt && length > 0) {
		cur_len = MIN(data->cd_uio->uio_iov[vec_idx].iov_len -
		    offset, length);

		MD4Update(md4_ctx, data->cd_uio->uio_iov[vec_idx].iov_base +
		    offset, cur_len);

		length -= cur_len;
		vec_idx++;
		offset = 0;
	}

	if (vec_idx == data->cd_uio->uio_iovcnt && length > 0) {
		/*
		 * The end of the specified iovecs was reached but
		 * the length requested could not be processed, i.e.
		 * the caller requested to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}

/*
 * Helper MD4 digest final function for uio data.
 * digest_len is the length of the desired digest. If digest_len
 * is smaller than the default MD4 digest length, the caller
 * must pass a scratch buffer, digest_scratch, which must
 * be at least MD4_DIGEST_LENGTH bytes.
 */
static int
md4_digest_final_uio(MD4_CTX *md4_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	uint_t vec_idx;

	/* we support only kernel buffer */
	if (digest->cd_uio->uio_segflg != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing ptr to the digest to
	 * be returned.
	 */
	for (vec_idx = 0; vec_idx < digest->cd_uio->uio_iovcnt &&
	    offset >= digest->cd_uio->uio_iov[vec_idx].iov_len;
	    offset -= digest->cd_uio->uio_iov[vec_idx++].iov_len)
		;
	if (vec_idx == digest->cd_uio->uio_iovcnt) {
		/*
		 * The caller specified an offset that is
		 * larger than the total size of the buffers
		 * it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <=
	    digest->cd_uio->uio_iov[vec_idx].iov_len) {
		/*
		 * The computed MD4 digest will fit in the current
		 * iovec.
		 */
		if (digest_len != MD4_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			MD4Final(digest_scratch, md4_ctx);
			bcopy(digest_scratch, (uchar_t *)digest->
			    cd_uio->uio_iov[vec_idx].iov_base + offset,
			    digest_len);
		} else {
			MD4Final((uchar_t *)digest->
			    cd_uio->uio_iov[vec_idx].iov_base + offset,
			    md4_ctx);
		}
	} else {
		/*
		 * The computed digest will be crossing one or more iovecs.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piecemeal to the specified digest iovecs.
		 */
		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		MD4Final(digest_tmp, md4_ctx);

		while (vec_idx < digest->cd_uio->uio_iovcnt && length > 0) {
			cur_len = MIN(digest->cd_uio->uio_iov[vec_idx].iov_len -
			    offset, length);
			bcopy(digest_tmp + scratch_offset,
			    digest->cd_uio->uio_iov[vec_idx].iov_base + offset,
			    cur_len);

			length -= cur_len;
			vec_idx++;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (vec_idx == digest->cd_uio->uio_iovcnt && length > 0) {
			/*
			 * The end of the specified iovecs was reached but
			 * the length requested could not be processed, i.e.
			 * the caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}

/*
 * Helper MD4 digest update for mblk's.
 */
static int
md4_digest_update_mblk(MD4_CTX *md4_ctx, crypto_data_t *data)
{
	off_t offset = data->cd_offset;
	size_t length = data->cd_length;
	mblk_t *mp;
	size_t cur_len;

	/*
	 * Jump to the first mblk_t containing data to be digested.
	 */
	for (mp = data->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont)
		;
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/*
	 * Now do the digesting on the mblk chain.
	 */
	while (mp != NULL && length > 0) {
		cur_len = MIN(MBLKL(mp) - offset, length);
		MD4Update(md4_ctx, mp->b_rptr + offset, cur_len);
		length -= cur_len;
		offset = 0;
		mp = mp->b_cont;
	}

	if (mp == NULL && length > 0) {
		/*
		 * The end of the mblk chain was reached but the length
		 * requested could not be processed, i.e. the caller
		 * requested to digest more data than it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	return (CRYPTO_SUCCESS);
}

/*
 * Helper MD4 digest final for mblk's.
 * digest_len is the length of the desired digest. If digest_len
 * is smaller than the default MD4 digest length, the caller
 * must pass a scratch buffer, digest_scratch, which must
 * be at least MD4_DIGEST_LENGTH bytes.
 */
static int
md4_digest_final_mblk(MD4_CTX *md4_ctx, crypto_data_t *digest,
    ulong_t digest_len, uchar_t *digest_scratch)
{
	off_t offset = digest->cd_offset;
	mblk_t *mp;

	/*
	 * Jump to the first mblk_t that will be used to store the digest.
	 */
	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont)
		;
	if (mp == NULL) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + digest_len <= MBLKL(mp)) {
		/*
		 * The computed MD4 digest will fit in the current mblk.
		 * Do the MD4Final() in-place.
		 */
		if (digest_len != MD4_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			MD4Final(digest_scratch, md4_ctx);
			bcopy(digest_scratch, mp->b_rptr + offset, digest_len);
		} else {
			MD4Final(mp->b_rptr + offset, md4_ctx);
		}
	} else {
		/*
		 * The computed digest will be crossing one or more mblk's.
		 * This is bad performance-wise but we need to support it.
		 * Allocate a small scratch buffer on the stack and
		 * copy it piecemeal to the specified digest mblk's.
		 */
		uchar_t digest_tmp[MD4_DIGEST_LENGTH];
		off_t scratch_offset = 0;
		size_t length = digest_len;
		size_t cur_len;

		MD4Final(digest_tmp, md4_ctx);

		while (mp != NULL && length > 0) {
			cur_len = MIN(MBLKL(mp) - offset, length);
			bcopy(digest_tmp + scratch_offset,
			    mp->b_rptr + offset, cur_len);

			length -= cur_len;
			mp = mp->b_cont;
			scratch_offset += cur_len;
			offset = 0;
		}

		if (mp == NULL && length > 0) {
			/*
			 * The end of the specified mblk chain was reached
			 * but the length requested could not be processed,
			 * i.e. the caller requested to digest more data
			 * than it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}

/* ARGSUSED */
static int
md4_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
		digest->cd_length = MD4_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, free context and bail */
		kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
		ctx->cc_provider_private = NULL;
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do an MD4 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */
	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
	ctx->cc_provider_private = NULL;
	return (ret);
}

/* ARGSUSED */
static int
md4_digest_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	return (ret);
}

/* ARGSUSED */
static int
md4_digest_final(crypto_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;

	ASSERT(ctx->cc_provider_private != NULL);

	/*
	 * We need to just return the length needed to store the output.
	 * We should not destroy the context for the following cases.
	 */
	if ((digest->cd_length == 0) ||
	    (digest->cd_length < MD4_DIGEST_LENGTH)) {
		digest->cd_length = MD4_DIGEST_LENGTH;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	/*
	 * Do an MD4 final.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &PROV_MD4_CTX(ctx)->mc_md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&PROV_MD4_CTX(ctx)->mc_md4_ctx,
		    digest, MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	/* all done, free context and return */
	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	kmem_free(ctx->cc_provider_private, sizeof (md4_ctx_t));
	ctx->cc_provider_private = NULL;

	return (ret);
}

/* ARGSUSED */
static int
md4_digest_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	MD4_CTX md4_ctx;

	if (mechanism->cm_type != MD4_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/*
	 * Do the MD4 init.
	 */
	MD4Init(&md4_ctx);

	/*
	 * Do the MD4 update on the specified input data.
	 */
	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Update(&md4_ctx, data->cd_raw.iov_base + data->cd_offset,
		    data->cd_length);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_update_uio(&md4_ctx, data);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_update_mblk(&md4_ctx, data);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* the update failed, bail */
		digest->cd_length = 0;
		return (ret);
	}

	/*
	 * Do an MD4 final, must be done separately since the digest
	 * type can be different than the input data type.
	 */
	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		MD4Final((unsigned char *)digest->cd_raw.iov_base +
		    digest->cd_offset, &md4_ctx);
		break;
	case CRYPTO_DATA_UIO:
		ret = md4_digest_final_uio(&md4_ctx, digest,
		    MD4_DIGEST_LENGTH, NULL);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md4_digest_final_mblk(&md4_ctx, digest,
		    MD4_DIGEST_LENGTH, NULL);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		digest->cd_length = MD4_DIGEST_LENGTH;
	} else {
		digest->cd_length = 0;
	}

	return (ret);
}