/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2015 by Saso Kiselkov. All rights reserved.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <modes/modes.h>
#include "aes_impl.h"
#ifndef _KERNEL
#include <stdlib.h>
#endif  /* !_KERNEL */

#if defined(__amd64)

/*
 * XORs a range of contiguous AES blocks in `data' with the blocks in
 * `dst' and places the result in `dst'. The `length' of the range must
 * be a multiple of AES_BLOCK_LEN. On x86-64 this uses the 128-bit XMM
 * (SSE) registers to maximize performance.
 */
static void
aes_xor_range(const uint8_t *data, uint8_t *dst, uint64_t length)
{
        uint64_t i = 0;

        /* First use the unrolled version. */
        for (; i + 8 * AES_BLOCK_LEN <= length; i += 8 * AES_BLOCK_LEN)
                aes_xor_intel8(&data[i], &dst[i]);
        /* Finish the rest in single blocks. */
        for (; i < length; i += AES_BLOCK_LEN)
                aes_xor_intel(&data[i], &dst[i]);
}

#else   /* !__amd64 */

/*
 * XORs a range of contiguous AES blocks in `data' with the blocks in
 * `dst' and places the result in `dst'. The `length' of the range must
 * be a multiple of AES_BLOCK_LEN.
 */
static void
aes_xor_range(const uint8_t *data, uint8_t *dst, uint64_t length)
{
        uint64_t i = 0;

        if (IS_P2ALIGNED2(dst, data, sizeof (uint64_t))) {
                /* Unroll the loop for efficiency. */
                for (; i + 8 * AES_BLOCK_LEN <= length; i += 8 * AES_BLOCK_LEN) {
                        AES_XOR_BLOCK_ALIGNED(&data[i + 0x00], &dst[i + 0x00]);
                        AES_XOR_BLOCK_ALIGNED(&data[i + 0x10], &dst[i + 0x10]);
                        AES_XOR_BLOCK_ALIGNED(&data[i + 0x20], &dst[i + 0x20]);
                        AES_XOR_BLOCK_ALIGNED(&data[i + 0x30], &dst[i + 0x30]);
                        AES_XOR_BLOCK_ALIGNED(&data[i + 0x40], &dst[i + 0x40]);
                        AES_XOR_BLOCK_ALIGNED(&data[i + 0x50], &dst[i + 0x50]);
                        AES_XOR_BLOCK_ALIGNED(&data[i + 0x60], &dst[i + 0x60]);
                        AES_XOR_BLOCK_ALIGNED(&data[i + 0x70], &dst[i + 0x70]);
                }
        }
        /* Finish the rest in single blocks. */
        for (; i < length; i += AES_BLOCK_LEN)
                AES_XOR_BLOCK(&data[i], &dst[i]);
}

#endif  /* !__amd64 */

/* Copy a 16-byte AES block from "in" to "out" */
void
aes_copy_block(const uint8_t *in, uint8_t *out)
{
        if (IS_P2ALIGNED2(in, out, sizeof (uint32_t))) {
                AES_COPY_BLOCK_ALIGNED(in, out);
        } else {
                AES_COPY_BLOCK_UNALIGNED(in, out);
        }
}

/* XOR a 16-byte AES block of data into dst */
void
aes_xor_block(const uint8_t *data, uint8_t *dst)
{
        if (IS_P2ALIGNED2(dst, data, sizeof (uint32_t))) {
                AES_XOR_BLOCK_ALIGNED(data, dst);
        } else {
                AES_XOR_BLOCK_UNALIGNED(data, dst);
        }
}
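
/*
 * Note: aes_copy_block() and aes_xor_block() are the exported generic
 * single-block helpers; the mode loops below pass the AES_COPY_BLOCK and
 * AES_XOR_BLOCK macros directly. The functions are presumably retained
 * for callers elsewhere in the AES provider that need a function pointer.
 */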

/*
 * Encrypt multiple blocks of data according to mode.
 */
int
aes_encrypt_contiguous_blocks(void *ctx, char *data, size_t length,
    crypto_data_t *out)
{
        aes_ctx_t *aes_ctx = ctx;
        int rv = CRYPTO_SUCCESS;

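        /*
         * Process the input in chunks of at most AES_OPSZ bytes. Each chunk
         * is bracketed by aes_accel_enter()/aes_accel_exit(), presumably to
         * bound how long the saved hardware-acceleration state is held.
         */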
        for (size_t i = 0; i < length; i += AES_OPSZ) {
                size_t opsz = MIN(length - i, AES_OPSZ);
                AES_ACCEL_SAVESTATE(savestate);
                aes_accel_enter(savestate);

                if (aes_ctx->ac_flags & CTR_MODE) {
                        rv = ctr_mode_contiguous_blocks(ctx, &data[i], opsz,
                            out, AES_BLOCK_LEN, aes_encrypt_block,
                            AES_XOR_BLOCK, aes_ctr_mode);
#ifdef _KERNEL
                } else if (aes_ctx->ac_flags & CCM_MODE) {
                        rv = ccm_mode_encrypt_contiguous_blocks(ctx, &data[i],
                            opsz, out, AES_BLOCK_LEN, aes_encrypt_block,
                            AES_COPY_BLOCK, AES_XOR_BLOCK);
                } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
                        rv = gcm_mode_encrypt_contiguous_blocks(ctx, &data[i],
                            opsz, out, AES_BLOCK_LEN, aes_encrypt_block,
                            AES_COPY_BLOCK, AES_XOR_BLOCK, aes_ctr_mode);
#endif
                } else if (aes_ctx->ac_flags & CBC_MODE) {
                        rv = cbc_encrypt_contiguous_blocks(ctx, &data[i], opsz,
                            out, AES_BLOCK_LEN, aes_encrypt_block,
                            AES_COPY_BLOCK, AES_XOR_BLOCK, aes_encrypt_cbc);
                } else {
                        rv = ecb_cipher_contiguous_blocks(ctx, &data[i], opsz,
                            out, AES_BLOCK_LEN, aes_encrypt_block,
                            aes_encrypt_ecb);
                }

                aes_accel_exit(savestate);

                if (rv != CRYPTO_SUCCESS)
                        break;
        }

        return (rv);
}

/*
 * Decrypt multiple blocks of data according to mode.
 */
int
aes_decrypt_contiguous_blocks(void *ctx, char *data, size_t length,
    crypto_data_t *out)
{
        aes_ctx_t *aes_ctx = ctx;
        int rv = CRYPTO_SUCCESS;

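        /*
         * As in the encrypt path, process the input in AES_OPSZ-sized
         * chunks, each bracketed by aes_accel_enter()/aes_accel_exit().
         */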
        for (size_t i = 0; i < length; i += AES_OPSZ) {
                size_t opsz = MIN(length - i, AES_OPSZ);
                AES_ACCEL_SAVESTATE(savestate);
                aes_accel_enter(savestate);

                if (aes_ctx->ac_flags & CTR_MODE) {
                        rv = ctr_mode_contiguous_blocks(ctx, &data[i], opsz,
                            out, AES_BLOCK_LEN, aes_encrypt_block,
                            AES_XOR_BLOCK, aes_ctr_mode);
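                        /*
                         * When decrypting, a length-range failure refers to
                         * the ciphertext, so remap the error accordingly.
                         */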
                        if (rv == CRYPTO_DATA_LEN_RANGE)
                                rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
#ifdef _KERNEL
                } else if (aes_ctx->ac_flags & CCM_MODE) {
                        rv = ccm_mode_decrypt_contiguous_blocks(ctx, &data[i],
                            opsz, out, AES_BLOCK_LEN, aes_encrypt_block,
                            AES_COPY_BLOCK, AES_XOR_BLOCK);
                } else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
                        rv = gcm_mode_decrypt_contiguous_blocks(ctx, &data[i],
                            opsz, out, AES_BLOCK_LEN, aes_encrypt_block,
                            AES_COPY_BLOCK, AES_XOR_BLOCK, aes_ctr_mode);
#endif
                } else if (aes_ctx->ac_flags & CBC_MODE) {
                        rv = cbc_decrypt_contiguous_blocks(ctx, &data[i],
                            opsz, out, AES_BLOCK_LEN, aes_decrypt_block,
                            AES_COPY_BLOCK, AES_XOR_BLOCK, aes_decrypt_ecb,
                            aes_xor_range);
                } else {
                        rv = ecb_cipher_contiguous_blocks(ctx, &data[i],
                            opsz, out, AES_BLOCK_LEN, aes_decrypt_block,
                            aes_decrypt_ecb);
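                        /* Same ciphertext length-range remapping as above. */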
                        if (rv == CRYPTO_DATA_LEN_RANGE)
                                rv = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
                }

                aes_accel_exit(savestate);

                if (rv != CRYPTO_SUCCESS)
                        break;
        }

        return (rv);
}