/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2015 by Saso Kiselkov. All rights reserved.
 */

#ifndef	_KERNEL
#include <strings.h>
#include <limits.h>
#include <assert.h>
#include <security/cryptoki.h>
#endif

#include <sys/types.h>
#define	INLINE_CRYPTO_GET_PTRS
#include <modes/modes.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>

/*
 * Tunable gating the bulk encrypt/decrypt fastpaths below.  Set to
 * B_FALSE to force the generic block-at-a-time code paths.
 */
boolean_t cbc_fastpath_enabled = B_TRUE;

/*
 * Bulk-decrypt a run of block-aligned CBC ciphertext.
 *
 * CBC decryption of block i depends only on ciphertext blocks i and i - 1,
 * never on earlier plaintext, so the whole buffer can be raw-decrypted in
 * one pass and then XORed with the one-block-shifted ciphertext stream in
 * a second pass.  The first output block is XORed with the IV saved in
 * ctx->cbc_iv.
 *
 *	ctx		CBC mode context (key schedule, current IV)
 *	data/length	block-aligned ciphertext input
 *	out		plaintext output buffer
 *	block_size	cipher block size in bytes
 *	decrypt		single-block raw decryption callback
 *	decrypt_ecb	optional bulk raw decryption callback; may be NULL
 *	xor_block	single-block XOR callback
 *	xor_block_range	optional bulk XOR callback; may be NULL
 *
 * NOTE(review): the XOR stage reads `data' after the decrypt stage has
 * written all of `out', so `out' must not alias `data'; the caller lists
 * this among its fastpath requirements.
 */
static void
cbc_decrypt_fastpath(cbc_ctx_t *ctx, const uint8_t *data, size_t length,
    uint8_t *out, size_t block_size,
    int (*decrypt)(const void *, const uint8_t *, uint8_t *),
    int (*decrypt_ecb)(const void *, const uint8_t *, uint8_t *, uint64_t),
    void (*xor_block)(const uint8_t *, uint8_t *),
    void (*xor_block_range)(const uint8_t *, uint8_t *, uint64_t))
{
	const uint8_t *iv = (uint8_t *)ctx->cbc_iv;

	/* Use bulk decryption when available. */
	if (decrypt_ecb != NULL) {
		decrypt_ecb(ctx->cbc_keysched, data, out, length);
	} else {
		for (size_t i = 0; i < length; i += block_size)
			decrypt(ctx->cbc_keysched, &data[i], &out[i]);
	}

	/* Use bulk XOR when available. */
	if (xor_block_range != NULL && length >= 2 * block_size) {
		/* First block XORs with the IV, the rest with data[i - 1]. */
		xor_block(iv, out);
		xor_block_range(data, &out[block_size], length - block_size);
	} else {
		for (size_t i = 0; i < length; i += block_size) {
			xor_block(iv, &out[i]);
			iv = &data[i];
		}
	}
}

/*
 * Algorithm independent CBC functions.
 */

/*
 * Encrypt `length' bytes of plaintext at `data' in CBC mode using the
 * per-algorithm callbacks supplied by the caller.
 *
 *	ctx		CBC mode context; carries the IV, the key schedule
 *			and partial-block state across calls
 *	data/length	plaintext input; need not be block-aligned — a
 *			trailing partial block is buffered in
 *			ctx->cbc_remainder until a later call completes it
 *	out		destination crypto_data_t; if NULL, ciphertext is
 *			written back over `data' in place
 *	block_size	cipher block size in bytes
 *	encrypt		single-block encryption callback
 *	copy_block	aligned block copy callback
 *	xor_block	block XOR callback
 *	encrypt_cbc	optional bulk CBC encryption callback (enables the
 *			fastpath); may be NULL
 *
 * Returns CRYPTO_SUCCESS, or CRYPTO_DATA_LEN_RANGE if a previously
 * buffered partial block cannot be completed from the supplied input.
 */
int
cbc_encrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*encrypt)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(const uint8_t *, uint8_t *),
    void (*xor_block)(const uint8_t *, uint8_t *),
    int (*encrypt_cbc)(const void *, const uint8_t *, uint8_t *,
    const uint8_t *, uint64_t))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;

	/*
	 * CBC encryption fastpath requirements:
	 * - fastpath is enabled
	 * - algorithm-specific acceleration function is available
	 * - input is block-aligned
	 * - output is a single contiguous region or the user requested that
	 *   we overwrite their input buffer (input/output aliasing allowed)
	 *
	 * The alignment test relies on block_size being a power of two.
	 */
	if (cbc_fastpath_enabled && encrypt_cbc != NULL && length != 0 &&
	    ctx->cbc_remainder_len == 0 && (length & (block_size - 1)) == 0 &&
	    (out == NULL || CRYPTO_DATA_IS_SINGLE_BLOCK(out))) {
		if (out == NULL) {
			/* In-place: ciphertext overwrites the plaintext. */
			encrypt_cbc(ctx->cbc_keysched, (uint8_t *)data,
			    (uint8_t *)data, (uint8_t *)ctx->cbc_iv, length);
			ctx->cbc_lastp = (uint8_t *)&data[length - block_size];
		} else {
			uint8_t *outp = CRYPTO_DATA_FIRST_BLOCK(out);

			encrypt_cbc(ctx->cbc_keysched, (uint8_t *)data, outp,
			    (uint8_t *)ctx->cbc_iv, length);
			out->cd_offset += length;
			ctx->cbc_lastp = &outp[length - block_size];
		}
		/* Latch the final ciphertext block into cbc_iv below. */
		goto out;
	}

	if (length + ctx->cbc_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
		    length);
		ctx->cbc_remainder_len += length;
		ctx->cbc_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	/* lastp tracks the previous ciphertext block (the IV initially). */
	lastp = (uint8_t *)ctx->cbc_iv;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->cbc_remainder_len > 0) {
			/* Top up the buffered partial block from datap. */
			need = block_size - ctx->cbc_remainder_len;

			if (need > remainder)
				return (CRYPTO_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
			    [ctx->cbc_remainder_len], need);

			blockp = (uint8_t *)ctx->cbc_remainder;
		} else {
			blockp = datap;
		}

		if (out == NULL) {
			/*
			 * XOR the previous cipher block or IV with the
			 * current clear block.
			 */
			xor_block(lastp, blockp);
			encrypt(ctx->cbc_keysched, blockp, blockp);

			ctx->cbc_lastp = blockp;
			lastp = blockp;

			/*
			 * Block was assembled in cbc_remainder: write its
			 * ciphertext back to where the fragments came from.
			 */
			if (ctx->cbc_remainder_len > 0) {
				bcopy(blockp, ctx->cbc_copy_to,
				    ctx->cbc_remainder_len);
				bcopy(blockp + ctx->cbc_remainder_len, datap,
				    need);
			}
		} else {
			/*
			 * XOR the previous cipher block or IV with the
			 * current clear block.
			 */
			xor_block(blockp, lastp);
			encrypt(ctx->cbc_keysched, lastp, lastp);
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			/* copy block to where it belongs */
			if (out_data_1_len == block_size) {
				copy_block(lastp, out_data_1);
			} else {
				/* Output block straddles two iovecs. */
				bcopy(lastp, out_data_1, out_data_1_len);
				if (out_data_2 != NULL) {
					bcopy(lastp + out_data_1_len,
					    out_data_2,
					    block_size - out_data_1_len);
				}
			}
			/* update offset */
			out->cd_offset += block_size;
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->cbc_remainder_len != 0) {
			datap += need;
			ctx->cbc_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->cbc_remainder, remainder);
			ctx->cbc_remainder_len = remainder;
			ctx->cbc_copy_to = datap;
			goto out;
		}
		ctx->cbc_copy_to = NULL;

	} while (remainder > 0);

out:
	/*
	 * Save the last encrypted block in the context.
	 */
	if (ctx->cbc_lastp != NULL) {
		copy_block((uint8_t *)ctx->cbc_lastp, (uint8_t *)ctx->cbc_iv);
		ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv;
	}

	return (CRYPTO_SUCCESS);
}

/*
 * During decryption, cbc_lastblock and cbc_iv are used alternately to hold
 * the previous ciphertext block; OTHER yields whichever of the two buffers
 * `a' is not currently pointing at.
 */
#define	OTHER(a, ctx) \
	(((a) == (ctx)->cbc_lastblock) ? (ctx)->cbc_iv : (ctx)->cbc_lastblock)
/*
 * Decrypt `length' bytes of ciphertext at `data' in CBC mode; the mirror
 * image of cbc_encrypt_contiguous_blocks.
 *
 *	ctx		CBC mode context; carries the IV, key schedule and
 *			partial-block state across calls
 *	data/length	ciphertext input; a trailing partial block is
 *			buffered in ctx->cbc_remainder until completed
 *	out		destination crypto_data_t; if NULL, plaintext is
 *			written back over `data' in place
 *	block_size	cipher block size in bytes
 *	decrypt		single-block raw decryption callback
 *	copy_block	aligned block copy callback
 *	xor_block	block XOR callback
 *	decrypt_ecb	optional bulk raw decryption callback for the
 *			fastpath; may be NULL
 *	xor_block_range	optional bulk XOR callback for the fastpath; may
 *			be NULL
 *
 * Returns CRYPTO_SUCCESS, or CRYPTO_ENCRYPTED_DATA_LEN_RANGE if a
 * previously buffered partial block cannot be completed from the input.
 */
/* ARGSUSED */
int
cbc_decrypt_contiguous_blocks(cbc_ctx_t *ctx, char *data, size_t length,
    crypto_data_t *out, size_t block_size,
    int (*decrypt)(const void *, const uint8_t *, uint8_t *),
    void (*copy_block)(const uint8_t *, uint8_t *),
    void (*xor_block)(const uint8_t *, uint8_t *),
    int (*decrypt_ecb)(const void *, const uint8_t *, uint8_t *, uint64_t),
    void (*xor_block_range)(const uint8_t *, uint8_t *, uint64_t))
{
	size_t remainder = length;
	size_t need;
	uint8_t *datap = (uint8_t *)data;
	uint8_t *blockp;
	uint8_t *lastp;
	void *iov_or_mp;
	offset_t offset;
	uint8_t *out_data_1;
	uint8_t *out_data_2;
	size_t out_data_1_len;

	/*
	 * CBC decryption fastpath requirements:
	 * - fastpath is enabled
	 * - input is block-aligned
	 * - output is a single contiguous region and doesn't alias input
	 */
	if (cbc_fastpath_enabled && ctx->cbc_remainder_len == 0 &&
	    length != 0 && (length & (block_size - 1)) == 0 &&
	    CRYPTO_DATA_IS_SINGLE_BLOCK(out)) {
		uint8_t *outp = CRYPTO_DATA_FIRST_BLOCK(out);

		cbc_decrypt_fastpath(ctx, (uint8_t *)data, length, outp,
		    block_size, decrypt, decrypt_ecb, xor_block,
		    xor_block_range);
		out->cd_offset += length;
		/* The last ciphertext block becomes the IV for next call. */
		bcopy(&data[length - block_size], ctx->cbc_iv, block_size);
		ctx->cbc_lastp = (uint8_t *)ctx->cbc_iv;
		return (CRYPTO_SUCCESS);
	}

	if (length + ctx->cbc_remainder_len < block_size) {
		/* accumulate bytes here and return */
		bcopy(datap,
		    (uint8_t *)ctx->cbc_remainder + ctx->cbc_remainder_len,
		    length);
		ctx->cbc_remainder_len += length;
		ctx->cbc_copy_to = datap;
		return (CRYPTO_SUCCESS);
	}

	/* lastp points at the previous ciphertext block (IV initially). */
	lastp = ctx->cbc_lastp;
	if (out != NULL)
		crypto_init_ptrs(out, &iov_or_mp, &offset);

	do {
		/* Unprocessed data from last call. */
		if (ctx->cbc_remainder_len > 0) {
			/* Top up the buffered partial block from datap. */
			need = block_size - ctx->cbc_remainder_len;

			if (need > remainder)
				return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);

			bcopy(datap, &((uint8_t *)ctx->cbc_remainder)
			    [ctx->cbc_remainder_len], need);

			blockp = (uint8_t *)ctx->cbc_remainder;
		} else {
			blockp = datap;
		}

		/*
		 * Stash the current ciphertext block in the buffer OTHER
		 * than the one lastp occupies; it becomes the new lastp
		 * after this block is decrypted (the plaintext may
		 * overwrite blockp in place below).
		 */
		/* LINTED: pointer alignment */
		copy_block(blockp, (uint8_t *)OTHER((uint64_t *)lastp, ctx));

		if (out != NULL) {
			/* Decrypt into scratch; copied to `out' below. */
			decrypt(ctx->cbc_keysched, blockp,
			    (uint8_t *)ctx->cbc_remainder);
			blockp = (uint8_t *)ctx->cbc_remainder;
		} else {
			decrypt(ctx->cbc_keysched, blockp, blockp);
		}

		/*
		 * XOR the previous cipher block or IV with the
		 * currently decrypted block.
		 */
		xor_block(lastp, blockp);

		/* Flip lastp to the buffer holding this ciphertext block. */
		/* LINTED: pointer alignment */
		lastp = (uint8_t *)OTHER((uint64_t *)lastp, ctx);

		if (out != NULL) {
			crypto_get_ptrs(out, &iov_or_mp, &offset, &out_data_1,
			    &out_data_1_len, &out_data_2, block_size);

			bcopy(blockp, out_data_1, out_data_1_len);
			if (out_data_2 != NULL) {
				bcopy(blockp + out_data_1_len, out_data_2,
				    block_size - out_data_1_len);
			}

			/* update offset */
			out->cd_offset += block_size;

		} else if (ctx->cbc_remainder_len > 0) {
			/* copy temporary block to where it belongs */
			bcopy(blockp, ctx->cbc_copy_to, ctx->cbc_remainder_len);
			bcopy(blockp + ctx->cbc_remainder_len, datap, need);
		}

		/* Update pointer to next block of data to be processed. */
		if (ctx->cbc_remainder_len != 0) {
			datap += need;
			ctx->cbc_remainder_len = 0;
		} else {
			datap += block_size;
		}

		remainder = (size_t)&data[length] - (size_t)datap;

		/* Incomplete last block. */
		if (remainder > 0 && remainder < block_size) {
			bcopy(datap, ctx->cbc_remainder, remainder);
			ctx->cbc_remainder_len = remainder;
			ctx->cbc_lastp = lastp;
			ctx->cbc_copy_to = datap;
			return (CRYPTO_SUCCESS);
		}
		ctx->cbc_copy_to = NULL;

	} while (remainder > 0);

	ctx->cbc_lastp = lastp;
	return (CRYPTO_SUCCESS);
}

/*
 * Initialize a CBC context: record the IV and mark the context as CBC.
 *
 *	cbc_ctx		context to initialize
 *	param		IV, or NULL to take the IV from cd_miscdata later
 *	param_len	IV length; must equal block_size when param != NULL
 *	block_size	cipher block size in bytes
 *	copy_block	aligned block copy callback used to store the IV
 *
 * Always returns CRYPTO_SUCCESS.
 */
int
cbc_init_ctx(cbc_ctx_t *cbc_ctx, char *param, size_t param_len,
    size_t block_size, void (*copy_block)(const uint8_t *, uint64_t *))
{
	/*
	 * Copy IV into context.
	 *
	 * If cm_param == NULL then the IV comes from the
	 * cd_miscdata field in the crypto_data structure.
	 */
	if (param != NULL) {
#ifdef _KERNEL
		ASSERT(param_len == block_size);
#else
		assert(param_len == block_size);
#endif
		copy_block((uchar_t *)param, cbc_ctx->cbc_iv);
	}

	cbc_ctx->cbc_lastp = (uint8_t *)&cbc_ctx->cbc_iv[0];
	cbc_ctx->cbc_flags |= CBC_MODE;
	return (CRYPTO_SUCCESS);
}

/*
 * Allocate a zeroed CBC context flagged CBC_MODE; returns NULL on
 * allocation failure.  kmflag is the kmem_zalloc() flag in kernel builds
 * and unused in userland (hence ARGSUSED).
 */
/* ARGSUSED */
void *
cbc_alloc_ctx(int kmflag)
{
	cbc_ctx_t *cbc_ctx;

#ifdef _KERNEL
	if ((cbc_ctx = kmem_zalloc(sizeof (cbc_ctx_t), kmflag)) == NULL)
#else
	if ((cbc_ctx = calloc(1, sizeof (cbc_ctx_t))) == NULL)
#endif
		return (NULL);

	cbc_ctx->cbc_flags = CBC_MODE;
	return (cbc_ctx);
}