Print this page
6640 dca gets the instance number a lot, never actually uses it
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/crypto/io/dca.c
+++ new/usr/src/uts/common/crypto/io/dca.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27
28 28 /*
29 29 * Deimos - cryptographic acceleration based upon Broadcom 582x.
30 30 */
31 31
32 32 #include <sys/types.h>
33 33 #include <sys/modctl.h>
34 34 #include <sys/conf.h>
35 35 #include <sys/devops.h>
36 36 #include <sys/ddi.h>
37 37 #include <sys/sunddi.h>
38 38 #include <sys/cmn_err.h>
39 39 #include <sys/varargs.h>
40 40 #include <sys/file.h>
41 41 #include <sys/stat.h>
42 42 #include <sys/kmem.h>
43 43 #include <sys/ioccom.h>
44 44 #include <sys/open.h>
45 45 #include <sys/cred.h>
46 46 #include <sys/kstat.h>
47 47 #include <sys/strsun.h>
48 48 #include <sys/note.h>
49 49 #include <sys/crypto/common.h>
50 50 #include <sys/crypto/spi.h>
51 51 #include <sys/ddifm.h>
52 52 #include <sys/fm/protocol.h>
53 53 #include <sys/fm/util.h>
54 54 #include <sys/fm/io/ddi.h>
55 55 #include <sys/crypto/dca.h>
56 56
57 57 /*
58 58 * Core Deimos driver.
59 59 */
60 60
61 61 static void dca_enlist2(dca_listnode_t *, dca_listnode_t *,
62 62 kmutex_t *);
63 63 static void dca_rmlist2(dca_listnode_t *node, kmutex_t *);
64 64 static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *);
65 65 static void dca_free_context_list(dca_t *dca);
66 66 static int dca_free_context_low(crypto_ctx_t *ctx);
67 67 static int dca_attach(dev_info_t *, ddi_attach_cmd_t);
68 68 static int dca_detach(dev_info_t *, ddi_detach_cmd_t);
69 69 static int dca_suspend(dca_t *);
70 70 static int dca_resume(dca_t *);
71 71 static int dca_init(dca_t *);
72 72 static int dca_reset(dca_t *, int);
73 73 static int dca_initworklist(dca_t *, dca_worklist_t *);
74 74 static void dca_uninit(dca_t *);
75 75 static void dca_initq(dca_listnode_t *);
76 76 static void dca_enqueue(dca_listnode_t *, dca_listnode_t *);
77 77 static dca_listnode_t *dca_dequeue(dca_listnode_t *);
78 78 static dca_listnode_t *dca_unqueue(dca_listnode_t *);
79 79 static dca_request_t *dca_newreq(dca_t *);
80 80 static dca_work_t *dca_getwork(dca_t *, int);
81 81 static void dca_freework(dca_work_t *);
82 82 static dca_work_t *dca_newwork(dca_t *);
83 83 static void dca_destroywork(dca_work_t *);
84 84 static void dca_schedule(dca_t *, int);
85 85 static void dca_reclaim(dca_t *, int);
86 86 static uint_t dca_intr(char *);
87 87 static void dca_failure(dca_t *, ddi_fault_location_t,
88 88 dca_fma_eclass_t index, uint64_t, int, char *, ...);
89 89 static void dca_jobtimeout(void *);
90 90 static int dca_drain(dca_t *);
91 91 static void dca_undrain(dca_t *);
92 92 static void dca_rejectjobs(dca_t *);
93 93
94 94 #ifdef SCHEDDELAY
95 95 static void dca_schedtimeout(void *);
96 96 #endif
97 97
98 98 /*
99 99 * We want these inlined for performance.
100 100 */
101 101 #ifndef DEBUG
102 102 #pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
103 103 #pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
104 104 #pragma inline(dca_reverse, dca_length)
105 105 #endif
106 106
/*
 * Device operations.  There are no cb_ops: the hardware is reached
 * through the kernel crypto framework SPI, not via device nodes.
 * Quiesce is not supported (ddi_quiesce_not_supported).
 */
static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	dca_attach,		/* devo_attach */
	dca_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
124 124
125 125 #define IDENT "PCI Crypto Accelerator"
126 126 #define IDENT_SYM "Crypto Accel Sym 2.0"
127 127 #define IDENT_ASYM "Crypto Accel Asym 2.0"
128 128
129 129 /* Space-padded, will be filled in dynamically during registration */
130 130 #define IDENT3 "PCI Crypto Accelerator Mod 2.0"
131 131
132 132 #define VENDOR "Sun Microsystems, Inc."
133 133
134 134 #define STALETIME (30 * SECOND)
135 135
136 136 #define crypto_prov_notify crypto_provider_notification
137 137 /* A 28 char function name doesn't leave much line space */
138 138
139 139 /*
140 140 * Module linkage.
141 141 */
/* Driver linkage: standard device-driver module */
static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops */
	IDENT,			/* drv_linkinfo */
	&devops,		/* drv_dev_ops */
};

extern struct mod_ops mod_cryptoops;

/* Crypto linkage: registers this module with the crypto framework */
static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	IDENT3
};

/*
 * Dual module linkage: the module is both a device driver and a
 * crypto provider module.
 */
static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev */
	&modldrv,		/* ml_linkage */
	&modlcrypto,
	NULL
};
161 161
162 162 /*
163 163 * CSPI information (entry points, provider info, etc.)
164 164 */
165 165
/*
 * Mechanisms for the symmetric cipher provider.  Both DES and 3DES
 * CBC support single-part and atomic encrypt/decrypt; key sizes are
 * expressed in bytes.
 */
static crypto_mech_info_t dca_mech_info_tab1[] = {
	/* DES-CBC */
	{SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* 3DES-CBC */
	{SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
179 179
/*
 * Mechanisms for the asymmetric cipher provider (DSA and RSA).
 * Unlike the symmetric table, key sizes here are expressed in bits
 * (CRYPTO_BYTES2BITS conversion, CRYPTO_KEYSIZE_UNIT_IN_BITS).
 */
static crypto_mech_info_t dca_mech_info_tab2[] = {
	/* DSA */
	{SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
	    CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
	    CRYPTO_BYTES2BITS(DSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(DSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},

	/* RSA */
	{SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
	{SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
};
212 212
static void dca_provider_status(crypto_provider_handle_t, uint_t *);

/* Control ops: only the provider-status callback is implemented */
static crypto_control_ops_t dca_control_ops = {
	dca_provider_status
};
218 218
219 219 static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
220 220 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
221 221 static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
222 222 crypto_req_handle_t);
223 223 static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
224 224 crypto_data_t *, crypto_req_handle_t);
225 225 static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
226 226 crypto_req_handle_t);
227 227 static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
228 228 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
229 229 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
230 230
231 231 static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
232 232 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
233 233 static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
234 234 crypto_req_handle_t);
235 235 static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
236 236 crypto_data_t *, crypto_req_handle_t);
237 237 static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
238 238 crypto_req_handle_t);
239 239 static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
240 240 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
241 241 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
242 242
/*
 * Cipher (encrypt/decrypt) entry points; this table is shared by
 * both the symmetric (dca_crypto_ops1) and asymmetric
 * (dca_crypto_ops2) providers.
 */
static crypto_cipher_ops_t dca_cipher_ops = {
	dca_encrypt_init,
	dca_encrypt,
	dca_encrypt_update,
	dca_encrypt_final,
	dca_encrypt_atomic,
	dca_decrypt_init,
	dca_decrypt,
	dca_decrypt_update,
	dca_decrypt_final,
	dca_decrypt_atomic
};
255 255
256 256 static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
257 257 crypto_spi_ctx_template_t, crypto_req_handle_t);
258 258 static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
259 259 crypto_req_handle_t);
260 260 static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
261 261 crypto_req_handle_t);
262 262 static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
263 263 crypto_req_handle_t);
264 264 static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
265 265 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
266 266 crypto_spi_ctx_template_t, crypto_req_handle_t);
267 267 static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
268 268 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
269 269 static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
270 270 crypto_req_handle_t);
271 271 static int dca_sign_recover_atomic(crypto_provider_handle_t,
272 272 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
273 273 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
274 274
/* Sign entry points, used by the asymmetric provider only */
static crypto_sign_ops_t dca_sign_ops = {
	dca_sign_init,
	dca_sign,
	dca_sign_update,
	dca_sign_final,
	dca_sign_atomic,
	dca_sign_recover_init,
	dca_sign_recover,
	dca_sign_recover_atomic
};
285 285
286 286 static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
287 287 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
288 288 static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
289 289 crypto_req_handle_t);
290 290 static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
291 291 crypto_req_handle_t);
292 292 static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
293 293 crypto_req_handle_t);
294 294 static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
295 295 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
296 296 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
297 297 static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
298 298 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
299 299 static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
300 300 crypto_data_t *, crypto_req_handle_t);
301 301 static int dca_verify_recover_atomic(crypto_provider_handle_t,
302 302 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
303 303 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
304 304
/* Verify entry points, used by the asymmetric provider only */
static crypto_verify_ops_t dca_verify_ops = {
	dca_verify_init,
	dca_verify,
	dca_verify_update,
	dca_verify_final,
	dca_verify_atomic,
	dca_verify_recover_init,
	dca_verify_recover,
	dca_verify_recover_atomic
};
315 315
static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
    uchar_t *, size_t, crypto_req_handle_t);

/* Random-number ops: generation only (no external seeding entry) */
static crypto_random_number_ops_t dca_random_number_ops = {
	NULL,
	dca_generate_random
};
323 323
324 324 static int ext_info_sym(crypto_provider_handle_t prov,
325 325 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
326 326 static int ext_info_asym(crypto_provider_handle_t prov,
327 327 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
328 328 static int ext_info_base(crypto_provider_handle_t prov,
329 329 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);
330 330
/* Management ops for the symmetric provider: only ext_info is supported */
static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
	ext_info_sym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};

/* Management ops for the asymmetric provider: only ext_info is supported */
static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
	ext_info_asym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};
344 344
int dca_free_context(crypto_ctx_t *);

/* Context ops: only context teardown (dca_free_context) is provided */
static crypto_ctx_ops_t dca_ctx_ops = {
	NULL,
	dca_free_context
};
351 351
/*
 * Operations for the symmetric cipher provider: control, cipher and
 * context ops only.
 */
static crypto_ops_t dca_crypto_ops1 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	NULL,				/* sign_ops */
	NULL,				/* verify_ops */
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	NULL,				/* random_number_ops */
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_1,		/* management_ops */
	&dca_ctx_ops
};

/*
 * Operations for the asymmetric cipher provider: adds sign, verify
 * and random-number ops on top of the shared cipher ops.
 */
static crypto_ops_t dca_crypto_ops2 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	&dca_sign_ops,
	&dca_verify_ops,
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	&dca_random_number_ops,
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_2,		/* management_ops */
	&dca_ctx_ops
};
387 387
/*
 * Provider information for the symmetric cipher provider.  The
 * description, device and handle fields are filled in at attach time
 * in dca_attach().
 */
static crypto_provider_info_t dca_prov_info1 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops1,
	sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab1,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};
401 401
/*
 * Provider information for the asymmetric cipher provider.  As with
 * dca_prov_info1, the description, device and handle fields are
 * filled in at attach time in dca_attach().
 */
static crypto_provider_info_t dca_prov_info2 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops2,
	sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab2,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};
415 415
416 416 /* Convenience macros */
417 -/* Retrieve the softc and instance number from a SPI crypto context */
418 -#define DCA_SOFTC_FROM_CTX(ctx, softc, instance) { \
419 - (softc) = (dca_t *)(ctx)->cc_provider; \
420 - (instance) = ddi_get_instance((softc)->dca_dip); \
421 -}
422 -
/* Retrieve the softc (dca_t) from a SPI crypto context */
#define	DCA_SOFTC_FROM_CTX(ctx)	((dca_t *)(ctx)->cc_provider)
/* Retrieve the mechanism type from a SPI crypto context's request */
#define	DCA_MECH_FROM_CTX(ctx) \
	(((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)
425 420
426 421 static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
427 422 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
428 423 dca_chain_t *head, int *n_chain);
429 424 static uint64_t dca_ena(uint64_t ena);
430 425 static caddr_t dca_bufdaddr_out(crypto_data_t *data);
431 426 static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
432 427 static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
433 428 dca_fma_eclass_t eclass_index);
434 429
435 430 static void dca_fma_init(dca_t *dca);
436 431 static void dca_fma_fini(dca_t *dca);
437 432 static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
438 433 const void *impl_data);
439 434
440 435
/*
 * Supported devices: PCI (sub)vendor/device ids and marketing names.
 * dca_attach() prefers a subsystem-id match and falls back to the
 * generic vendor/device ids.
 */
static dca_device_t dca_devices[] = {
	/* Broadcom vanilla variants */
	{ 0x14e4, 0x5820, "Broadcom 5820" },
	{ 0x14e4, 0x5821, "Broadcom 5821" },
	{ 0x14e4, 0x5822, "Broadcom 5822" },
	{ 0x14e4, 0x5825, "Broadcom 5825" },
	/* Sun specific OEMd variants */
	{ 0x108e, 0x5454, "SCA" },
	{ 0x108e, 0x5455, "SCA 1000" },
	{ 0x108e, 0x5457, "SCA 500" },
	/* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
	{ 0x108e, 0x1, "SCA 500" },
};
454 449
/*
 * Device access attributes.  Registers are little-endian with strict
 * ordering; dca_regsattr additionally enables FM error reporting
 * (DDI_FLAGERR_ACC).
 */
static struct ddi_device_acc_attr dca_regsattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

static struct ddi_device_acc_attr dca_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/* Buffer access attributes; not needed (or defined) on x86 builds */
#if !defined(i386) && !defined(__i386)
static struct ddi_device_acc_attr dca_bufattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
#endif
478 473
/*
 * DMA attributes: 32-bit addressing, 64-byte alignment, FM error
 * reporting enabled.  x86 allows a 512-entry scatter/gather list;
 * other platforms are limited to a single cookie.
 */
static struct ddi_dma_attr dca_dmaattr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0x0,			/* dma_attr_addr_lo */
	0xffffffffUL,		/* dma_attr_addr_hi */
	0x00ffffffUL,		/* dma_attr_count_max */
	0x40,			/* dma_attr_align */
	0x40,			/* dma_attr_burstsizes */
	0x1,			/* dma_attr_minxfer */
	0x00ffffffUL,		/* dma_attr_maxxfer */
	0xffffffffUL,		/* dma_attr_seg */
#if defined(i386) || defined(__i386) || defined(__amd64)
	512,			/* dma_attr_sgllen */
#else
	1,			/* dma_attr_sgllen */
#endif
	1,			/* dma_attr_granular */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};
497 492
/* Per-instance soft state list, managed via ddi_soft_state_*() */
static void *dca_state = NULL;
/* presumably a minimum size threshold for using DMA — TODO confirm against users */
int dca_mindma = 2500;
500 495
/*
 * FMA eclass string definitions.  Note that these string arrays must be
 * consistent with the dca_fma_eclass_t enum.
 */
/* eclass strings reported for SCA-1000 class devices */
static char *dca_fma_eclass_sca1000[] = {
	"sca1000.hw.device",
	"sca1000.hw.timeout",
	"sca1000.none"
};

/* eclass strings reported for SCA-500 class devices */
static char *dca_fma_eclass_sca500[] = {
	"sca500.hw.device",
	"sca500.hw.timeout",
	"sca500.none"
};
516 511
517 512 /*
518 513 * DDI entry points.
519 514 */
520 515 int
521 516 _init(void)
522 517 {
523 518 int rv;
524 519
525 520 DBG(NULL, DMOD, "dca: in _init");
526 521
527 522 if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
528 523 /* this should *never* happen! */
529 524 return (rv);
530 525 }
531 526
532 527 if ((rv = mod_install(&modlinkage)) != 0) {
533 528 /* cleanup here */
534 529 ddi_soft_state_fini(&dca_state);
535 530 return (rv);
536 531 }
537 532
538 533 return (0);
539 534 }
540 535
541 536 int
542 537 _fini(void)
543 538 {
544 539 int rv;
545 540
546 541 DBG(NULL, DMOD, "dca: in _fini");
547 542
548 543 if ((rv = mod_remove(&modlinkage)) == 0) {
549 544 /* cleanup here */
550 545 ddi_soft_state_fini(&dca_state);
551 546 }
552 547 return (rv);
553 548 }
554 549
555 550 int
556 551 _info(struct modinfo *modinfop)
557 552 {
558 553 DBG(NULL, DMOD, "dca: in _info");
559 554
560 555 return (mod_info(&modlinkage, modinfop));
561 556 }
562 557
/*
 * Autoconfiguration entry point; handles DDI_ATTACH and DDI_RESUME.
 * On a fresh attach: sets up PCI config access, matches the device
 * against dca_devices[], allocates and initializes soft state, maps
 * registers, resets and initializes the chip, installs the interrupt
 * handler, and registers both crypto providers (MCR1 symmetric,
 * MCR2 asymmetric) with the kernel crypto framework.  On failure,
 * everything acquired so far is torn down under the "failed" label.
 */
int
dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	ddi_acc_handle_t pci;
	int instance;
	ddi_iblock_cookie_t ibc;
	int intr_added = 0;
	dca_t *dca;
	ushort_t venid;
	ushort_t devid;
	ushort_t revid;
	ushort_t subsysid;
	ushort_t subvenid;
	int i;
	int ret;
	char ID[64];
	static char *unknowndev = "Unknown device";

#if DEBUG
	/* these are only used for debugging */
	ushort_t pcicomm;
	ushort_t pcistat;
	uchar_t cachelinesz;
	uchar_t mingnt;
	uchar_t maxlat;
	uchar_t lattmr;
#endif

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);

	switch (cmd) {
	case DDI_RESUME:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			/*
			 * NOTE(review): message says "detach" but this is
			 * the resume path -- looks copy/pasted; confirm.
			 */
			dca_diperror(dip, "no soft state in detach");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_resume(dca));
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		dca_diperror(dip, "slot does not support PCI bus-master");
		return (DDI_FAILURE);
	}

	/* a high-level interrupt would prevent use of our mutexes */
	if (ddi_intr_hilevel(dip, 0) != 0) {
		dca_diperror(dip, "hilevel interrupts not supported");
		return (DDI_FAILURE);
	}

	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		dca_diperror(dip, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/* common PCI attributes */
	venid = pci_config_get16(pci, PCI_VENID);
	devid = pci_config_get16(pci, PCI_DEVID);
	revid = pci_config_get8(pci, PCI_REVID);
	subvenid = pci_config_get16(pci, PCI_SUBVENID);
	subsysid = pci_config_get16(pci, PCI_SUBSYSID);

	/*
	 * Broadcom-specific timings.
	 * We disable these timers/counters since they can cause
	 * incorrect false failures when the bus is just a little
	 * bit slow, or busy.
	 */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

#if DEBUG
	/* read registers (for debugging) */
	pcicomm = pci_config_get16(pci, PCI_COMM);
	pcistat = pci_config_get16(pci, PCI_STATUS);
	cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
	mingnt = pci_config_get8(pci, PCI_MINGNT);
	maxlat = pci_config_get8(pci, PCI_MAXLAT);
	lattmr = pci_config_get8(pci, PCI_LATTMR);
#endif

	/* config space is no longer needed past this point */
	pci_config_teardown(&pci);

	if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
		dca_diperror(dip, "unable to get iblock cookie");
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
		dca_diperror(dip, "unable to allocate soft state");
		return (DDI_FAILURE);
	}

	dca = ddi_get_soft_state(dca_state, instance);
	ASSERT(dca != NULL);
	dca->dca_dip = dip;
	WORKLIST(dca, MCR1)->dwl_prov = NULL;
	WORKLIST(dca, MCR2)->dwl_prov = NULL;
	/* figure pagesize */
	dca->dca_pagesize = ddi_ptob(dip, 1);

	/*
	 * Search for the device in our supported devices table.  This
	 * is here for two reasons.  First, we want to ensure that
	 * only Sun-qualified (and presumably Sun-labeled) devices can
	 * be used with this driver.  Second, some devices have
	 * specific differences.  E.g. the 5821 has support for a
	 * special mode of RC4, deeper queues, power management, and
	 * other changes.  Also, the export versions of some of these
	 * chips don't support RC4 or 3DES, so we catch that here.
	 *
	 * Note that we only look at the upper nibble of the device
	 * id, which is used to distinguish export vs. domestic
	 * versions of the chip.  (The lower nibble is used for
	 * stepping information.)
	 */
	for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
		/*
		 * Try to match the subsystem information first.
		 */
		if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
		    subsysid && (subsysid == dca_devices[i].dd_device_id)) {
			dca->dca_model = dca_devices[i].dd_model;
			dca->dca_devid = dca_devices[i].dd_device_id;
			break;
		}
		/*
		 * Failing that, try the generic vendor and device id.
		 * Even if we find a match, we keep searching anyway,
		 * since we would prefer to find a match based on the
		 * subsystem ids.
		 */
		if ((venid == dca_devices[i].dd_vendor_id) &&
		    (devid == dca_devices[i].dd_device_id)) {
			dca->dca_model = dca_devices[i].dd_model;
			dca->dca_devid = dca_devices[i].dd_device_id;
		}
	}
	/* try and handle an unrecognized device */
	if (dca->dca_model == NULL) {
		dca->dca_model = unknowndev;
		dca_error(dca, "device not recognized, not supported");
		DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
		    i, venid, devid, revid);
	}

	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
	    dca->dca_model) != DDI_SUCCESS) {
		/*
		 * NOTE(review): this early return skips the "failed"
		 * teardown and leaves the soft state allocated above --
		 * possible leak on this path; confirm.
		 */
		dca_error(dca, "unable to create description property");
		return (DDI_FAILURE);
	}

	DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
	    pcicomm, pcistat, cachelinesz);
	DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
	    mingnt, maxlat, lattmr);

	/*
	 * initialize locks, etc.
	 */
	(void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);

	/* use RNGSHA1 by default */
	if (ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
		dca->dca_flags |= DCA_RNGSHA1;
	}

	/* initialize FMA */
	dca_fma_init(dca);

	/* initialize some key data structures */
	if (dca_init(dca) != DDI_SUCCESS) {
		goto failed;
	}

	/* initialize kstats */
	dca_ksinit(dca);

	/* setup access to registers */
	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
	    0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
		dca_error(dca, "unable to map registers");
		goto failed;
	}

	DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
	DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
	DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
	DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
	DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));

	/* reset the chip */
	if (dca_reset(dca, 0) < 0) {
		goto failed;
	}

	/* initialize the chip */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		goto failed;
	}

	/* add the interrupt */
	if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
	    (void *)dca) != DDI_SUCCESS) {
		DBG(dca, DWARN, "ddi_add_intr failed");
		goto failed;
	} else {
		/* remembered so the failure path knows to remove it */
		intr_added = 1;
	}

	/* enable interrupts on the device */
	/*
	 * XXX: Note, 5820A1 errata indicates that this may clobber
	 * bits 24 and 23, which affect the speed of the RNG.  Since
	 * we always want to run in full-speed mode, this should be
	 * harmless.
	 */
	if (dca->dca_devid == 0x5825) {
		/* for 5825 - increase the DMA read size */
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
	} else {
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	}
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		goto failed;
	}

	/* register MCR1 with the crypto framework */
	/* Be careful not to exceed 32 chars */
	(void) sprintf(ID, "%s/%d %s",
	    ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
	dca_prov_info1.pi_provider_description = ID;
	dca_prov_info1.pi_provider_dev.pd_hw = dip;
	dca_prov_info1.pi_provider_handle = dca;
	if ((ret = crypto_register_provider(&dca_prov_info1,
	    &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
		cmn_err(CE_WARN,
		    "crypto_register_provider() failed (%d) for MCR1", ret);
		goto failed;
	}

	/* register MCR2 with the crypto framework */
	/* Be careful not to exceed 32 chars */
	(void) sprintf(ID, "%s/%d %s",
	    ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
	dca_prov_info2.pi_provider_description = ID;
	dca_prov_info2.pi_provider_dev.pd_hw = dip;
	dca_prov_info2.pi_provider_handle = dca;
	if ((ret = crypto_register_provider(&dca_prov_info2,
	    &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
		cmn_err(CE_WARN,
		    "crypto_register_provider() failed (%d) for MCR2", ret);
		goto failed;
	}

	/* tell kCF both providers are ready to accept requests */
	crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
	    CRYPTO_PROVIDER_READY);
	crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
	    CRYPTO_PROVIDER_READY);

	/* Initialize the local random number pool for this instance */
	if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
		goto failed;
	}

	/* arm the per-second job timeout watchdog */
	mutex_enter(&dca->dca_intrlock);
	dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
	    drv_usectohz(SECOND));
	mutex_exit(&dca->dca_intrlock);

	ddi_set_driver_private(dip, (caddr_t)dca);

	ddi_report_dev(dip);

	if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
		ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
	}

	return (DDI_SUCCESS);

failed:
	/* teardown in reverse order of acquisition */
	/* unregister from the crypto framework */
	if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
		(void) crypto_unregister_provider(
		    WORKLIST(dca, MCR1)->dwl_prov);
	}
	if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
		(void) crypto_unregister_provider(
		    WORKLIST(dca, MCR2)->dwl_prov);
	}
	if (intr_added) {
		CLRBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
		/* unregister intr handler */
		ddi_remove_intr(dip, 0, dca->dca_icookie);
	}
	if (dca->dca_regs_handle) {
		ddi_regs_map_free(&dca->dca_regs_handle);
	}
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	mutex_destroy(&dca->dca_intrlock);
	ddi_soft_state_free(dca_state, instance);
	return (DDI_FAILURE);

}
897 892
/*
 * Device detach entry point.  DDI_SUSPEND is forwarded to
 * dca_suspend(); DDI_DETACH unwinds everything dca_attach() set up,
 * starting with kCF unregistration so no new jobs can arrive while
 * the rest of the state is dismantled.
 */
int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	dca_t		*dca;
	timeout_id_t	tid;

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

	switch (cmd) {
	case DDI_SUSPEND:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			dca_diperror(dip, "no soft state in detach");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_suspend(dca));

	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
		dca_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	/*
	 * Unregister from kCF.
	 * This needs to be done at the beginning of detach.
	 * If either unregistration is refused (e.g. outstanding
	 * contexts), the detach fails and the instance stays attached.
	 */
	if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR1 from kcf");
			return (DDI_FAILURE);
		}
	}

	if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR2 from kcf");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cleanup the private context list. Once the
	 * crypto_unregister_provider returns, it is safe to do so.
	 */
	dca_free_context_list(dca);

	/* Cleanup the local random number pool */
	dca_random_fini(dca);

	/* send any jobs in the waitq back to kCF */
	dca_rejectjobs(dca);

	/*
	 * untimeout the timeouts: snapshot and clear dca_jobtid under
	 * dca_intrlock so the id cannot be reused while we cancel it.
	 */
	mutex_enter(&dca->dca_intrlock);
	tid = dca->dca_jobtid;
	dca->dca_jobtid = 0;
	mutex_exit(&dca->dca_intrlock);
	if (tid) {
		(void) untimeout(tid);
	}

	/* disable device interrupts */
	CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

	/* unregister interrupt handlers */
	ddi_remove_intr(dip, 0, dca->dca_icookie);

	/* release our regs handle */
	ddi_regs_map_free(&dca->dca_regs_handle);

	/* toss out kstats */
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}

	mutex_destroy(&dca->dca_intrlock);
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	ddi_soft_state_free(dca_state, instance);

	return (DDI_SUCCESS);
}
997 992
/*
 * DDI_RESUME handler.  Reprograms the PCI configuration space and the
 * card CSRs exactly as attach originally set them, resets the engine,
 * and then lets job scheduling resume.  Returns DDI_SUCCESS or
 * DDI_FAILURE.
 */
int
dca_resume(dca_t *dca)
{
	ddi_acc_handle_t	pci;

	if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
		dca_error(dca, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/*
	 * Reprogram registers in PCI configuration space.
	 */

	/* Broadcom-specific timers -- we disable them. */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

	/* config space is only needed during this reprogramming */
	pci_config_teardown(&pci);

	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during resume");
		return (DDI_FAILURE);
	}

	/*
	 * Now restore the card-specific CSRs.
	 */

	/* restore endianness settings */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* restore interrupt enables */
	if (dca->dca_devid == 0x5825) {
		/* for 5825 set 256 byte read size to improve performance */
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
	} else {
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	}
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* resume scheduling jobs on the device */
	dca_undrain(dca);

	return (DDI_SUCCESS);
}
1058 1053
1059 1054 int
1060 1055 dca_suspend(dca_t *dca)
1061 1056 {
1062 1057 if ((dca_drain(dca)) != 0) {
1063 1058 return (DDI_FAILURE);
1064 1059 }
1065 1060 if (dca_reset(dca, 0) < 0) {
1066 1061 dca_error(dca, "unable to reset device during suspend");
1067 1062 return (DDI_FAILURE);
1068 1063 }
1069 1064 return (DDI_SUCCESS);
1070 1065 }
1071 1066
1072 1067 /*
1073 1068 * Hardware access stuff.
1074 1069 */
/*
 * Reset the crypto engine: write DMACTL_RESET and poll (100us steps,
 * up to roughly 10ms) for the hardware to clear the bit itself.
 * When failreset is nonzero the FMA access checks and the failure
 * report are skipped, so this can be used on a device that is already
 * known to be faulted.  Returns 0 on success, -1 on failure.
 */
int
dca_reset(dca_t *dca, int failreset)
{
	int i;

	/* no register mapping means there is nothing we can reset */
	if (dca->dca_regs_handle == NULL) {
		return (-1);
	}

	PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
	if (!failreset) {
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return (-1);
	}

	/* now wait for a reset */
	for (i = 1; i < 100; i++) {
		uint32_t dmactl;
		drv_usecwait(100);
		dmactl = GETCSR(dca, CSR_DMACTL);
		if (!failreset) {
			if (dca_check_acc_handle(dca, dca->dca_regs_handle,
			    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
				return (-1);
		}
		/* RESET self-clears when the device has come out of reset */
		if ((dmactl & DMACTL_RESET) == 0) {
			DBG(dca, DCHATTY, "reset in %d usec", i * 100);
			return (0);
		}
	}
	if (!failreset) {
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "timeout waiting for reset after %d usec", i * 100);
	}
	return (-1);
}
1113 1108
1114 1109 int
1115 1110 dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
1116 1111 {
1117 1112 int i;
1118 1113 int reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);
1119 1114
1120 1115 /*
1121 1116 * Set up work queue.
1122 1117 */
1123 1118 mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
1124 1119 mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
1125 1120 dca->dca_icookie);
1126 1121 mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
1127 1122 cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);
1128 1123
1129 1124 mutex_enter(&wlp->dwl_lock);
1130 1125
1131 1126 dca_initq(&wlp->dwl_freereqs);
1132 1127 dca_initq(&wlp->dwl_waitq);
1133 1128 dca_initq(&wlp->dwl_freework);
1134 1129 dca_initq(&wlp->dwl_runq);
1135 1130
1136 1131 for (i = 0; i < MAXWORK; i++) {
1137 1132 dca_work_t *workp;
1138 1133
1139 1134 if ((workp = dca_newwork(dca)) == NULL) {
1140 1135 dca_error(dca, "unable to allocate work");
1141 1136 mutex_exit(&wlp->dwl_lock);
1142 1137 return (DDI_FAILURE);
1143 1138 }
1144 1139 workp->dw_wlp = wlp;
1145 1140 dca_freework(workp);
1146 1141 }
1147 1142 mutex_exit(&wlp->dwl_lock);
1148 1143
1149 1144 for (i = 0; i < reqprealloc; i++) {
1150 1145 dca_request_t *reqp;
1151 1146
1152 1147 if ((reqp = dca_newreq(dca)) == NULL) {
1153 1148 dca_error(dca, "unable to allocate request");
1154 1149 return (DDI_FAILURE);
1155 1150 }
1156 1151 reqp->dr_dca = dca;
1157 1152 reqp->dr_wlp = wlp;
1158 1153 dca_freereq(reqp);
1159 1154 }
1160 1155 return (DDI_SUCCESS);
1161 1156 }
1162 1157
/*
 * Per-instance software initialization: set up the private context
 * list and both MCR worklists.  The low/high watermarks and the
 * requests-per-MCR count for each worklist are tunable via the
 * driver properties mcr[12]_lowater, mcr[12]_hiwater and
 * mcr[12]_maxreqs; the latter is clamped to MAXREQSPERMCR.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
dca_init(dca_t *dca)
{
	dca_worklist_t		*wlp;

	/* Initialize the private context list and the corresponding lock. */
	mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
	dca_initq(&dca->dca_ctx_list);

	/*
	 * MCR1 algorithms.
	 */
	wlp = WORKLIST(dca, MCR1);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr1",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_lowater", MCR1LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_hiwater", MCR1HIWATER);
	/* never allow more requests per MCR than the hardware supports */
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR1;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/*
	 * MCR2 algorithms.
	 */
	wlp = WORKLIST(dca, MCR2);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr2",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_lowater", MCR2LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_hiwater", MCR2HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR2;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
1215 1210
1216 1211 /*
1217 1212 * Uninitialize worklists. This routine should only be called when no
1218 1213 * active jobs (hence DMA mappings) exist. One way to ensure this is
1219 1214 * to unregister from kCF before calling this routine. (This is done
1220 1215 * e.g. in detach(9e).)
1221 1216 */
void
dca_uninit(dca_t *dca)
{
	int	mcr;

	mutex_destroy(&dca->dca_ctx_list_lock);

	/* tear down each worklist in turn */
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_work_t	*workp;
		dca_request_t	*reqp;

		/*
		 * NOTE(review): with no register mapping (attach failed
		 * early) this worklist is skipped entirely, including its
		 * mutex/cv destruction — presumably they were never
		 * initialized in that case; confirm against dca_attach().
		 */
		if (dca->dca_regs_handle == NULL) {
			continue;
		}

		/* drain and destroy all preallocated work structures */
		mutex_enter(&wlp->dwl_lock);
		while ((workp = dca_getwork(dca, mcr)) != NULL) {
			dca_destroywork(workp);
		}
		mutex_exit(&wlp->dwl_lock);
		/* then all preallocated request structures */
		while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
			dca_destroyreq(reqp);
		}

		mutex_destroy(&wlp->dwl_lock);
		mutex_destroy(&wlp->dwl_freereqslock);
		mutex_destroy(&wlp->dwl_freelock);
		cv_destroy(&wlp->dwl_cv);
		wlp->dwl_prov = NULL;
	}
}
1254 1249
1255 1250 static void
1256 1251 dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
1257 1252 {
1258 1253 if (!q || !node)
1259 1254 return;
1260 1255
1261 1256 mutex_enter(lock);
1262 1257 node->dl_next2 = q;
1263 1258 node->dl_prev2 = q->dl_prev2;
1264 1259 node->dl_next2->dl_prev2 = node;
1265 1260 node->dl_prev2->dl_next2 = node;
1266 1261 mutex_exit(lock);
1267 1262 }
1268 1263
1269 1264 static void
1270 1265 dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
1271 1266 {
1272 1267 if (!node)
1273 1268 return;
1274 1269
1275 1270 mutex_enter(lock);
1276 1271 node->dl_next2->dl_prev2 = node->dl_prev2;
1277 1272 node->dl_prev2->dl_next2 = node->dl_next2;
1278 1273 node->dl_next2 = NULL;
1279 1274 node->dl_prev2 = NULL;
1280 1275 mutex_exit(lock);
1281 1276 }
1282 1277
1283 1278 static dca_listnode_t *
1284 1279 dca_delist2(dca_listnode_t *q, kmutex_t *lock)
1285 1280 {
1286 1281 dca_listnode_t *node;
1287 1282
1288 1283 mutex_enter(lock);
1289 1284 if ((node = q->dl_next2) == q) {
1290 1285 mutex_exit(lock);
1291 1286 return (NULL);
1292 1287 }
1293 1288
1294 1289 node->dl_next2->dl_prev2 = node->dl_prev2;
1295 1290 node->dl_prev2->dl_next2 = node->dl_next2;
1296 1291 node->dl_next2 = NULL;
1297 1292 node->dl_prev2 = NULL;
1298 1293 mutex_exit(lock);
1299 1294
1300 1295 return (node);
1301 1296 }
1302 1297
1303 1298 void
1304 1299 dca_initq(dca_listnode_t *q)
1305 1300 {
1306 1301 q->dl_next = q;
1307 1302 q->dl_prev = q;
1308 1303 q->dl_next2 = q;
1309 1304 q->dl_prev2 = q;
1310 1305 }
1311 1306
1312 1307 void
1313 1308 dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
1314 1309 {
1315 1310 /*
1316 1311 * Enqueue submits at the "tail" of the list, i.e. just
1317 1312 * behind the sentinel.
1318 1313 */
1319 1314 node->dl_next = q;
1320 1315 node->dl_prev = q->dl_prev;
1321 1316 node->dl_next->dl_prev = node;
1322 1317 node->dl_prev->dl_next = node;
1323 1318 }
1324 1319
1325 1320 void
1326 1321 dca_rmqueue(dca_listnode_t *node)
1327 1322 {
1328 1323 node->dl_next->dl_prev = node->dl_prev;
1329 1324 node->dl_prev->dl_next = node->dl_next;
1330 1325 node->dl_next = NULL;
1331 1326 node->dl_prev = NULL;
1332 1327 }
1333 1328
1334 1329 dca_listnode_t *
1335 1330 dca_dequeue(dca_listnode_t *q)
1336 1331 {
1337 1332 dca_listnode_t *node;
1338 1333 /*
1339 1334 * Dequeue takes from the "head" of the list, i.e. just after
1340 1335 * the sentinel.
1341 1336 */
1342 1337 if ((node = q->dl_next) == q) {
1343 1338 /* queue is empty */
1344 1339 return (NULL);
1345 1340 }
1346 1341 dca_rmqueue(node);
1347 1342 return (node);
1348 1343 }
1349 1344
1350 1345 /* this is the opposite of dequeue, it takes things off in LIFO order */
1351 1346 dca_listnode_t *
1352 1347 dca_unqueue(dca_listnode_t *q)
1353 1348 {
1354 1349 dca_listnode_t *node;
1355 1350 /*
1356 1351 * unqueue takes from the "tail" of the list, i.e. just before
1357 1352 * the sentinel.
1358 1353 */
1359 1354 if ((node = q->dl_prev) == q) {
1360 1355 /* queue is empty */
1361 1356 return (NULL);
1362 1357 }
1363 1358 dca_rmqueue(node);
1364 1359 return (node);
1365 1360 }
1366 1361
1367 1362 dca_listnode_t *
1368 1363 dca_peekqueue(dca_listnode_t *q)
1369 1364 {
1370 1365 dca_listnode_t *node;
1371 1366
1372 1367 if ((node = q->dl_next) == q) {
1373 1368 return (NULL);
1374 1369 } else {
1375 1370 return (node);
1376 1371 }
1377 1372 }
1378 1373
1379 1374 /*
1380 1375 * Interrupt service routine.
1381 1376 */
1382 1377 uint_t
1383 1378 dca_intr(char *arg)
1384 1379 {
1385 1380 dca_t *dca = (dca_t *)arg;
1386 1381 uint32_t status;
1387 1382
1388 1383 mutex_enter(&dca->dca_intrlock);
1389 1384 status = GETCSR(dca, CSR_DMASTAT);
1390 1385 PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
1391 1386 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
1392 1387 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
1393 1388 mutex_exit(&dca->dca_intrlock);
1394 1389 return ((uint_t)DDI_FAILURE);
1395 1390 }
1396 1391
1397 1392 DBG(dca, DINTR, "interrupted, status = 0x%x!", status);
1398 1393
1399 1394 if ((status & DMASTAT_INTERRUPTS) == 0) {
1400 1395 /* increment spurious interrupt kstat */
1401 1396 if (dca->dca_intrstats) {
1402 1397 KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
1403 1398 }
1404 1399 mutex_exit(&dca->dca_intrlock);
1405 1400 return (DDI_INTR_UNCLAIMED);
1406 1401 }
1407 1402
1408 1403 if (dca->dca_intrstats) {
1409 1404 KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
1410 1405 }
1411 1406 if (status & DMASTAT_MCR1INT) {
1412 1407 DBG(dca, DINTR, "MCR1 interrupted");
1413 1408 mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
1414 1409 dca_schedule(dca, MCR1);
1415 1410 dca_reclaim(dca, MCR1);
1416 1411 mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
1417 1412 }
1418 1413
1419 1414 if (status & DMASTAT_MCR2INT) {
1420 1415 DBG(dca, DINTR, "MCR2 interrupted");
1421 1416 mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
1422 1417 dca_schedule(dca, MCR2);
1423 1418 dca_reclaim(dca, MCR2);
1424 1419 mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
1425 1420 }
1426 1421
1427 1422 if (status & DMASTAT_ERRINT) {
1428 1423 uint32_t erraddr;
1429 1424 erraddr = GETCSR(dca, CSR_DMAEA);
1430 1425 mutex_exit(&dca->dca_intrlock);
1431 1426
1432 1427 /*
1433 1428 * bit 1 of the error address indicates failure during
1434 1429 * read if set, during write otherwise.
1435 1430 */
1436 1431 dca_failure(dca, DDI_DEVICE_FAULT,
1437 1432 DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
1438 1433 "DMA master access error %s address 0x%x",
1439 1434 erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
1440 1435 return (DDI_INTR_CLAIMED);
1441 1436 }
1442 1437
1443 1438 mutex_exit(&dca->dca_intrlock);
1444 1439
1445 1440 return (DDI_INTR_CLAIMED);
1446 1441 }
1447 1442
1448 1443 /*
1449 1444 * Reverse a string of bytes from s1 into s2. The reversal happens
1450 1445 * from the tail of s1. If len1 < len2, then null bytes will be
1451 1446 * padded to the end of s2. If len2 < len1, then (presumably null)
1452 1447 * bytes will be dropped from the start of s1.
1453 1448 *
1454 1449 * The rationale here is that when s1 (source) is shorter, then we
1455 1450 * are reversing from big-endian ordering, into device ordering, and
1456 1451 * want to add some extra nulls to the tail (MSB) side of the device.
1457 1452 *
1458 1453 * Similarly, when s2 (dest) is shorter, then we are truncating what
1459 1454 * are presumably null MSB bits from the device.
1460 1455 *
1461 1456 * There is an expectation when reversing from the device back into
1462 1457 * big-endian, that the number of bytes to reverse and the target size
1463 1458 * will match, and no truncation or padding occurs.
1464 1459 */
void
dca_reverse(void *s1, void *s2, int len1, int len2)
{
	char	*from, *to;

	/* Nothing to copy: just null-fill the destination. */
	if (len1 == 0) {
		if (len2) {
			bzero(s2, len2);
		}
		return;
	}

	/* Copy tail-first from the source until either side runs out. */
	from = (char *)s1 + len1 - 1;
	to = (char *)s2;
	while (len2 && from >= (char *)s1) {
		*to++ = *from--;
		len2--;
	}

	/* Null-pad whatever remains of the destination. */
	while (len2 > 0) {
		*to++ = 0;
		len2--;
	}
}
1487 1482
/*
 * Round a bit count up to the next full-length operand size the
 * hardware supports and return it in bytes, or 0 if it exceeds the
 * largest supported size (2048 bits).
 */
uint16_t
dca_padfull(int num)
{
	static const int	fullsizes[] = { 512, 768, 1024, 1536, 2048 };
	int			i;

	for (i = 0; i < (int)(sizeof (fullsizes) / sizeof (fullsizes[0]));
	    i++) {
		if (num <= fullsizes[i]) {
			return (BITS2BYTES(fullsizes[i]));
		}
	}
	return (0);
}
1508 1503
/*
 * Round a bit count up to the next half-length operand size the
 * hardware supports and return it in bytes, or 0 if it exceeds the
 * largest supported size (1024 bits).
 */
uint16_t
dca_padhalf(int num)
{
	static const int	halfsizes[] = { 256, 384, 512, 768, 1024 };
	int			i;

	for (i = 0; i < (int)(sizeof (halfsizes) / sizeof (halfsizes[0]));
	    i++) {
		if (num <= halfsizes[i]) {
			return (BITS2BYTES(halfsizes[i]));
		}
	}
	return (0);
}
1529 1524
/*
 * Allocate a work structure and its MCR DMA region: handle, page-
 * rounded consistent memory, and an address binding.  On any failure
 * the partially built structure is released via dca_destroywork()
 * and NULL is returned.
 */
dca_work_t *
dca_newwork(dca_t *dca)
{
	dca_work_t	*workp;
	size_t		size;
	ddi_dma_cookie_t	c;
	unsigned	nc;
	int		rv;

	workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA handle");
		dca_destroywork(workp);
		return (NULL);
	}

	/* page-rounded for driver hardening */
	rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
	    ROUNDUP(MCR_SIZE, dca->dca_pagesize),
	    &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
	    workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
	    DDI_DMA_SLEEP, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "unable to map MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	/* the device sees the MCR at this physical address */
	workp->dw_mcr_paddr = c.dmac_address;
	return (workp);
}
1571 1566
/*
 * Free a work structure and its MCR DMA resources.  Safe to call on
 * a partially constructed structure (as dca_newwork() does on its
 * error paths): each teardown step is guarded by the corresponding
 * resource having been set.
 */
void
dca_destroywork(dca_work_t *workp)
{
	/* unbind before freeing the memory it maps */
	if (workp->dw_mcr_paddr) {
		(void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
	}
	if (workp->dw_mcr_acch) {
		ddi_dma_mem_free(&workp->dw_mcr_acch);
	}
	if (workp->dw_mcr_dmah) {
		ddi_dma_free_handle(&workp->dw_mcr_dmah);
	}
	kmem_free(workp, sizeof (dca_work_t));
}
1586 1581
/*
 * Allocate and fully set up one request structure: the combined
 * context/descriptor DMA region, the input/output scratch buffers,
 * and the chain DMA handles used later by dca_bindchains() for user
 * buffers.  On any failure the partially built request is torn down
 * via dca_destroyreq() and NULL is returned.
 */
dca_request_t *
dca_newreq(dca_t *dca)
{
	dca_request_t	*reqp;
	size_t		size;
	ddi_dma_cookie_t	c;
	unsigned	nc;
	int		rv;
	int		n_chain = 0;

	/* one region holds the context plus room for MAXFRAGS descriptors */
	size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

	reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

	reqp->dr_dca = dca;

	/*
	 * Setup the DMA region for the context and descriptors.
	 */
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
	    NULL, &reqp->dr_ctx_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating request DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	/* for driver hardening, allocate in whole pages */
	rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
	    ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
	    &reqp->dr_ctx_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
	    reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
	    DDI_DMA_SLEEP, 0, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "failed binding request DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_ctx_paddr = c.dmac_address;

	/* remember the (possibly rounded-up) size for later teardown */
	reqp->dr_dma_size = size;

	/*
	 * Set up the dma for our scratch/shared buffers.
	 */
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating ibuf DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating obuf DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating chain_in DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating chain_out DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	/*
	 * for driver hardening, allocate in whole pages.
	 */
	size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#if defined(i386) || defined(__i386)
	/*
	 * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
	 * may fail on x86 platform if a physically contiguous memory chunk
	 * cannot be found. From initial testing, we did not see performance
	 * degradation as seen on Sparc.
	 */
	if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
		dca_error(dca, "unable to alloc request ibuf memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
	if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
		dca_error(dca, "unable to alloc request obuf memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
#else
	/*
	 * We could kmem_alloc for Sparc too. However, it gives worse
	 * performance when transferring more than one page data. For example,
	 * using 4 threads and 12032 byte data and 3DES on 900MHZ Sparc system,
	 * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for
	 * the same throughput.
	 */
	rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
	    size, &dca_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
	    &size, &reqp->dr_ibuf_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
	    size, &dca_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
	    &size, &reqp->dr_obuf_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
#endif

	/* Skip the used portion in the context page */
	reqp->dr_offset = CTX_MAXLENGTH;
	/* pre-bind the input scratch buffer and build its descriptor chain */
	if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
	    reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
	    DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
		(void) dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
	/* Skip the space used by the input buffer */
	reqp->dr_offset += DESC_SIZE * n_chain;

	/* likewise for the output scratch buffer */
	if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
	    reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
	    DDI_DMA_READ | DDI_DMA_STREAMING,
	    &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
		(void) dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
	/* Skip the space used by the output buffer */
	reqp->dr_offset += DESC_SIZE * n_chain;

	DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
	    reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
	return (reqp);
}
1749 1744
/*
 * Free a request structure and all DMA resources dca_newreq() set
 * up for it, in the reverse order of construction.  Safe on a
 * partially built request: every step is guarded by the resource
 * actually having been allocated/bound.
 */
void
dca_destroyreq(dca_request_t *reqp)
{
#if defined(i386) || defined(__i386)
	dca_t		*dca = reqp->dr_dca;
	size_t		size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#endif

	/*
	 * Clean up DMA for the context structure.
	 */
	if (reqp->dr_ctx_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
	}

	if (reqp->dr_ctx_acch) {
		ddi_dma_mem_free(&reqp->dr_ctx_acch);
	}

	if (reqp->dr_ctx_dmah) {
		ddi_dma_free_handle(&reqp->dr_ctx_dmah);
	}

	/*
	 * Clean up DMA for the scratch buffer.
	 */
#if defined(i386) || defined(__i386)
	/* x86 scratch buffers came from kmem_alloc (see dca_newreq) */
	if (reqp->dr_ibuf_dmah) {
		(void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
		ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_dmah) {
		(void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
		ddi_dma_free_handle(&reqp->dr_obuf_dmah);
	}

	kmem_free(reqp->dr_ibuf_kaddr, size);
	kmem_free(reqp->dr_obuf_kaddr, size);
#else
	if (reqp->dr_ibuf_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
	}

	if (reqp->dr_ibuf_acch) {
		ddi_dma_mem_free(&reqp->dr_ibuf_acch);
	}
	if (reqp->dr_obuf_acch) {
		ddi_dma_mem_free(&reqp->dr_obuf_acch);
	}

	if (reqp->dr_ibuf_dmah) {
		ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_dmah) {
		ddi_dma_free_handle(&reqp->dr_obuf_dmah);
	}
#endif
	/*
	 * These two DMA handles should have been unbinded in
	 * dca_unbindchains() function
	 */
	if (reqp->dr_chain_in_dmah) {
		ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
	}
	if (reqp->dr_chain_out_dmah) {
		ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
	}

	kmem_free(reqp, sizeof (dca_request_t));
}
1823 1818
1824 1819 dca_work_t *
1825 1820 dca_getwork(dca_t *dca, int mcr)
1826 1821 {
1827 1822 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1828 1823 dca_work_t *workp;
1829 1824
1830 1825 mutex_enter(&wlp->dwl_freelock);
1831 1826 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
1832 1827 mutex_exit(&wlp->dwl_freelock);
1833 1828 if (workp) {
1834 1829 int nreqs;
1835 1830 bzero(workp->dw_mcr_kaddr, 8);
1836 1831
1837 1832 /* clear out old requests */
1838 1833 for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
1839 1834 workp->dw_reqs[nreqs] = NULL;
1840 1835 }
1841 1836 }
1842 1837 return (workp);
1843 1838 }
1844 1839
1845 1840 void
1846 1841 dca_freework(dca_work_t *workp)
1847 1842 {
1848 1843 mutex_enter(&workp->dw_wlp->dwl_freelock);
1849 1844 dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
1850 1845 mutex_exit(&workp->dw_wlp->dwl_freelock);
1851 1846 }
1852 1847
1853 1848 dca_request_t *
1854 1849 dca_getreq(dca_t *dca, int mcr, int tryhard)
1855 1850 {
1856 1851 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1857 1852 dca_request_t *reqp;
1858 1853
1859 1854 mutex_enter(&wlp->dwl_freereqslock);
1860 1855 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
1861 1856 mutex_exit(&wlp->dwl_freereqslock);
1862 1857 if (reqp) {
1863 1858 reqp->dr_flags = 0;
1864 1859 reqp->dr_callback = NULL;
1865 1860 } else if (tryhard) {
1866 1861 /*
1867 1862 * failed to get a free one, try an allocation, the hard way.
1868 1863 * XXX: Kstat desired here.
1869 1864 */
1870 1865 if ((reqp = dca_newreq(dca)) != NULL) {
1871 1866 reqp->dr_wlp = wlp;
1872 1867 reqp->dr_dca = dca;
1873 1868 reqp->dr_flags = 0;
1874 1869 reqp->dr_callback = NULL;
1875 1870 }
1876 1871 }
1877 1872 return (reqp);
1878 1873 }
1879 1874
1880 1875 void
1881 1876 dca_freereq(dca_request_t *reqp)
1882 1877 {
1883 1878 reqp->dr_kcf_req = NULL;
1884 1879 if (!(reqp->dr_flags & DR_NOCACHE)) {
1885 1880 mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
1886 1881 dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
1887 1882 (dca_listnode_t *)reqp);
1888 1883 mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
1889 1884 }
1890 1885 }
1891 1886
/*
 * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer
 * is mapped to a single physical address. On x86, a user buffer is mapped
 * to multiple physical addresses. These physical addresses are chained
 * using the method specified in Broadcom BCM5820 specification.
 *
 * incnt/outcnt are the number of bytes to bind from the request's input
 * and output crypto_data_t's respectively (either may be 0, in which
 * case the corresponding MCR fields are zeroed).  On any failure, any
 * binding already made is undone via dca_unbindchains() before
 * returning.  Returns DDI_SUCCESS or DDI_FAILURE / the underlying
 * bind error.
 */
int
dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
{
	int rv;
	caddr_t kaddr;
	uint_t flags;
	int n_chain = 0;

	/*
	 * In-place requests use a single read/write consistent binding;
	 * otherwise the input is write-to-device streaming.
	 */
	if (reqp->dr_flags & DR_INPLACE) {
		flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
	} else {
		flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
	}

	/* first the input */
	if (incnt) {
		if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			return (DDI_FAILURE);
		}
		if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
		    kaddr, reqp->dr_chain_in_dmah, flags,
		    &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (rv);
		}

		/*
		 * The offset and length are altered by the calling routine
		 * reqp->dr_in->cd_offset += incnt;
		 * reqp->dr_in->cd_length -= incnt;
		 */
		/* Save the first one in the chain for MCR */
		reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
		reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
		reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
	} else {
		reqp->dr_in_paddr = NULL;
		reqp->dr_in_next = 0;
		reqp->dr_in_len = 0;
	}

	/* in-place: output descriptors simply mirror the input chain */
	if (reqp->dr_flags & DR_INPLACE) {
		reqp->dr_out_paddr = reqp->dr_in_paddr;
		reqp->dr_out_len = reqp->dr_in_len;
		reqp->dr_out_next = reqp->dr_in_next;
		return (DDI_SUCCESS);
	}

	/* then the output */
	if (outcnt) {
		flags = DDI_DMA_READ | DDI_DMA_STREAMING;
		if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}
		/*
		 * Output descriptors are laid out immediately after the
		 * n_chain input descriptors already written in the
		 * context buffer.
		 */
		rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
		    n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
		    flags, &reqp->dr_chain_out_head, &n_chain);
		if (rv != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}

		/* Save the first one in the chain for MCR */
		reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
		reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
		reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
	} else {
		reqp->dr_out_paddr = NULL;
		reqp->dr_out_next = 0;
		reqp->dr_out_len = 0;
	}

	return (DDI_SUCCESS);
}
1975 1970
1976 1971 /*
1977 1972 * Unbind the user buffers from the DMA handles.
1978 1973 */
1979 1974 int
1980 1975 dca_unbindchains(dca_request_t *reqp)
1981 1976 {
1982 1977 int rv = DDI_SUCCESS;
1983 1978 int rv1 = DDI_SUCCESS;
1984 1979
1985 1980 /* Clear the input chain */
1986 1981 if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
1987 1982 (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
1988 1983 reqp->dr_chain_in_head.dc_buffer_paddr = 0;
1989 1984 }
1990 1985
1991 1986 if (reqp->dr_flags & DR_INPLACE) {
1992 1987 return (rv);
1993 1988 }
1994 1989
1995 1990 /* Clear the output chain */
1996 1991 if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
1997 1992 (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
1998 1993 reqp->dr_chain_out_head.dc_buffer_paddr = 0;
1999 1994 }
2000 1995
2001 1996 return ((rv != DDI_SUCCESS)? rv : rv1);
2002 1997 }
2003 1998
/*
 * Build either input chain or output chain. It is a single-item chain for
 * Sparc, and a possible multiple-item chain for x86.
 *
 * cnt bytes starting at kaddr are bound to "handle"; one hardware
 * descriptor per DMA cookie is written (PIO) into the request's context
 * buffer starting dr_offset bytes in.  "head" receives the physical
 * address/length of the first buffer and the physical address of the
 * second descriptor (0 if there is only one).  *n_chain is set to the
 * number of cookies/descriptors written.  Returns DDI_SUCCESS, or an
 * error after marking the request for destruction if the handle checks
 * out bad.
 */
static int
dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain)
{
	ddi_dma_cookie_t c;
	uint_t nc;
	int rv;
	caddr_t chain_kaddr_pre;
	caddr_t chain_kaddr;
	uint32_t chain_paddr;
	int i;

	/* Advance past the context structure to the starting address */
	chain_paddr = reqp->dr_ctx_paddr + dr_offset;
	chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;

	/*
	 * Bind the kernel address to the DMA handle. On x86, the actual
	 * buffer is mapped into multiple physical addresses. On Sparc,
	 * the actual buffer is mapped into a single address.
	 */
	rv = ddi_dma_addr_bind_handle(handle,
	    NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		return (DDI_FAILURE);
	}

	(void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
	if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
	    DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
		/* handle fault: caller must tear this request down */
		reqp->destroy = TRUE;
		return (rv);
	}

	*n_chain = nc;

	/* Setup the data buffer chain for DMA transfer */
	chain_kaddr_pre = NULL;
	head->dc_buffer_paddr = 0;
	head->dc_next_paddr = 0;
	head->dc_buffer_length = 0;
	for (i = 0; i < nc; i++) {
		/* PIO: write this cookie's descriptor into the ctx buffer */
		PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
		PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
		PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);

		/* Remember the head of the chain */
		if (head->dc_buffer_paddr == 0) {
			head->dc_buffer_paddr = c.dmac_address;
			head->dc_buffer_length = c.dmac_size;
		}

		/* Link to the previous one if one exists */
		if (chain_kaddr_pre) {
			PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
			    chain_paddr);
			/* second descriptor's paddr is the MCR "next" */
			if (head->dc_next_paddr == 0)
				head->dc_next_paddr = chain_paddr;
		}
		chain_kaddr_pre = chain_kaddr;

		/* Maintain pointers */
		chain_paddr += DESC_SIZE;
		chain_kaddr += DESC_SIZE;

		/* Retrieve the next cookie if there is one */
		if (i < nc-1)
			ddi_dma_nextcookie(handle, &c);
	}

	/* Set the next pointer in the last entry to NULL */
	PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);

	return (DDI_SUCCESS);
}
2085 2080
/*
 * Schedule some work.
 *
 * Syncs the request's context/descriptor buffer out to the device,
 * queues the request on the MCR's wait queue, applies high-water flow
 * control (notifying kCF with CRYPTO_PROVIDER_BUSY when the queue
 * reaches dwl_hiwater), and optionally kicks the scheduler.  Returns
 * CRYPTO_QUEUED on success or CRYPTO_DEVICE_ERROR if the context DMA
 * handle checks out bad (in which case the request is marked for
 * destruction).
 */
int
dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);

	mutex_enter(&wlp->dwl_lock);

	DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
	    reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
	    reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
	DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
	    reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
	/* sync out the entire context and descriptor chains */
	(void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
	if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		reqp->destroy = TRUE;
		mutex_exit(&wlp->dwl_lock);
		return (CRYPTO_DEVICE_ERROR);
	}

	dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
	wlp->dwl_count++;
	wlp->dwl_lastsubmit = ddi_get_lbolt();
	reqp->dr_wlp = wlp;

	if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
		/* we are fully loaded now, let kCF know */

		wlp->dwl_flowctl++;
		wlp->dwl_busy = 1;

		crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
	}

	if (dosched) {
#ifdef	SCHEDDELAY
		/* possibly wait for more work to arrive */
		if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
			dca_schedule(dca, mcr);
		} else if (!wlp->dwl_schedtid) {
			/* wait 1 msec for more work before doing it */
			wlp->dwl_schedtid = timeout(dca_schedtimeout,
			    (void *)wlp, drv_usectohz(MSEC));
		}
#else
		dca_schedule(dca, mcr);
#endif
	}
	mutex_exit(&wlp->dwl_lock);

	return (CRYPTO_QUEUED);
}
2142 2137
/*
 * Drain the wait queue into the hardware: repeatedly fill a work
 * structure (MCR) with up to dwl_reqspermcr requests, write their
 * descriptors into the MCR buffer, sync it, and post its physical
 * address to the MCR register.  Loops until the chip reports the MCR
 * register full or the wait queue is empty.  Caller must hold
 * dwl_lock.  Applies low-water flow control (CRYPTO_PROVIDER_READY)
 * as requests are drained.
 */
void
dca_schedule(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	int		csr;
	int		full;
	uint32_t	status;

	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * If the card is draining or has an outstanding failure,
	 * don't schedule any more work on it right now
	 */
	if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
		return;
	}

	/* select the register and "full" status bit for this MCR */
	if (mcr == MCR2) {
		csr = CSR_MCR2;
		full = DMASTAT_MCR2FULL;
	} else {
		csr = CSR_MCR1;
		full = DMASTAT_MCR1FULL;
	}

	for (;;) {
		dca_work_t	*workp;
		uint32_t	offset;
		int		nreqs;

		status = GETCSR(dca, CSR_DMASTAT);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return;

		/* chip cannot accept another MCR right now */
		if ((status & full) != 0)
			break;

#ifdef	SCHEDDELAY
		/* if there isn't enough to do, don't bother now */
		if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
		    (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
		    drv_usectohz(MSEC)))) {
			/* wait a bit longer... */
			if (wlp->dwl_schedtid == 0) {
				wlp->dwl_schedtid = timeout(dca_schedtimeout,
				    (void *)wlp, drv_usectohz(MSEC));
			}
			return;
		}
#endif

		/* grab a work structure */
		workp = dca_getwork(dca, mcr);

		if (workp == NULL) {
			/*
			 * There must be work ready to be reclaimed,
			 * in this case, since the chip can only hold
			 * less work outstanding than there are total.
			 */
			dca_reclaim(dca, mcr);
			continue;
		}

		nreqs = 0;
		offset = MCR_CTXADDR;

		/* pack requests into the MCR, one descriptor set each */
		while (nreqs < wlp->dwl_reqspermcr) {
			dca_request_t	*reqp;

			reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				/* nothing left to process */
				break;
			}
			/*
			 * Update flow control.
			 */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy))  {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}

			/*
			 * Context address.
			 */
			PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
			offset += 4;

			/*
			 * Input chain.
			 */
			/* input buffer address */
			PUTMCR32(workp, offset, reqp->dr_in_paddr);
			offset += 4;
			/* next input buffer entry */
			PUTMCR32(workp, offset, reqp->dr_in_next);
			offset += 4;
			/* input buffer length */
			PUTMCR16(workp, offset, reqp->dr_in_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Overall length.
			 */
			/* reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;
			/* total packet length */
			PUTMCR16(workp, offset, reqp->dr_pkt_length);
			offset += 2;

			/*
			 * Output chain.
			 */
			/* output buffer address */
			PUTMCR32(workp, offset, reqp->dr_out_paddr);
			offset += 4;
			/* next output buffer entry */
			PUTMCR32(workp, offset, reqp->dr_out_next);
			offset += 4;
			/* output buffer length */
			PUTMCR16(workp, offset, reqp->dr_out_len);
			offset += 2;
			/* zero the reserved field */
			PUTMCR16(workp, offset, 0);
			offset += 2;

			/*
			 * Note submission.
			 */
			workp->dw_reqs[nreqs] = reqp;
			nreqs++;
		}

		if (nreqs == 0) {
			/* nothing in the queue! */
			dca_freework(workp);
			return;
		}

		wlp->dwl_submit++;

		/* finalize the MCR header: flags cleared, request count set */
		PUTMCR16(workp, MCR_FLAGS, 0);
		PUTMCR16(workp, MCR_COUNT, nreqs);

		DBG(dca, DCHATTY,
		    "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
		    workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
		    nreqs, mcr);

		/* timestamp for the stale-job watchdog (dca_jobtimeout) */
		workp->dw_lbolt = ddi_get_lbolt();
		/* Make sure MCR is synced out to device. */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		}

		/* hand the MCR's physical address to the chip */
		PUTCSR(dca, csr, workp->dw_mcr_paddr);
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_destroywork(workp);
			return;
		} else {
			dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
		}

		DBG(dca, DCHATTY, "posted");
	}
}
2323 2318
/*
 * Reclaim completed work, called in interrupt context.
 *
 * Caller must hold dwl_lock.  The lock is dropped while per-request
 * completion callbacks run (dca_done can sleep on request teardown)
 * and reacquired before the next runq entry is examined, so the runq
 * may change between iterations.
 */
void
dca_reclaim(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	dca_work_t	*workp;
	ushort_t	flags;
	int		nreclaimed = 0;
	int		i;

	DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
	ASSERT(mutex_owned(&wlp->dwl_lock));
	/*
	 * For each MCR in the submitted (runq), we check to see if
	 * it has been processed. If so, then we note each individual
	 * job in the MCR, and do the completion processing for
	 * each such job.
	 */
	for (;;) {

		workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
		if (workp == NULL) {
			break;
		}

		/* only sync the MCR flags, since that's all we need */
		(void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
		    DDI_DMA_SYNC_FORKERNEL);
		if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
			dca_rmqueue((dca_listnode_t *)workp);
			dca_destroywork(workp);
			return;
		}

		flags = GETMCR16(workp, MCR_FLAGS);
		if ((flags & MCRFLAG_FINISHED) == 0) {
			/* chip is still working on it */
			DBG(dca, DRECLAIM,
			    "chip still working on it (MCR%d)", mcr);
			break;
		}

		/* its really for us, so remove it from the queue */
		dca_rmqueue((dca_listnode_t *)workp);

		/* if we were draining, signal on the cv */
		if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
			cv_signal(&wlp->dwl_cv);
		}

		/* update statistics, done under the lock */
		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];
			if (reqp == NULL) {
				continue;
			}
			if (reqp->dr_byte_stat >= 0) {
				dca->dca_stats[reqp->dr_byte_stat] +=
				    reqp->dr_pkt_length;
			}
			if (reqp->dr_job_stat >= 0) {
				dca->dca_stats[reqp->dr_job_stat]++;
			}
		}
		/* drop the lock across the completion callbacks */
		mutex_exit(&wlp->dwl_lock);

		for (i = 0; i < wlp->dwl_reqspermcr; i++) {
			dca_request_t *reqp = workp->dw_reqs[i];

			if (reqp == NULL) {
				continue;
			}

			/* Do the callback. */
			workp->dw_reqs[i] = NULL;
			dca_done(reqp, CRYPTO_SUCCESS);

			nreclaimed++;
		}

		/* now we can release the work */
		dca_freework(workp);

		mutex_enter(&wlp->dwl_lock);
	}
	DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
}
2414 2409
/*
 * Return the payload length (cd_length) of a crypto_data_t.
 */
int
dca_length(crypto_data_t *cdata)
{
	return (cdata->cd_length);
}
2420 2415
2421 2416 /*
2422 2417 * This is the callback function called from the interrupt when a kCF job
2423 2418 * completes. It does some driver-specific things, and then calls the
2424 2419 * kCF-provided callback. Finally, it cleans up the state for the work
2425 2420 * request and drops the reference count to allow for DR.
2426 2421 */
2427 2422 void
2428 2423 dca_done(dca_request_t *reqp, int err)
2429 2424 {
2430 2425 uint64_t ena = 0;
2431 2426
2432 2427 /* unbind any chains we were using */
2433 2428 if (dca_unbindchains(reqp) != DDI_SUCCESS) {
2434 2429 /* DMA failure */
2435 2430 ena = dca_ena(ena);
2436 2431 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
2437 2432 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
2438 2433 "fault on buffer DMA handle");
2439 2434 if (err == CRYPTO_SUCCESS) {
2440 2435 err = CRYPTO_DEVICE_ERROR;
2441 2436 }
2442 2437 }
2443 2438
2444 2439 if (reqp->dr_callback != NULL) {
2445 2440 reqp->dr_callback(reqp, err);
2446 2441 } else {
2447 2442 dca_freereq(reqp);
2448 2443 }
2449 2444 }
2450 2445
/*
 * Call this when a failure is detected. It will reset the chip,
 * log a message, alert kCF, and mark jobs in the runq as failed.
 *
 * "index" selects the FMA error class (DCA_FM_ECLASS_NONE suppresses
 * the ereport and just logs); "errno" is the crypto error handed to
 * each outstanding request's completion path; "mess" is a printf-style
 * format for the log message.
 */
/* ARGSUSED */
void
dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
    uint64_t ena, int errno, char *mess, ...)
{
	va_list	ap;
	char	buf[256];
	int	mcr;
	char	*eclass;
	int	have_mutex;

	/*
	 * NOTE(review): vsprintf into a fixed 256-byte buffer is
	 * unbounded -- callers must keep messages short; verify no
	 * caller can exceed it.
	 */
	va_start(ap, mess);
	(void) vsprintf(buf, mess, ap);
	va_end(ap);

	eclass = dca_fma_eclass_string(dca->dca_model, index);

	if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
	    index != DCA_FM_ECLASS_NONE) {
		ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
		    FM_EREPORT_VERS0, NULL);

		/* Report the impact of the failure to the DDI. */
		ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
	} else {
		/* Just log the error string to the message log */
		dca_error(dca, buf);
	}

	/*
	 * Indicate a failure (keeps schedule from running).
	 */
	dca->dca_flags |= DCA_FAILED;

	/*
	 * Reset the chip. This should also have as a side effect, the
	 * disabling of all interrupts from the device.
	 */
	(void) dca_reset(dca, 1);

	/*
	 * Report the failure to kCF.
	 */
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		if (WORKLIST(dca, mcr)->dwl_prov) {
			crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
			    CRYPTO_PROVIDER_FAILED);
		}
	}

	/*
	 * Return jobs not sent to hardware back to kCF.
	 */
	dca_rejectjobs(dca);

	/*
	 * From this point on, no new work should be arriving, and the
	 * chip should not be doing any active DMA.
	 */

	/*
	 * Now find all the work submitted to the device and fail
	 * them.
	 */
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp;
		int		i;

		wlp = WORKLIST(dca, mcr);

		/* skip worklists that were never initialized */
		if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
			continue;
		}
		for (;;) {
			dca_work_t	*workp;

			/*
			 * NOTE(review): mutex_tryenter here, but the
			 * mutex_exit below runs regardless of whether
			 * the tryenter succeeded -- this looks
			 * unbalanced when the lock is contended;
			 * confirm against dca_rejectjobs(), which uses
			 * the same pattern.
			 */
			have_mutex = mutex_tryenter(&wlp->dwl_lock);
			workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
			if (workp == NULL) {
				if (have_mutex)
					mutex_exit(&wlp->dwl_lock);
				break;
			}
			mutex_exit(&wlp->dwl_lock);

			/*
			 * Free up requests
			 */
			for (i = 0; i < wlp->dwl_reqspermcr; i++) {
				dca_request_t *reqp = workp->dw_reqs[i];
				if (reqp) {
					dca_done(reqp, errno);
					workp->dw_reqs[i] = NULL;
				}
			}

			mutex_enter(&wlp->dwl_lock);
			/*
			 * If waiting to drain, signal on the waiter.
			 */
			if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
				cv_signal(&wlp->dwl_cv);
			}

			/*
			 * Return the work and request structures to
			 * the free pool.
			 */
			dca_freework(workp);
			if (have_mutex)
				mutex_exit(&wlp->dwl_lock);
		}
	}

}
2571 2566
#ifdef	SCHEDDELAY
/*
 * Deferred-schedule timeout handler: clears the pending timeout id for
 * the worklist and kicks the scheduler for its MCR.
 */
void
dca_schedtimeout(void *arg)
{
	dca_worklist_t	*wlp = arg;

	mutex_enter(&wlp->dwl_lock);
	wlp->dwl_schedtid = 0;
	dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
	mutex_exit(&wlp->dwl_lock);
}
#endif
2586 2581
2587 2582 /*
2588 2583 * Check for stalled jobs.
2589 2584 */
2590 2585 void
2591 2586 dca_jobtimeout(void *arg)
2592 2587 {
2593 2588 int mcr;
2594 2589 dca_t *dca = (dca_t *)arg;
2595 2590 int hung = 0;
2596 2591
2597 2592 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2598 2593 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2599 2594 dca_work_t *workp;
2600 2595 clock_t when;
2601 2596
2602 2597 mutex_enter(&wlp->dwl_lock);
2603 2598 when = ddi_get_lbolt();
2604 2599
2605 2600 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2606 2601 if (workp == NULL) {
2607 2602 /* nothing sitting in the queue */
2608 2603 mutex_exit(&wlp->dwl_lock);
2609 2604 continue;
2610 2605 }
2611 2606
2612 2607 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
2613 2608 /* request has been queued for less than STALETIME */
2614 2609 mutex_exit(&wlp->dwl_lock);
2615 2610 continue;
2616 2611 }
2617 2612
2618 2613 /* job has been sitting around for over 1 second, badness */
2619 2614 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
2620 2615 mcr);
2621 2616
2622 2617 /* put it back in the queue, until we reset the chip */
2623 2618 hung++;
2624 2619 mutex_exit(&wlp->dwl_lock);
2625 2620 }
2626 2621
2627 2622 if (hung) {
2628 2623 dca_failure(dca, DDI_DEVICE_FAULT,
2629 2624 DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
2630 2625 "timeout processing job.)");
2631 2626 }
2632 2627
2633 2628 /* reschedule ourself */
2634 2629 mutex_enter(&dca->dca_intrlock);
2635 2630 if (dca->dca_jobtid == 0) {
2636 2631 /* timeout has been canceled, prior to DR */
2637 2632 mutex_exit(&dca->dca_intrlock);
2638 2633 return;
2639 2634 }
2640 2635
2641 2636 /* check again in 1 second */
2642 2637 dca->dca_jobtid = timeout(dca_jobtimeout, arg,
2643 2638 drv_usectohz(SECOND));
2644 2639 mutex_exit(&dca->dca_intrlock);
2645 2640 }
2646 2641
/*
 * This returns all jobs back to kCF. It assumes that processing
 * on the worklist has halted.
 *
 * Each request still on a wait queue gets its DMA chains unbound and
 * its callback invoked with EAGAIN so kCF can resubmit elsewhere.
 * Flow control is unwound as requests are removed.
 */
void
dca_rejectjobs(dca_t *dca)
{
	int mcr;
	int have_mutex;
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_request_t	*reqp;

		/* skip worklists that were never initialized */
		if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
			continue;
		}
		/*
		 * NOTE(review): tryenter, yet the mutex_exit inside the
		 * loop is unconditional -- same suspect pattern as in
		 * dca_failure(); confirm intended behavior when the
		 * lock is contended.
		 */
		have_mutex = mutex_tryenter(&wlp->dwl_lock);
		for (;;) {
			reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
			if (reqp == NULL) {
				break;
			}
			/* update flow control */
			wlp->dwl_count--;
			if ((wlp->dwl_count == wlp->dwl_lowater) &&
			    (wlp->dwl_busy)) {
				wlp->dwl_busy = 0;
				crypto_prov_notify(wlp->dwl_prov,
				    CRYPTO_PROVIDER_READY);
			}
			/* drop the lock across the unbind and callback */
			mutex_exit(&wlp->dwl_lock);

			(void) dca_unbindchains(reqp);
			reqp->dr_callback(reqp, EAGAIN);
			mutex_enter(&wlp->dwl_lock);
		}
		if (have_mutex)
			mutex_exit(&wlp->dwl_lock);
	}
}
2687 2682
/*
 * Quiesce both MCR worklists: set the drain flag (which stops
 * dca_schedule from posting new work) and wait up to STALETIME for the
 * chip to finish what it already has.  Returns 0 on success; EBUSY if
 * a runq fails to empty in time, in which case the drain is undone
 * before returning.
 */
int
dca_drain(dca_t *dca)
{
	int mcr;
	for (mcr = MCR1; mcr <= MCR2; mcr++) {
#ifdef	SCHEDDELAY
		timeout_id_t	tid;
#endif
		dca_worklist_t *wlp = WORKLIST(dca, mcr);

		mutex_enter(&wlp->dwl_lock);
		wlp->dwl_drain = 1;

		/* give it up to a second to drain from the chip */
		if (!QEMPTY(&wlp->dwl_runq)) {
			/* dca_reclaim signals dwl_cv when the runq empties */
			(void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
			    drv_usectohz(STALETIME), TR_CLOCK_TICK);

			if (!QEMPTY(&wlp->dwl_runq)) {
				dca_error(dca, "unable to drain device");
				mutex_exit(&wlp->dwl_lock);
				dca_undrain(dca);
				return (EBUSY);
			}
		}

#ifdef	SCHEDDELAY
		tid = wlp->dwl_schedtid;
		mutex_exit(&wlp->dwl_lock);

		/*
		 * untimeout outside the lock -- this is safe because we
		 * have set the drain flag, so dca_schedule() will not
		 * reschedule another timeout
		 */
		if (tid) {
			untimeout(tid);
		}
#else
		mutex_exit(&wlp->dwl_lock);
#endif
	}
	return (0);
}
2732 2727
2733 2728 void
2734 2729 dca_undrain(dca_t *dca)
2735 2730 {
2736 2731 int mcr;
2737 2732
2738 2733 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2739 2734 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2740 2735 mutex_enter(&wlp->dwl_lock);
2741 2736 wlp->dwl_drain = 0;
2742 2737 dca_schedule(dca, mcr);
2743 2738 mutex_exit(&wlp->dwl_lock);
2744 2739 }
2745 2740 }
2746 2741
2747 2742 /*
2748 2743 * Duplicate the crypto_data_t structure, but point to the original
2749 2744 * buffers.
2750 2745 */
2751 2746 int
2752 2747 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
2753 2748 {
2754 2749 ninput->cd_format = input->cd_format;
2755 2750 ninput->cd_offset = input->cd_offset;
2756 2751 ninput->cd_length = input->cd_length;
2757 2752 ninput->cd_miscdata = input->cd_miscdata;
2758 2753
2759 2754 switch (input->cd_format) {
2760 2755 case CRYPTO_DATA_RAW:
2761 2756 ninput->cd_raw.iov_base = input->cd_raw.iov_base;
2762 2757 ninput->cd_raw.iov_len = input->cd_raw.iov_len;
2763 2758 break;
2764 2759
2765 2760 case CRYPTO_DATA_UIO:
2766 2761 ninput->cd_uio = input->cd_uio;
2767 2762 break;
2768 2763
2769 2764 case CRYPTO_DATA_MBLK:
2770 2765 ninput->cd_mp = input->cd_mp;
2771 2766 break;
2772 2767
2773 2768 default:
2774 2769 DBG(NULL, DWARN,
2775 2770 "dca_dupcrypto: unrecognised crypto data format");
2776 2771 return (CRYPTO_FAILED);
2777 2772 }
2778 2773
2779 2774 return (CRYPTO_SUCCESS);
2780 2775 }
2781 2776
2782 2777 /*
2783 2778 * Performs validation checks on the input and output data structures.
2784 2779 */
2785 2780 int
2786 2781 dca_verifyio(crypto_data_t *input, crypto_data_t *output)
2787 2782 {
2788 2783 int rv = CRYPTO_SUCCESS;
2789 2784
2790 2785 switch (input->cd_format) {
2791 2786 case CRYPTO_DATA_RAW:
2792 2787 break;
2793 2788
2794 2789 case CRYPTO_DATA_UIO:
2795 2790 /* we support only kernel buffer */
2796 2791 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
2797 2792 DBG(NULL, DWARN, "non kernel input uio buffer");
2798 2793 rv = CRYPTO_ARGUMENTS_BAD;
2799 2794 }
2800 2795 break;
2801 2796
2802 2797 case CRYPTO_DATA_MBLK:
2803 2798 break;
2804 2799
2805 2800 default:
2806 2801 DBG(NULL, DWARN, "unrecognised input crypto data format");
2807 2802 rv = CRYPTO_ARGUMENTS_BAD;
2808 2803 }
2809 2804
2810 2805 switch (output->cd_format) {
2811 2806 case CRYPTO_DATA_RAW:
2812 2807 break;
2813 2808
2814 2809 case CRYPTO_DATA_UIO:
2815 2810 /* we support only kernel buffer */
2816 2811 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
2817 2812 DBG(NULL, DWARN, "non kernel output uio buffer");
2818 2813 rv = CRYPTO_ARGUMENTS_BAD;
2819 2814 }
2820 2815 break;
2821 2816
2822 2817 case CRYPTO_DATA_MBLK:
2823 2818 break;
2824 2819
2825 2820 default:
2826 2821 DBG(NULL, DWARN, "unrecognised output crypto data format");
2827 2822 rv = CRYPTO_ARGUMENTS_BAD;
2828 2823 }
2829 2824
2830 2825 return (rv);
2831 2826 }
2832 2827
/*
 * Copy 'count' bytes out of a crypto_data_t into a flat buffer without
 * consuming the source (cd_offset and cd_length are not modified, unlike
 * dca_gather()).
 *
 * data: source crypto_data_t struct
 * off: offset into the source before commencing copy
 * count: the amount of data to copy
 * dest: destination buffer
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_DATA_LEN_RANGE when the requested range
 * does not lie within the source buffer(s), or CRYPTO_ARGUMENTS_BAD for
 * an unknown cd_format.
 */
int
dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
{
	int rv = CRYPTO_SUCCESS;
	uio_t *uiop;
	uint_t vec_idx;
	size_t cur_len;
	mblk_t *mp;

	if (count == 0) {
		/* We don't want anything so we're done. */
		return (rv);
	}

	/*
	 * Sanity check that we haven't specified a length greater than the
	 * offset adjusted size of the buffer.
	 * NOTE(review): if off exceeded cd_length the unsigned subtraction
	 * would wrap and this check would pass vacuously; callers appear to
	 * pass validated offsets -- confirm.
	 */
	if (count > (data->cd_length - off)) {
		return (CRYPTO_DATA_LEN_RANGE);
	}

	/* Add the internal crypto_data offset to the requested offset. */
	off += data->cd_offset;

	switch (data->cd_format) {
	case CRYPTO_DATA_RAW:
		/* Flat buffer: a single copy suffices. */
		bcopy(data->cd_raw.iov_base + off, dest, count);
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be
		 * processed.
		 */
		uiop = data->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			vec_idx++;
			/* Only the first iovec carries a partial offset. */
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			mp = mp->b_cont;
			/* Only the first mblk carries a partial offset. */
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
2954 2949
2955 2950
/*
 * Performs the input, output or hard scatter/gather checks on the specified
 * crypto_data_t struct. Returns true if the data is scatter/gather in nature
 * ie fails the test.
 *
 * val selects the property being tested: DCA_SG_CONTIG (single segment),
 * DCA_SG_WALIGN (single segment, 32-bit aligned base and size), or
 * DCA_SG_PALIGN (page-aligned base and size, using dca->dca_pagesize).
 */
int
dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
{
	uio_t *uiop;
	mblk_t *mp;
	int rv = FALSE;

	switch (val) {
	case DCA_SG_CONTIG:
		/*
		 * Check for a contiguous data buffer.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			/* Contiguous in nature */
			break;

		case CRYPTO_DATA_UIO:
			/* More than one iovec means scatter/gather. */
			if (data->cd_uio->uio_iovcnt > 1)
				rv = TRUE;
			break;

		case CRYPTO_DATA_MBLK:
			/* A chained mblk means scatter/gather. */
			mp = data->cd_mp;
			if (mp->b_cont != NULL)
				rv = TRUE;
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_WALIGN:
		/*
		 * Check for a contiguous data buffer that is 32-bit word
		 * aligned and is of word multiples in size.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			uiop = data->cd_uio;
			if (uiop->uio_iovcnt > 1) {
				return (TRUE);
			}
			/* So there is only one iovec */
			if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			mp = data->cd_mp;
			if (mp->b_cont != NULL) {
				return (TRUE);
			}
			/* So there is only one mblk in the chain */
			if ((MBLKL(mp) % sizeof (uint32_t)) ||
			    ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	case DCA_SG_PALIGN:
		/*
		 * Check that the data buffer is page aligned and is of
		 * page multiples in size.
		 *
		 * NOTE(review): unlike DCA_SG_WALIGN above, the UIO and MBLK
		 * cases here examine only the first segment and do not
		 * reject multi-segment buffers -- confirm whether chains of
		 * page-aligned segments are intentionally allowed here.
		 */
		switch (data->cd_format) {
		case CRYPTO_DATA_RAW:
			if ((data->cd_length % dca->dca_pagesize) ||
			    ((uintptr_t)data->cd_raw.iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_UIO:
			uiop = data->cd_uio;
			if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
			    ((uintptr_t)uiop->uio_iov[0].iov_base %
			    dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		case CRYPTO_DATA_MBLK:
			mp = data->cd_mp;
			if ((MBLKL(mp) % dca->dca_pagesize) ||
			    ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
				rv = TRUE;
			}
			break;

		default:
			DBG(NULL, DWARN, "unrecognised crypto data format");
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised scatter/gather param type");
	}

	return (rv);
}
3080 3075
/*
 * Gather 'count' bytes from the crypto_data_t into the flat buffer 'dest',
 * consuming the source: cd_offset is incremented and cd_length decremented
 * as the data is gathered from the crypto_data_t struct.
 * The data is reverse-copied into the dest buffer if the flag is true.
 * Returns CRYPTO_SUCCESS, CRYPTO_DATA_LEN_RANGE if the source holds fewer
 * than 'count' bytes, or CRYPTO_ARGUMENTS_BAD for an unknown cd_format.
 */
int
dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
{
	int rv = CRYPTO_SUCCESS;
	uint_t vec_idx;
	uio_t *uiop;
	off_t off = in->cd_offset;
	size_t cur_len;
	mblk_t *mp;

	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		/* 'off' still equals in->cd_offset in this branch. */
		if (reverse)
			dca_reverse(in->cd_raw.iov_base + off, dest, count,
			    count);
		else
			bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse(uiop->uio_iov[vec_idx].iov_base +
				    off, dest+count, cur_len, cur_len);
			} else {
				bcopy(uiop->uio_iov[vec_idx].iov_base + off,
				    dest, cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			/* Only the first iovec carries a partial offset. */
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			count -= cur_len;
			if (reverse) {
				/* Fill the dest buffer from the end */
				dca_reverse((char *)(mp->b_rptr + off),
				    dest+count, cur_len, cur_len);
			} else {
				bcopy((char *)(mp->b_rptr + off), dest,
				    cur_len);
				dest += cur_len;
			}
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			/* Only the first mblk carries a partial offset. */
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3215 3210
/*
 * Gather 'count' bytes into 'dest', first draining any residual bytes held
 * in 'resid' (length *residlen) and then pulling the remainder from the
 * crypto_data_t.  cd_offset is incremented and cd_length decremented as the
 * data is gathered from the crypto_data_t struct; leftover residual bytes
 * are shuffled to the front of 'resid' and *residlen updated.
 * Returns CRYPTO_SUCCESS, CRYPTO_DATA_LEN_RANGE, or CRYPTO_ARGUMENTS_BAD.
 */
int
dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
    int count)
{
	int rv = CRYPTO_SUCCESS;
	caddr_t baddr;
	uint_t vec_idx;
	uio_t *uiop;
	off_t off = in->cd_offset;
	size_t cur_len;
	mblk_t *mp;

	/* Process the residual first */
	if (*residlen > 0) {
		uint_t num = min(count, *residlen);
		bcopy(resid, dest, num);
		*residlen -= num;
		if (*residlen > 0) {
			/*
			 * Requested amount 'count' is less than what's in
			 * the residual, so shuffle any remaining resid to
			 * the front.
			 */
			baddr = resid + num;
			bcopy(baddr, resid, *residlen);
		}
		dest += num;
		count -= num;
	}

	/* Now process what's in the crypto_data_t structs */
	switch (in->cd_format) {
	case CRYPTO_DATA_RAW:
		if (count > in->cd_length) {
			/*
			 * The caller specified a length greater than the
			 * size of the buffer.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
		in->cd_offset += count;
		in->cd_length -= count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec containing data to be processed.
		 */
		uiop = in->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    off >= uiop->uio_iov[vec_idx].iov_len;
		    off -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    off, count);
			bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
			    cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			vec_idx++;
			/* Only the first iovec carries a partial offset. */
			off = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t containing data to be processed.
		 */
		for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
		    off -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - off, count);
			bcopy((char *)(mp->b_rptr + off), dest, cur_len);
			count -= cur_len;
			dest += cur_len;
			in->cd_offset += cur_len;
			in->cd_length -= cur_len;
			mp = mp->b_cont;
			/* Only the first mblk carries a partial offset. */
			off = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN,
		    "dca_resid_gather: unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3354 3349
/*
 * Appends the data to the crypto_data_t struct increasing cd_length.
 * cd_offset is left unchanged.
 * Data is reverse-copied if the flag is TRUE.
 * Writing begins at cd_offset + cd_length (the current end of valid data).
 * Returns CRYPTO_SUCCESS, CRYPTO_DATA_LEN_RANGE if the destination has
 * insufficient space, or CRYPTO_ARGUMENTS_BAD for an unknown cd_format.
 */
int
dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
{
	int rv = CRYPTO_SUCCESS;
	off_t offset = out->cd_offset + out->cd_length;
	uint_t vec_idx;
	uio_t *uiop;
	size_t cur_len;
	mblk_t *mp;

	switch (out->cd_format) {
	case CRYPTO_DATA_RAW:
		if (out->cd_raw.iov_len - offset < count) {
			/* Trying to write out more than space available. */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		if (reverse)
			dca_reverse((void*) src, out->cd_raw.iov_base + offset,
			    count, count);
		else
			bcopy(src, out->cd_raw.iov_base + offset, count);
		out->cd_length += count;
		break;

	case CRYPTO_DATA_UIO:
		/*
		 * Jump to the first iovec that can be written to.
		 */
		uiop = out->cd_uio;
		for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
		    offset >= uiop->uio_iov[vec_idx].iov_len;
		    offset -= uiop->uio_iov[vec_idx++].iov_len)
			;
		if (vec_idx == uiop->uio_iovcnt) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now process the iovecs.
		 */
		while (vec_idx < uiop->uio_iovcnt && count > 0) {
			cur_len = min(uiop->uio_iov[vec_idx].iov_len -
			    offset, count);
			count -= cur_len;
			if (reverse) {
				/* Drain 'src' from its tail end. */
				dca_reverse((void*) (src+count),
				    uiop->uio_iov[vec_idx].iov_base +
				    offset, cur_len, cur_len);
			} else {
				bcopy(src, uiop->uio_iov[vec_idx].iov_base +
				    offset, cur_len);
				src += cur_len;
			}
			out->cd_length += cur_len;
			vec_idx++;
			/* Only the first iovec carries a partial offset. */
			offset = 0;
		}

		if (vec_idx == uiop->uio_iovcnt && count > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed
			 * (requested to write more data than space provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	case CRYPTO_DATA_MBLK:
		/*
		 * Jump to the first mblk_t that can be written to.
		 */
		for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
		    offset -= MBLKL(mp), mp = mp->b_cont)
			;
		if (mp == NULL) {
			/*
			 * The caller specified an offset that is larger than
			 * the total size of the buffers it provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}

		/*
		 * Now do the processing on the mblk chain.
		 */
		while (mp != NULL && count > 0) {
			cur_len = min(MBLKL(mp) - offset, count);
			count -= cur_len;
			if (reverse) {
				/* Drain 'src' from its tail end. */
				dca_reverse((void*) (src+count),
				    (char *)(mp->b_rptr + offset), cur_len,
				    cur_len);
			} else {
				bcopy(src, (char *)(mp->b_rptr + offset),
				    cur_len);
				src += cur_len;
			}
			out->cd_length += cur_len;
			mp = mp->b_cont;
			/* Only the first mblk carries a partial offset. */
			offset = 0;
		}

		if (mp == NULL && count > 0) {
			/*
			 * The end of the mblk was reached but the length
			 * requested could not be processed, (requested to
			 * digest more data than it provided).
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
		break;

	default:
		DBG(NULL, DWARN, "unrecognised crypto data format");
		rv = CRYPTO_ARGUMENTS_BAD;
	}
	return (rv);
}
3483 3478
/*
 * Compare two byte arrays where s1 is taken in reverse order (its last
 * byte against s2's first, and so on).
 * Return 0 if they are identical, 1 otherwise.
 *
 * Fixes vs. the previous version: the loop index was a signed int compared
 * against the size_t 'n' (signed/unsigned mismatch; would truncate for
 * n > INT_MAX), and the caddr_t casts silently discarded const.
 */
int
dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
{
	const unsigned char *src;
	const unsigned char *dst;
	size_t i;

	if (!n)
		return (0);

	src = (const unsigned char *)s1 + n - 1;
	dst = (const unsigned char *)s2;
	for (i = 0; i < n; i++) {
		if (*src != *dst)
			return (1);
		src--;
		dst++;
	}

	return (0);
}
3508 3503
3509 3504
/*
 * This calculates the size of a bignum in bits, specifically not counting
 * leading zero bits. This size calculation must be done *before* any
 * endian reversal takes place (i.e. the numbers are in absolute big-endian
 * order.)  An all-zero bignum yields a bit length of 1.
 */
int
dca_bitlen(unsigned char *bignum, int bytelen)
{
	int idx = 0;
	int bits = 1;
	unsigned int msb;

	/* Skip leading zero bytes, always retaining the final byte. */
	while (idx < bytelen - 1 && bignum[idx] == 0)
		idx++;

	/* 1-based position of the highest set bit; 1 when the byte is 0. */
	for (msb = bignum[idx]; msb >>= 1; )
		bits++;

	return ((8 * (bytelen - idx - 1)) + bits);
}
3536 3531
3537 3532 /*
3538 3533 * This compares to bignums (in big-endian order). It ignores leading
3539 3534 * null bytes. The result semantics follow bcmp, mempcmp, strcmp, etc.
3540 3535 */
3541 3536 int
3542 3537 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
3543 3538 {
3544 3539 while ((n1len > 1) && (*n1 == 0)) {
3545 3540 n1len--;
3546 3541 n1++;
3547 3542 }
3548 3543 while ((n2len > 1) && (*n2 == 0)) {
3549 3544 n2len--;
3550 3545 n2++;
3551 3546 }
3552 3547 if (n1len != n2len) {
3553 3548 return (n1len - n2len);
3554 3549 }
3555 3550 while ((n1len > 1) && (*n1 == *n2)) {
3556 3551 n1++;
3557 3552 n2++;
3558 3553 n1len--;
3559 3554 }
3560 3555 return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
3561 3556 }
3562 3557
3563 3558 /*
3564 3559 * Return array of key attributes.
3565 3560 */
3566 3561 crypto_object_attribute_t *
3567 3562 dca_get_key_attr(crypto_key_t *key)
3568 3563 {
3569 3564 if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
3570 3565 (key->ck_count == 0)) {
3571 3566 return (NULL);
3572 3567 }
3573 3568
3574 3569 return (key->ck_attrs);
3575 3570 }
3576 3571
/*
 * Look up the attribute of type 'atype' in the 'atnum'-entry attribute
 * array; if found, store its 32-bit value through valp and return
 * CRYPTO_SUCCESS, otherwise return CRYPTO_ATTRIBUTE_TYPE_INVALID.
 */
int
dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
    uint64_t atype, uint32_t *valp)
{
	crypto_object_attribute_t *bap;

	bap = dca_find_attribute(attrp, atnum, atype);
	if (bap == NULL) {
		return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
	}

	/*
	 * NOTE(review): this widens a single dereferenced element of
	 * oa_value into the uint32_t result; if oa_value is a byte pointer
	 * only the first byte is captured -- confirm this matches the
	 * encoding of the attributes looked up here.
	 */
	*valp = *bap->oa_value;

	return (CRYPTO_SUCCESS);
}
3595 3590
3596 3591 /*
3597 3592 * If attribute type exists data contains the start address of the value,
3598 3593 * and numelems contains it's length.
3599 3594 */
3600 3595 int
3601 3596 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
3602 3597 uint64_t atype, void **data, unsigned int *numelems)
3603 3598 {
3604 3599 crypto_object_attribute_t *bap;
3605 3600
3606 3601 bap = dca_find_attribute(attrp, atnum, atype);
3607 3602 if (bap == NULL) {
3608 3603 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3609 3604 }
3610 3605
3611 3606 *data = bap->oa_value;
3612 3607 *numelems = bap->oa_value_len;
3613 3608
3614 3609 return (CRYPTO_SUCCESS);
3615 3610 }
3616 3611
3617 3612 /*
3618 3613 * Finds entry of specified name. If it is not found dca_find_attribute returns
3619 3614 * NULL.
3620 3615 */
3621 3616 crypto_object_attribute_t *
3622 3617 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
3623 3618 uint64_t atype)
3624 3619 {
3625 3620 while (atnum) {
3626 3621 if (attrp->oa_type == atype)
3627 3622 return (attrp);
3628 3623 atnum--;
3629 3624 attrp++;
3630 3625 }
3631 3626 return (NULL);
3632 3627 }
3633 3628
3634 3629 /*
3635 3630 * Return the address of the first data buffer. If the data format is
3636 3631 * unrecognised return NULL.
3637 3632 */
3638 3633 caddr_t
3639 3634 dca_bufdaddr(crypto_data_t *data)
3640 3635 {
3641 3636 switch (data->cd_format) {
3642 3637 case CRYPTO_DATA_RAW:
3643 3638 return (data->cd_raw.iov_base + data->cd_offset);
3644 3639 case CRYPTO_DATA_UIO:
3645 3640 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
3646 3641 case CRYPTO_DATA_MBLK:
3647 3642 return ((char *)data->cd_mp->b_rptr + data->cd_offset);
3648 3643 default:
3649 3644 DBG(NULL, DWARN,
3650 3645 "dca_bufdaddr: unrecognised crypto data format");
3651 3646 return (NULL);
3652 3647 }
3653 3648 }
3654 3649
3655 3650 static caddr_t
3656 3651 dca_bufdaddr_out(crypto_data_t *data)
3657 3652 {
3658 3653 size_t offset = data->cd_offset + data->cd_length;
3659 3654
3660 3655 switch (data->cd_format) {
3661 3656 case CRYPTO_DATA_RAW:
3662 3657 return (data->cd_raw.iov_base + offset);
3663 3658 case CRYPTO_DATA_UIO:
3664 3659 return (data->cd_uio->uio_iov[0].iov_base + offset);
3665 3660 case CRYPTO_DATA_MBLK:
3666 3661 return ((char *)data->cd_mp->b_rptr + offset);
3667 3662 default:
3668 3663 DBG(NULL, DWARN,
3669 3664 "dca_bufdaddr_out: unrecognised crypto data format");
3670 3665 return (NULL);
3671 3666 }
3672 3667 }
3673 3668
3674 3669 /*
3675 3670 * Control entry points.
3676 3671 */
3677 3672
/* ARGSUSED */
/*
 * KCF provider-status control entry point: this provider reports no
 * transient states, so it is unconditionally ready.
 */
static void
dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
	*status = CRYPTO_PROVIDER_READY;
}
3684 3679
3685 3680 /*
3686 3681 * Cipher (encrypt/decrypt) entry points.
↓ open down ↓ |
3254 lines elided |
↑ open up ↑ |
3687 3682 */
3688 3683
3689 3684 /* ARGSUSED */
3690 3685 static int
3691 3686 dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3692 3687 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3693 3688 crypto_req_handle_t req)
3694 3689 {
3695 3690 int error = CRYPTO_FAILED;
3696 3691 dca_t *softc;
3697 - /* LINTED E_FUNC_SET_NOT_USED */
3698 - int instance;
3699 3692
3700 - /* extract softc and instance number from context */
3701 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3693 + softc = DCA_SOFTC_FROM_CTX(ctx);
3702 3694 DBG(softc, DENTRY, "dca_encrypt_init: started");
3703 3695
3704 3696 /* check mechanism */
3705 3697 switch (mechanism->cm_type) {
3706 3698 case DES_CBC_MECH_INFO_TYPE:
3707 3699 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3708 3700 DR_ENCRYPT);
3709 3701 break;
3710 3702 case DES3_CBC_MECH_INFO_TYPE:
3711 3703 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3712 3704 DR_ENCRYPT | DR_TRIPLE);
3713 3705 break;
3714 3706 case RSA_PKCS_MECH_INFO_TYPE:
3715 3707 case RSA_X_509_MECH_INFO_TYPE:
3716 3708 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3717 3709 break;
3718 3710 default:
3719 3711 cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
3720 3712 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3721 3713 error = CRYPTO_MECHANISM_INVALID;
3722 3714 }
3723 3715
3724 3716 DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);
3725 3717
3726 3718 if (error == CRYPTO_SUCCESS)
3727 3719 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3728 3720 &softc->dca_ctx_list_lock);
3729 3721
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
3730 3722 return (error);
3731 3723 }
3732 3724
3733 3725 /* ARGSUSED */
3734 3726 static int
3735 3727 dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3736 3728 crypto_data_t *ciphertext, crypto_req_handle_t req)
3737 3729 {
3738 3730 int error = CRYPTO_FAILED;
3739 3731 dca_t *softc;
3740 - /* LINTED E_FUNC_SET_NOT_USED */
3741 - int instance;
3742 3732
3743 3733 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3744 3734 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3745 3735
3746 - /* extract softc and instance number from context */
3747 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3736 + softc = DCA_SOFTC_FROM_CTX(ctx);
3748 3737 DBG(softc, DENTRY, "dca_encrypt: started");
3749 3738
3750 3739 /* handle inplace ops */
3751 3740 if (!ciphertext) {
3752 3741 dca_request_t *reqp = ctx->cc_provider_private;
3753 3742 reqp->dr_flags |= DR_INPLACE;
3754 3743 ciphertext = plaintext;
3755 3744 }
3756 3745
3757 3746 /* check mechanism */
3758 3747 switch (DCA_MECH_FROM_CTX(ctx)) {
3759 3748 case DES_CBC_MECH_INFO_TYPE:
3760 3749 error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
3761 3750 break;
3762 3751 case DES3_CBC_MECH_INFO_TYPE:
3763 3752 error = dca_3des(ctx, plaintext, ciphertext, req,
3764 3753 DR_ENCRYPT | DR_TRIPLE);
3765 3754 break;
3766 3755 case RSA_PKCS_MECH_INFO_TYPE:
3767 3756 case RSA_X_509_MECH_INFO_TYPE:
3768 3757 error = dca_rsastart(ctx, plaintext, ciphertext, req,
3769 3758 DCA_RSA_ENC);
3770 3759 break;
3771 3760 default:
3772 3761 /* Should never reach here */
3773 3762 cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
3774 3763 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3775 3764 error = CRYPTO_MECHANISM_INVALID;
3776 3765 }
3777 3766
3778 3767 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3779 3768 (error != CRYPTO_BUFFER_TOO_SMALL)) {
3780 3769 ciphertext->cd_length = 0;
3781 3770 }
3782 3771
3783 3772 DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);
3784 3773
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
3785 3774 return (error);
3786 3775 }
3787 3776
3788 3777 /* ARGSUSED */
3789 3778 static int
3790 3779 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3791 3780 crypto_data_t *ciphertext, crypto_req_handle_t req)
3792 3781 {
3793 3782 int error = CRYPTO_FAILED;
3794 3783 dca_t *softc;
3795 - /* LINTED E_FUNC_SET_NOT_USED */
3796 - int instance;
3797 3784
3798 3785 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3799 3786 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3800 3787
3801 - /* extract softc and instance number from context */
3802 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3788 + softc = DCA_SOFTC_FROM_CTX(ctx);
3803 3789 DBG(softc, DENTRY, "dca_encrypt_update: started");
3804 3790
3805 3791 /* handle inplace ops */
3806 3792 if (!ciphertext) {
3807 3793 dca_request_t *reqp = ctx->cc_provider_private;
3808 3794 reqp->dr_flags |= DR_INPLACE;
3809 3795 ciphertext = plaintext;
3810 3796 }
3811 3797
3812 3798 /* check mechanism */
3813 3799 switch (DCA_MECH_FROM_CTX(ctx)) {
3814 3800 case DES_CBC_MECH_INFO_TYPE:
3815 3801 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3816 3802 DR_ENCRYPT);
3817 3803 break;
3818 3804 case DES3_CBC_MECH_INFO_TYPE:
3819 3805 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3820 3806 DR_ENCRYPT | DR_TRIPLE);
3821 3807 break;
3822 3808 default:
3823 3809 /* Should never reach here */
3824 3810 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
3825 3811 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3826 3812 error = CRYPTO_MECHANISM_INVALID;
3827 3813 }
3828 3814
3829 3815 DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);
3830 3816
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
3831 3817 return (error);
3832 3818 }
3833 3819
3834 3820 /* ARGSUSED */
3835 3821 static int
3836 3822 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3837 3823 crypto_req_handle_t req)
3838 3824 {
3839 3825 int error = CRYPTO_FAILED;
3840 3826 dca_t *softc;
3841 - /* LINTED E_FUNC_SET_NOT_USED */
3842 - int instance;
3843 3827
3844 3828 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3845 3829 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3846 3830
3847 - /* extract softc and instance number from context */
3848 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3831 + softc = DCA_SOFTC_FROM_CTX(ctx);
3849 3832 DBG(softc, DENTRY, "dca_encrypt_final: started");
3850 3833
3851 3834 /* check mechanism */
3852 3835 switch (DCA_MECH_FROM_CTX(ctx)) {
3853 3836 case DES_CBC_MECH_INFO_TYPE:
3854 3837 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
3855 3838 break;
3856 3839 case DES3_CBC_MECH_INFO_TYPE:
3857 3840 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
3858 3841 break;
3859 3842 default:
3860 3843 /* Should never reach here */
3861 3844 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
3862 3845 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3863 3846 error = CRYPTO_MECHANISM_INVALID;
3864 3847 }
3865 3848
3866 3849 DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);
3867 3850
3868 3851 return (error);
3869 3852 }
3870 3853
3871 3854 /* ARGSUSED */
3872 3855 static int
3873 3856 dca_encrypt_atomic(crypto_provider_handle_t provider,
3874 3857 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
3875 3858 crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
3876 3859 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
3877 3860 {
3878 3861 int error = CRYPTO_FAILED;
3879 3862 dca_t *softc = (dca_t *)provider;
3880 3863
3881 3864 DBG(softc, DENTRY, "dca_encrypt_atomic: started");
3882 3865
3883 3866 if (ctx_template != NULL)
3884 3867 return (CRYPTO_ARGUMENTS_BAD);
3885 3868
3886 3869 /* handle inplace ops */
3887 3870 if (!ciphertext) {
3888 3871 ciphertext = plaintext;
3889 3872 }
3890 3873
3891 3874 /* check mechanism */
3892 3875 switch (mechanism->cm_type) {
3893 3876 case DES_CBC_MECH_INFO_TYPE:
3894 3877 error = dca_3desatomic(provider, session_id, mechanism, key,
3895 3878 plaintext, ciphertext, KM_SLEEP, req,
3896 3879 DR_ENCRYPT | DR_ATOMIC);
3897 3880 break;
3898 3881 case DES3_CBC_MECH_INFO_TYPE:
3899 3882 error = dca_3desatomic(provider, session_id, mechanism, key,
3900 3883 plaintext, ciphertext, KM_SLEEP, req,
3901 3884 DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
3902 3885 break;
3903 3886 case RSA_PKCS_MECH_INFO_TYPE:
3904 3887 case RSA_X_509_MECH_INFO_TYPE:
3905 3888 error = dca_rsaatomic(provider, session_id, mechanism, key,
3906 3889 plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
3907 3890 break;
3908 3891 default:
3909 3892 cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
3910 3893 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3911 3894 error = CRYPTO_MECHANISM_INVALID;
3912 3895 }
3913 3896
3914 3897 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
3915 3898 ciphertext->cd_length = 0;
3916 3899 }
3917 3900
3918 3901 DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);
3919 3902
3920 3903 return (error);
↓ open down ↓ |
62 lines elided |
↑ open up ↑ |
3921 3904 }
3922 3905
3923 3906 /* ARGSUSED */
3924 3907 static int
3925 3908 dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3926 3909 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3927 3910 crypto_req_handle_t req)
3928 3911 {
3929 3912 int error = CRYPTO_FAILED;
3930 3913 dca_t *softc;
3931 - /* LINTED E_FUNC_SET_NOT_USED */
3932 - int instance;
3933 3914
3934 - /* extract softc and instance number from context */
3935 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3915 + softc = DCA_SOFTC_FROM_CTX(ctx);
3936 3916 DBG(softc, DENTRY, "dca_decrypt_init: started");
3937 3917
3938 3918 /* check mechanism */
3939 3919 switch (mechanism->cm_type) {
3940 3920 case DES_CBC_MECH_INFO_TYPE:
3941 3921 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3942 3922 DR_DECRYPT);
3943 3923 break;
3944 3924 case DES3_CBC_MECH_INFO_TYPE:
3945 3925 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3946 3926 DR_DECRYPT | DR_TRIPLE);
3947 3927 break;
3948 3928 case RSA_PKCS_MECH_INFO_TYPE:
3949 3929 case RSA_X_509_MECH_INFO_TYPE:
3950 3930 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3951 3931 break;
3952 3932 default:
3953 3933 cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
3954 3934 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3955 3935 error = CRYPTO_MECHANISM_INVALID;
3956 3936 }
3957 3937
3958 3938 DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);
3959 3939
3960 3940 if (error == CRYPTO_SUCCESS)
3961 3941 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3962 3942 &softc->dca_ctx_list_lock);
3963 3943
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
3964 3944 return (error);
3965 3945 }
3966 3946
3967 3947 /* ARGSUSED */
3968 3948 static int
3969 3949 dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3970 3950 crypto_data_t *plaintext, crypto_req_handle_t req)
3971 3951 {
3972 3952 int error = CRYPTO_FAILED;
3973 3953 dca_t *softc;
3974 - /* LINTED E_FUNC_SET_NOT_USED */
3975 - int instance;
3976 3954
3977 3955 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3978 3956 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3979 3957
3980 - /* extract softc and instance number from context */
3981 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
3958 + softc = DCA_SOFTC_FROM_CTX(ctx);
3982 3959 DBG(softc, DENTRY, "dca_decrypt: started");
3983 3960
3984 3961 /* handle inplace ops */
3985 3962 if (!plaintext) {
3986 3963 dca_request_t *reqp = ctx->cc_provider_private;
3987 3964 reqp->dr_flags |= DR_INPLACE;
3988 3965 plaintext = ciphertext;
3989 3966 }
3990 3967
3991 3968 /* check mechanism */
3992 3969 switch (DCA_MECH_FROM_CTX(ctx)) {
3993 3970 case DES_CBC_MECH_INFO_TYPE:
3994 3971 error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
3995 3972 break;
3996 3973 case DES3_CBC_MECH_INFO_TYPE:
3997 3974 error = dca_3des(ctx, ciphertext, plaintext, req,
3998 3975 DR_DECRYPT | DR_TRIPLE);
3999 3976 break;
4000 3977 case RSA_PKCS_MECH_INFO_TYPE:
4001 3978 case RSA_X_509_MECH_INFO_TYPE:
4002 3979 error = dca_rsastart(ctx, ciphertext, plaintext, req,
4003 3980 DCA_RSA_DEC);
4004 3981 break;
4005 3982 default:
4006 3983 /* Should never reach here */
4007 3984 cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
4008 3985 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4009 3986 error = CRYPTO_MECHANISM_INVALID;
4010 3987 }
4011 3988
4012 3989 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
4013 3990 (error != CRYPTO_BUFFER_TOO_SMALL)) {
4014 3991 if (plaintext)
4015 3992 plaintext->cd_length = 0;
4016 3993 }
4017 3994
4018 3995 DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);
4019 3996
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
4020 3997 return (error);
4021 3998 }
4022 3999
4023 4000 /* ARGSUSED */
4024 4001 static int
4025 4002 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
4026 4003 crypto_data_t *plaintext, crypto_req_handle_t req)
4027 4004 {
4028 4005 int error = CRYPTO_FAILED;
4029 4006 dca_t *softc;
4030 - /* LINTED E_FUNC_SET_NOT_USED */
4031 - int instance;
4032 4007
4033 4008 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4034 4009 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4035 4010
4036 - /* extract softc and instance number from context */
4037 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4011 + softc = DCA_SOFTC_FROM_CTX(ctx);
4038 4012 DBG(softc, DENTRY, "dca_decrypt_update: started");
4039 4013
4040 4014 /* handle inplace ops */
4041 4015 if (!plaintext) {
4042 4016 dca_request_t *reqp = ctx->cc_provider_private;
4043 4017 reqp->dr_flags |= DR_INPLACE;
4044 4018 plaintext = ciphertext;
4045 4019 }
4046 4020
4047 4021 /* check mechanism */
4048 4022 switch (DCA_MECH_FROM_CTX(ctx)) {
4049 4023 case DES_CBC_MECH_INFO_TYPE:
4050 4024 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4051 4025 DR_DECRYPT);
4052 4026 break;
4053 4027 case DES3_CBC_MECH_INFO_TYPE:
4054 4028 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4055 4029 DR_DECRYPT | DR_TRIPLE);
4056 4030 break;
4057 4031 default:
4058 4032 /* Should never reach here */
4059 4033 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
4060 4034 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4061 4035 error = CRYPTO_MECHANISM_INVALID;
4062 4036 }
4063 4037
4064 4038 DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);
4065 4039
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
4066 4040 return (error);
4067 4041 }
4068 4042
4069 4043 /* ARGSUSED */
4070 4044 static int
4071 4045 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
4072 4046 crypto_req_handle_t req)
4073 4047 {
4074 4048 int error = CRYPTO_FAILED;
4075 4049 dca_t *softc;
4076 - /* LINTED E_FUNC_SET_NOT_USED */
4077 - int instance;
4078 4050
4079 4051 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4080 4052 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4081 4053
4082 - /* extract softc and instance number from context */
4083 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4054 + softc = DCA_SOFTC_FROM_CTX(ctx);
4084 4055 DBG(softc, DENTRY, "dca_decrypt_final: started");
4085 4056
4086 4057 /* check mechanism */
4087 4058 switch (DCA_MECH_FROM_CTX(ctx)) {
4088 4059 case DES_CBC_MECH_INFO_TYPE:
4089 4060 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
4090 4061 break;
4091 4062 case DES3_CBC_MECH_INFO_TYPE:
4092 4063 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
4093 4064 break;
4094 4065 default:
4095 4066 /* Should never reach here */
4096 4067 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
4097 4068 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4098 4069 error = CRYPTO_MECHANISM_INVALID;
4099 4070 }
4100 4071
4101 4072 DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);
4102 4073
4103 4074 return (error);
4104 4075 }
4105 4076
4106 4077 /* ARGSUSED */
4107 4078 static int
4108 4079 dca_decrypt_atomic(crypto_provider_handle_t provider,
4109 4080 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4110 4081 crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
4111 4082 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4112 4083 {
4113 4084 int error = CRYPTO_FAILED;
4114 4085 dca_t *softc = (dca_t *)provider;
4115 4086
4116 4087 DBG(softc, DENTRY, "dca_decrypt_atomic: started");
4117 4088
4118 4089 if (ctx_template != NULL)
4119 4090 return (CRYPTO_ARGUMENTS_BAD);
4120 4091
4121 4092 /* handle inplace ops */
4122 4093 if (!plaintext) {
4123 4094 plaintext = ciphertext;
4124 4095 }
4125 4096
4126 4097 /* check mechanism */
4127 4098 switch (mechanism->cm_type) {
4128 4099 case DES_CBC_MECH_INFO_TYPE:
4129 4100 error = dca_3desatomic(provider, session_id, mechanism, key,
4130 4101 ciphertext, plaintext, KM_SLEEP, req,
4131 4102 DR_DECRYPT | DR_ATOMIC);
4132 4103 break;
4133 4104 case DES3_CBC_MECH_INFO_TYPE:
4134 4105 error = dca_3desatomic(provider, session_id, mechanism, key,
4135 4106 ciphertext, plaintext, KM_SLEEP, req,
4136 4107 DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
4137 4108 break;
4138 4109 case RSA_PKCS_MECH_INFO_TYPE:
4139 4110 case RSA_X_509_MECH_INFO_TYPE:
4140 4111 error = dca_rsaatomic(provider, session_id, mechanism, key,
4141 4112 ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
4142 4113 break;
4143 4114 default:
4144 4115 cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
4145 4116 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4146 4117 error = CRYPTO_MECHANISM_INVALID;
4147 4118 }
4148 4119
4149 4120 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
4150 4121 plaintext->cd_length = 0;
4151 4122 }
4152 4123
4153 4124 DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);
4154 4125
4155 4126 return (error);
4156 4127 }
4157 4128
4158 4129 /*
4159 4130 * Sign entry points.
↓ open down ↓ |
66 lines elided |
↑ open up ↑ |
4160 4131 */
4161 4132
4162 4133 /* ARGSUSED */
4163 4134 static int
4164 4135 dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4165 4136 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4166 4137 crypto_req_handle_t req)
4167 4138 {
4168 4139 int error = CRYPTO_FAILED;
4169 4140 dca_t *softc;
4170 - /* LINTED E_FUNC_SET_NOT_USED */
4171 - int instance;
4172 4141
4173 - /* extract softc and instance number from context */
4174 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4142 + softc = DCA_SOFTC_FROM_CTX(ctx);
4175 4143 DBG(softc, DENTRY, "dca_sign_init: started\n");
4176 4144
4177 4145 if (ctx_template != NULL)
4178 4146 return (CRYPTO_ARGUMENTS_BAD);
4179 4147
4180 4148 /* check mechanism */
4181 4149 switch (mechanism->cm_type) {
4182 4150 case RSA_PKCS_MECH_INFO_TYPE:
4183 4151 case RSA_X_509_MECH_INFO_TYPE:
4184 4152 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4185 4153 break;
4186 4154 case DSA_MECH_INFO_TYPE:
4187 4155 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4188 4156 DCA_DSA_SIGN);
4189 4157 break;
4190 4158 default:
4191 4159 cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
4192 4160 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4193 4161 error = CRYPTO_MECHANISM_INVALID;
4194 4162 }
4195 4163
4196 4164 DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);
4197 4165
4198 4166 if (error == CRYPTO_SUCCESS)
4199 4167 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4200 4168 &softc->dca_ctx_list_lock);
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
4201 4169
4202 4170 return (error);
4203 4171 }
4204 4172
4205 4173 static int
4206 4174 dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
4207 4175 crypto_data_t *signature, crypto_req_handle_t req)
4208 4176 {
4209 4177 int error = CRYPTO_FAILED;
4210 4178 dca_t *softc;
4211 - /* LINTED E_FUNC_SET_NOT_USED */
4212 - int instance;
4213 4179
4214 4180 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4215 4181 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4216 4182
4217 - /* extract softc and instance number from context */
4218 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4183 + softc = DCA_SOFTC_FROM_CTX(ctx);
4219 4184 DBG(softc, DENTRY, "dca_sign: started\n");
4220 4185
4221 4186 /* check mechanism */
4222 4187 switch (DCA_MECH_FROM_CTX(ctx)) {
4223 4188 case RSA_PKCS_MECH_INFO_TYPE:
4224 4189 case RSA_X_509_MECH_INFO_TYPE:
4225 4190 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
4226 4191 break;
4227 4192 case DSA_MECH_INFO_TYPE:
4228 4193 error = dca_dsa_sign(ctx, data, signature, req);
4229 4194 break;
4230 4195 default:
4231 4196 cmn_err(CE_WARN, "dca_sign: unexpected mech type "
4232 4197 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4233 4198 error = CRYPTO_MECHANISM_INVALID;
4234 4199 }
4235 4200
4236 4201 DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);
4237 4202
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
4238 4203 return (error);
4239 4204 }
4240 4205
4241 4206 /* ARGSUSED */
4242 4207 static int
4243 4208 dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
4244 4209 crypto_req_handle_t req)
4245 4210 {
4246 4211 int error = CRYPTO_MECHANISM_INVALID;
4247 4212 dca_t *softc;
4248 - /* LINTED E_FUNC_SET_NOT_USED */
4249 - int instance;
4250 4213
4251 4214 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4252 4215 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4253 4216
4254 - /* extract softc and instance number from context */
4255 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4217 + softc = DCA_SOFTC_FROM_CTX(ctx);
4256 4218 DBG(softc, DENTRY, "dca_sign_update: started\n");
4257 4219
4258 4220 cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
4259 4221 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4260 4222
4261 4223 DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);
4262 4224
4263 4225 return (error);
4264 4226 }
4265 4227
4266 4228 /* ARGSUSED */
4267 4229 static int
4268 4230 dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4269 4231 crypto_req_handle_t req)
4270 4232 {
4271 4233 int error = CRYPTO_MECHANISM_INVALID;
4272 4234 dca_t *softc;
4273 - /* LINTED E_FUNC_SET_NOT_USED */
4274 - int instance;
4275 4235
4276 4236 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4277 4237 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4278 4238
4279 - /* extract softc and instance number from context */
4280 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4239 + softc = DCA_SOFTC_FROM_CTX(ctx);
4281 4240 DBG(softc, DENTRY, "dca_sign_final: started\n");
4282 4241
4283 4242 cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
4284 4243 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4285 4244
4286 4245 DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);
4287 4246
4288 4247 return (error);
4289 4248 }
4290 4249
4291 4250 static int
4292 4251 dca_sign_atomic(crypto_provider_handle_t provider,
4293 4252 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4294 4253 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4295 4254 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4296 4255 {
4297 4256 int error = CRYPTO_FAILED;
4298 4257 dca_t *softc = (dca_t *)provider;
4299 4258
4300 4259 DBG(softc, DENTRY, "dca_sign_atomic: started\n");
4301 4260
4302 4261 if (ctx_template != NULL)
4303 4262 return (CRYPTO_ARGUMENTS_BAD);
4304 4263
4305 4264 /* check mechanism */
4306 4265 switch (mechanism->cm_type) {
4307 4266 case RSA_PKCS_MECH_INFO_TYPE:
4308 4267 case RSA_X_509_MECH_INFO_TYPE:
4309 4268 error = dca_rsaatomic(provider, session_id, mechanism, key,
4310 4269 data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
4311 4270 break;
4312 4271 case DSA_MECH_INFO_TYPE:
4313 4272 error = dca_dsaatomic(provider, session_id, mechanism, key,
4314 4273 data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
4315 4274 break;
4316 4275 default:
4317 4276 cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
4318 4277 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4319 4278 error = CRYPTO_MECHANISM_INVALID;
4320 4279 }
4321 4280
4322 4281 DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);
4323 4282
4324 4283 return (error);
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
4325 4284 }
4326 4285
4327 4286 /* ARGSUSED */
4328 4287 static int
4329 4288 dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4330 4289 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4331 4290 crypto_req_handle_t req)
4332 4291 {
4333 4292 int error = CRYPTO_FAILED;
4334 4293 dca_t *softc;
4335 - /* LINTED E_FUNC_SET_NOT_USED */
4336 - int instance;
4337 4294
4338 - /* extract softc and instance number from context */
4339 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4295 + softc = DCA_SOFTC_FROM_CTX(ctx);
4340 4296 DBG(softc, DENTRY, "dca_sign_recover_init: started\n");
4341 4297
4342 4298 if (ctx_template != NULL)
4343 4299 return (CRYPTO_ARGUMENTS_BAD);
4344 4300
4345 4301 /* check mechanism */
4346 4302 switch (mechanism->cm_type) {
4347 4303 case RSA_PKCS_MECH_INFO_TYPE:
4348 4304 case RSA_X_509_MECH_INFO_TYPE:
4349 4305 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4350 4306 break;
4351 4307 default:
4352 4308 cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
4353 4309 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4354 4310 error = CRYPTO_MECHANISM_INVALID;
4355 4311 }
4356 4312
4357 4313 DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);
4358 4314
4359 4315 if (error == CRYPTO_SUCCESS)
4360 4316 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4361 4317 &softc->dca_ctx_list_lock);
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
4362 4318
4363 4319 return (error);
4364 4320 }
4365 4321
4366 4322 static int
4367 4323 dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
4368 4324 crypto_data_t *signature, crypto_req_handle_t req)
4369 4325 {
4370 4326 int error = CRYPTO_FAILED;
4371 4327 dca_t *softc;
4372 - /* LINTED E_FUNC_SET_NOT_USED */
4373 - int instance;
4374 4328
4375 4329 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4376 4330 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4377 4331
4378 - /* extract softc and instance number from context */
4379 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4332 + softc = DCA_SOFTC_FROM_CTX(ctx);
4380 4333 DBG(softc, DENTRY, "dca_sign_recover: started\n");
4381 4334
4382 4335 /* check mechanism */
4383 4336 switch (DCA_MECH_FROM_CTX(ctx)) {
4384 4337 case RSA_PKCS_MECH_INFO_TYPE:
4385 4338 case RSA_X_509_MECH_INFO_TYPE:
4386 4339 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
4387 4340 break;
4388 4341 default:
4389 4342 cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
4390 4343 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4391 4344 error = CRYPTO_MECHANISM_INVALID;
4392 4345 }
4393 4346
4394 4347 DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);
4395 4348
4396 4349 return (error);
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
4397 4350 }
4398 4351
4399 4352 static int
4400 4353 dca_sign_recover_atomic(crypto_provider_handle_t provider,
4401 4354 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4402 4355 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4403 4356 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4404 4357 {
4405 4358 int error = CRYPTO_FAILED;
4406 4359 dca_t *softc = (dca_t *)provider;
4407 - /* LINTED E_FUNC_SET_NOT_USED */
4408 - int instance;
4409 4360
4410 - instance = ddi_get_instance(softc->dca_dip);
4411 4361 DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");
4412 4362
4413 4363 if (ctx_template != NULL)
4414 4364 return (CRYPTO_ARGUMENTS_BAD);
4415 4365
4416 4366 /* check mechanism */
4417 4367 switch (mechanism->cm_type) {
4418 4368 case RSA_PKCS_MECH_INFO_TYPE:
4419 4369 case RSA_X_509_MECH_INFO_TYPE:
4420 4370 error = dca_rsaatomic(provider, session_id, mechanism, key,
4421 4371 data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
4422 4372 break;
4423 4373 default:
4424 4374 cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
4425 4375 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4426 4376 error = CRYPTO_MECHANISM_INVALID;
4427 4377 }
4428 4378
4429 4379 DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);
4430 4380
4431 4381 return (error);
4432 4382 }
4433 4383
4434 4384 /*
4435 4385 * Verify entry points.
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
4436 4386 */
4437 4387
4438 4388 /* ARGSUSED */
4439 4389 static int
4440 4390 dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4441 4391 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4442 4392 crypto_req_handle_t req)
4443 4393 {
4444 4394 int error = CRYPTO_FAILED;
4445 4395 dca_t *softc;
4446 - /* LINTED E_FUNC_SET_NOT_USED */
4447 - int instance;
4448 4396
4449 - /* extract softc and instance number from context */
4450 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4397 + softc = DCA_SOFTC_FROM_CTX(ctx);
4451 4398 DBG(softc, DENTRY, "dca_verify_init: started\n");
4452 4399
4453 4400 if (ctx_template != NULL)
4454 4401 return (CRYPTO_ARGUMENTS_BAD);
4455 4402
4456 4403 /* check mechanism */
4457 4404 switch (mechanism->cm_type) {
4458 4405 case RSA_PKCS_MECH_INFO_TYPE:
4459 4406 case RSA_X_509_MECH_INFO_TYPE:
4460 4407 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4461 4408 break;
4462 4409 case DSA_MECH_INFO_TYPE:
4463 4410 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4464 4411 DCA_DSA_VRFY);
4465 4412 break;
4466 4413 default:
4467 4414 cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
4468 4415 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4469 4416 error = CRYPTO_MECHANISM_INVALID;
4470 4417 }
4471 4418
4472 4419 DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);
4473 4420
4474 4421 if (error == CRYPTO_SUCCESS)
4475 4422 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4476 4423 &softc->dca_ctx_list_lock);
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
4477 4424
4478 4425 return (error);
4479 4426 }
4480 4427
4481 4428 static int
4482 4429 dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
4483 4430 crypto_req_handle_t req)
4484 4431 {
4485 4432 int error = CRYPTO_FAILED;
4486 4433 dca_t *softc;
4487 - /* LINTED E_FUNC_SET_NOT_USED */
4488 - int instance;
4489 4434
4490 4435 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4491 4436 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4492 4437
4493 - /* extract softc and instance number from context */
4494 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4438 + softc = DCA_SOFTC_FROM_CTX(ctx);
4495 4439 DBG(softc, DENTRY, "dca_verify: started\n");
4496 4440
4497 4441 /* check mechanism */
4498 4442 switch (DCA_MECH_FROM_CTX(ctx)) {
4499 4443 case RSA_PKCS_MECH_INFO_TYPE:
4500 4444 case RSA_X_509_MECH_INFO_TYPE:
4501 4445 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
4502 4446 break;
4503 4447 case DSA_MECH_INFO_TYPE:
4504 4448 error = dca_dsa_verify(ctx, data, signature, req);
4505 4449 break;
4506 4450 default:
4507 4451 cmn_err(CE_WARN, "dca_verify: unexpected mech type "
4508 4452 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4509 4453 error = CRYPTO_MECHANISM_INVALID;
4510 4454 }
4511 4455
4512 4456 DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);
4513 4457
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
4514 4458 return (error);
4515 4459 }
4516 4460
4517 4461 /* ARGSUSED */
4518 4462 static int
4519 4463 dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
4520 4464 crypto_req_handle_t req)
4521 4465 {
4522 4466 int error = CRYPTO_MECHANISM_INVALID;
4523 4467 dca_t *softc;
4524 - /* LINTED E_FUNC_SET_NOT_USED */
4525 - int instance;
4526 4468
4527 4469 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4528 4470 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4529 4471
4530 - /* extract softc and instance number from context */
4531 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4472 + softc = DCA_SOFTC_FROM_CTX(ctx);
4532 4473 DBG(softc, DENTRY, "dca_verify_update: started\n");
4533 4474
4534 4475 cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
4535 4476 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4536 4477
4537 4478 DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);
4538 4479
4539 4480 return (error);
4540 4481 }
4541 4482
4542 4483 /* ARGSUSED */
4543 4484 static int
4544 4485 dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4545 4486 crypto_req_handle_t req)
4546 4487 {
4547 4488 int error = CRYPTO_MECHANISM_INVALID;
4548 4489 dca_t *softc;
4549 - /* LINTED E_FUNC_SET_NOT_USED */
4550 - int instance;
4551 4490
4552 4491 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4553 4492 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4554 4493
4555 - /* extract softc and instance number from context */
4556 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4494 + softc = DCA_SOFTC_FROM_CTX(ctx);
4557 4495 DBG(softc, DENTRY, "dca_verify_final: started\n");
4558 4496
4559 4497 cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
4560 4498 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4561 4499
4562 4500 DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);
4563 4501
4564 4502 return (error);
4565 4503 }
4566 4504
4567 4505 static int
4568 4506 dca_verify_atomic(crypto_provider_handle_t provider,
4569 4507 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4570 4508 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4571 4509 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4572 4510 {
4573 4511 int error = CRYPTO_FAILED;
4574 4512 dca_t *softc = (dca_t *)provider;
4575 4513
4576 4514 DBG(softc, DENTRY, "dca_verify_atomic: started\n");
4577 4515
4578 4516 if (ctx_template != NULL)
4579 4517 return (CRYPTO_ARGUMENTS_BAD);
4580 4518
4581 4519 /* check mechanism */
4582 4520 switch (mechanism->cm_type) {
4583 4521 case RSA_PKCS_MECH_INFO_TYPE:
4584 4522 case RSA_X_509_MECH_INFO_TYPE:
4585 4523 error = dca_rsaatomic(provider, session_id, mechanism, key,
4586 4524 signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
4587 4525 break;
4588 4526 case DSA_MECH_INFO_TYPE:
4589 4527 error = dca_dsaatomic(provider, session_id, mechanism, key,
4590 4528 data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
4591 4529 break;
4592 4530 default:
4593 4531 cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
4594 4532 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4595 4533 error = CRYPTO_MECHANISM_INVALID;
4596 4534 }
4597 4535
4598 4536 DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);
4599 4537
4600 4538 return (error);
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
4601 4539 }
4602 4540
4603 4541 /* ARGSUSED */
4604 4542 static int
4605 4543 dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4606 4544 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4607 4545 crypto_req_handle_t req)
4608 4546 {
4609 4547 int error = CRYPTO_MECHANISM_INVALID;
4610 4548 dca_t *softc;
4611 - /* LINTED E_FUNC_SET_NOT_USED */
4612 - int instance;
4613 4549
4614 - /* extract softc and instance number from context */
4615 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4550 + softc = DCA_SOFTC_FROM_CTX(ctx);
4616 4551 DBG(softc, DENTRY, "dca_verify_recover_init: started\n");
4617 4552
4618 4553 if (ctx_template != NULL)
4619 4554 return (CRYPTO_ARGUMENTS_BAD);
4620 4555
4621 4556 /* check mechanism */
4622 4557 switch (mechanism->cm_type) {
4623 4558 case RSA_PKCS_MECH_INFO_TYPE:
4624 4559 case RSA_X_509_MECH_INFO_TYPE:
4625 4560 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4626 4561 break;
4627 4562 default:
4628 4563 cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
4629 4564 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4630 4565 }
4631 4566
4632 4567 DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);
4633 4568
4634 4569 if (error == CRYPTO_SUCCESS)
4635 4570 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4636 4571 &softc->dca_ctx_list_lock);
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
4637 4572
4638 4573 return (error);
4639 4574 }
4640 4575
4641 4576 static int
4642 4577 dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
4643 4578 crypto_data_t *data, crypto_req_handle_t req)
4644 4579 {
4645 4580 int error = CRYPTO_MECHANISM_INVALID;
4646 4581 dca_t *softc;
4647 - /* LINTED E_FUNC_SET_NOT_USED */
4648 - int instance;
4649 4582
4650 4583 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4651 4584 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4652 4585
4653 - /* extract softc and instance number from context */
4654 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4586 + softc = DCA_SOFTC_FROM_CTX(ctx);
4655 4587 DBG(softc, DENTRY, "dca_verify_recover: started\n");
4656 4588
4657 4589 /* check mechanism */
4658 4590 switch (DCA_MECH_FROM_CTX(ctx)) {
4659 4591 case RSA_PKCS_MECH_INFO_TYPE:
4660 4592 case RSA_X_509_MECH_INFO_TYPE:
4661 4593 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
4662 4594 break;
4663 4595 default:
4664 4596 cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
4665 4597 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4666 4598 }
4667 4599
4668 4600 DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);
4669 4601
4670 4602 return (error);
4671 4603 }
4672 4604
4673 4605 static int
4674 4606 dca_verify_recover_atomic(crypto_provider_handle_t provider,
4675 4607 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4676 4608 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4677 4609 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4678 4610 {
4679 4611 int error = CRYPTO_MECHANISM_INVALID;
4680 4612 dca_t *softc = (dca_t *)provider;
4681 4613
4682 4614 DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");
4683 4615
4684 4616 if (ctx_template != NULL)
4685 4617 return (CRYPTO_ARGUMENTS_BAD);
4686 4618
4687 4619 /* check mechanism */
4688 4620 switch (mechanism->cm_type) {
4689 4621 case RSA_PKCS_MECH_INFO_TYPE:
4690 4622 case RSA_X_509_MECH_INFO_TYPE:
4691 4623 error = dca_rsaatomic(provider, session_id, mechanism, key,
4692 4624 signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
4693 4625 break;
4694 4626 default:
4695 4627 cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
4696 4628 "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
4697 4629 error = CRYPTO_MECHANISM_INVALID;
4698 4630 }
4699 4631
4700 4632 DBG(softc, DENTRY,
4701 4633 "dca_verify_recover_atomic: done, err = 0x%x", error);
4702 4634
4703 4635 return (error);
4704 4636 }
4705 4637
4706 4638 /*
4707 4639 * Random number entry points.
↓ open down ↓ |
43 lines elided |
↑ open up ↑ |
4708 4640 */
4709 4641
4710 4642 /* ARGSUSED */
4711 4643 static int
4712 4644 dca_generate_random(crypto_provider_handle_t provider,
4713 4645 crypto_session_id_t session_id,
4714 4646 uchar_t *buf, size_t len, crypto_req_handle_t req)
4715 4647 {
4716 4648 int error = CRYPTO_FAILED;
4717 4649 dca_t *softc = (dca_t *)provider;
4718 - /* LINTED E_FUNC_SET_NOT_USED */
4719 - int instance;
4720 4650
4721 - instance = ddi_get_instance(softc->dca_dip);
4722 4651 DBG(softc, DENTRY, "dca_generate_random: started");
4723 4652
4724 4653 error = dca_rng(softc, buf, len, req);
4725 4654
4726 4655 DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);
4727 4656
4728 4657 return (error);
4729 4658 }
4730 4659
4731 4660 /*
4732 4661 * Context management entry points.
4733 4662 */
4734 4663
4735 4664 int
4736 4665 dca_free_context(crypto_ctx_t *ctx)
4737 4666 {
4738 4667 int error = CRYPTO_SUCCESS;
4739 4668 dca_t *softc;
4740 - /* LINTED E_FUNC_SET_NOT_USED */
4741 - int instance;
4742 4669
4743 - /* extract softc and instance number from context */
4744 - DCA_SOFTC_FROM_CTX(ctx, softc, instance);
4670 + softc = DCA_SOFTC_FROM_CTX(ctx);
4745 4671 DBG(softc, DENTRY, "dca_free_context: entered");
4746 4672
4747 4673 if (ctx->cc_provider_private == NULL)
4748 4674 return (error);
4749 4675
4750 4676 dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);
4751 4677
4752 4678 error = dca_free_context_low(ctx);
4753 4679
4754 4680 DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);
4755 4681
4756 4682 return (error);
4757 4683 }
4758 4684
4759 4685 static int
4760 4686 dca_free_context_low(crypto_ctx_t *ctx)
4761 4687 {
4762 4688 int error = CRYPTO_SUCCESS;
4763 4689
4764 4690 /* check mechanism */
4765 4691 switch (DCA_MECH_FROM_CTX(ctx)) {
4766 4692 case DES_CBC_MECH_INFO_TYPE:
4767 4693 case DES3_CBC_MECH_INFO_TYPE:
4768 4694 dca_3desctxfree(ctx);
4769 4695 break;
4770 4696 case RSA_PKCS_MECH_INFO_TYPE:
4771 4697 case RSA_X_509_MECH_INFO_TYPE:
4772 4698 dca_rsactxfree(ctx);
4773 4699 break;
4774 4700 case DSA_MECH_INFO_TYPE:
4775 4701 dca_dsactxfree(ctx);
4776 4702 break;
4777 4703 default:
4778 4704 /* Should never reach here */
4779 4705 cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
4780 4706 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4781 4707 error = CRYPTO_MECHANISM_INVALID;
4782 4708 }
4783 4709
4784 4710 return (error);
4785 4711 }
4786 4712
4787 4713
4788 4714 /* Free any unfreed private context. It is called in detach. */
4789 4715 static void
4790 4716 dca_free_context_list(dca_t *dca)
4791 4717 {
4792 4718 dca_listnode_t *node;
4793 4719 crypto_ctx_t ctx;
4794 4720
4795 4721 (void) memset(&ctx, 0, sizeof (ctx));
4796 4722 ctx.cc_provider = dca;
4797 4723
4798 4724 while ((node = dca_delist2(&dca->dca_ctx_list,
4799 4725 &dca->dca_ctx_list_lock)) != NULL) {
4800 4726 ctx.cc_provider_private = node;
4801 4727 (void) dca_free_context_low(&ctx);
4802 4728 }
4803 4729 }
4804 4730
4805 4731 static int
4806 4732 ext_info_sym(crypto_provider_handle_t prov,
4807 4733 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4808 4734 {
4809 4735 return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
4810 4736 }
4811 4737
4812 4738 static int
4813 4739 ext_info_asym(crypto_provider_handle_t prov,
4814 4740 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4815 4741 {
4816 4742 int rv;
4817 4743
4818 4744 rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
4819 4745 /* The asymmetric cipher slot supports random */
4820 4746 ext_info->ei_flags |= CRYPTO_EXTF_RNG;
4821 4747
4822 4748 return (rv);
4823 4749 }
4824 4750
4825 4751 /* ARGSUSED */
4826 4752 static int
4827 4753 ext_info_base(crypto_provider_handle_t prov,
4828 4754 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
4829 4755 {
4830 4756 dca_t *dca = (dca_t *)prov;
4831 4757 int len;
4832 4758
4833 4759 /* Label */
4834 4760 (void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
4835 4761 ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
4836 4762 len = strlen((char *)ext_info->ei_label);
4837 4763 (void) memset(ext_info->ei_label + len, ' ',
4838 4764 CRYPTO_EXT_SIZE_LABEL - len);
4839 4765
4840 4766 /* Manufacturer ID */
4841 4767 (void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
4842 4768 DCA_MANUFACTURER_ID);
4843 4769 len = strlen((char *)ext_info->ei_manufacturerID);
4844 4770 (void) memset(ext_info->ei_manufacturerID + len, ' ',
4845 4771 CRYPTO_EXT_SIZE_MANUF - len);
4846 4772
4847 4773 /* Model */
4848 4774 (void) sprintf((char *)ext_info->ei_model, dca->dca_model);
4849 4775
4850 4776 DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);
4851 4777
4852 4778 len = strlen((char *)ext_info->ei_model);
4853 4779 (void) memset(ext_info->ei_model + len, ' ',
4854 4780 CRYPTO_EXT_SIZE_MODEL - len);
4855 4781
4856 4782 /* Serial Number. Blank for Deimos */
4857 4783 (void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);
4858 4784
4859 4785 ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;
4860 4786
4861 4787 ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
4862 4788 ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
4863 4789 ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
4864 4790 ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
4865 4791 ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
4866 4792 ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
4867 4793 ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
4868 4794 ext_info->ei_hardware_version.cv_major = 0;
4869 4795 ext_info->ei_hardware_version.cv_minor = 0;
4870 4796 ext_info->ei_firmware_version.cv_major = 0;
4871 4797 ext_info->ei_firmware_version.cv_minor = 0;
4872 4798
4873 4799 /* Time. No need to be supplied for token without a clock */
4874 4800 ext_info->ei_time[0] = '\000';
4875 4801
4876 4802 return (CRYPTO_SUCCESS);
4877 4803 }
4878 4804
/*
 * Set up Fault Management (FMA) support for this instance.
 *
 * The requested capability set (ereports, access checks, DMA checks,
 * error callback) may be overridden by an "fm-capable" property in
 * dca.conf.  NOTE(review): the order here matters — the register/DMA
 * attribute flags must be set before ddi_fm_init() so that handles
 * allocated later inherit the error-flagging behavior.  Called from
 * attach; paired with dca_fma_fini() in detach.
 */
static void
dca_fma_init(dca_t *dca)
{
	ddi_iblock_cookie_t fm_ibc;
	int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
	    DDI_FM_ERRCB_CAPABLE;

	/* Read FMA capabilities from dca.conf file (if present) */
	dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    fm_capabilities);

	DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);

	/* Only register with IO Fault Services if we have some capability */
	if (dca->fm_capabilities) {
		/* flag register/DMA accesses for error detection */
		dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities))
			pci_ereport_setup(dca->dca_dip);

		/*
		 * Initialize callback mutex and register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
			    (void *)dca);
		}
	} else {
		/*
		 * These fields have to be cleared of FMA if there are no
		 * FMA capabilities at runtime.
		 */
		dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dca_dmaattr.dma_attr_flags = 0;
	}
}
4928 4854
4929 4855
/*
 * Tear down the FMA support set up by dca_fma_init().  Unwinds in the
 * reverse order of registration: pci ereport resources, the error
 * callback, then the IO Fault Services registration itself.  A no-op
 * if no capabilities were ever registered.  Called from detach.
 */
static void
dca_fma_fini(dca_t *dca)
{
	/* Only unregister FMA capabilities if we registered some */
	if (dca->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			pci_ereport_teardown(dca->dca_dip);
		}

		/*
		 * Free callback mutex and un-register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_unregister(dca->dca_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(dca->dca_dip);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));
	}
}
4958 4884
4959 4885
4960 4886 /*
4961 4887 * The IO fault service error handling callback function
4962 4888 */
4963 4889 /*ARGSUSED*/
4964 4890 static int
4965 4891 dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4966 4892 {
4967 4893 dca_t *dca = (dca_t *)impl_data;
4968 4894
4969 4895 pci_ereport_post(dip, err, NULL);
4970 4896 if (err->fme_status == DDI_FM_FATAL) {
4971 4897 dca_failure(dca, DDI_DATAPATH_FAULT,
4972 4898 DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
4973 4899 "fault PCI in FMA callback.");
4974 4900 }
4975 4901 return (err->fme_status);
4976 4902 }
4977 4903
4978 4904
4979 4905 static int
4980 4906 dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
4981 4907 dca_fma_eclass_t eclass_index)
4982 4908 {
4983 4909 ddi_fm_error_t de;
4984 4910 int version = 0;
4985 4911
4986 4912 ddi_fm_acc_err_get(handle, &de, version);
4987 4913 if (de.fme_status != DDI_FM_OK) {
4988 4914 dca_failure(dca, DDI_DATAPATH_FAULT,
4989 4915 eclass_index, fm_ena_increment(de.fme_ena),
4990 4916 CRYPTO_DEVICE_ERROR, "");
4991 4917 return (DDI_FAILURE);
4992 4918 }
4993 4919
4994 4920 return (DDI_SUCCESS);
4995 4921 }
4996 4922
4997 4923 int
4998 4924 dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
4999 4925 dca_fma_eclass_t eclass_index)
5000 4926 {
5001 4927 ddi_fm_error_t de;
5002 4928 int version = 0;
5003 4929
5004 4930 ddi_fm_dma_err_get(handle, &de, version);
5005 4931 if (de.fme_status != DDI_FM_OK) {
5006 4932 dca_failure(dca, DDI_DATAPATH_FAULT,
5007 4933 eclass_index, fm_ena_increment(de.fme_ena),
5008 4934 CRYPTO_DEVICE_ERROR, "");
5009 4935 return (DDI_FAILURE);
5010 4936 }
5011 4937 return (DDI_SUCCESS);
5012 4938 }
5013 4939
5014 4940 static uint64_t
5015 4941 dca_ena(uint64_t ena)
5016 4942 {
5017 4943 if (ena == 0)
5018 4944 ena = fm_ena_generate(0, FM_ENA_FMT1);
5019 4945 else
5020 4946 ena = fm_ena_increment(ena);
5021 4947 return (ena);
5022 4948 }
5023 4949
5024 4950 static char *
5025 4951 dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
5026 4952 {
5027 4953 if (strstr(model, "500"))
5028 4954 return (dca_fma_eclass_sca500[index]);
5029 4955 else
5030 4956 return (dca_fma_eclass_sca1000[index]);
5031 4957 }
↓ open down ↓ |
277 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX