fixup .text where possible
additional style updates in crypto
7127 remove -Wno-missing-braces from Makefile.uts
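These hunks are driven by 7127: once -Wno-missing-braces is dropped from
Makefile.uts, gcc's -Wmissing-braces warns wherever an aggregate with a
nested aggregate member (an array, struct, or union) is initialized with a
single flat brace level, as the old modlinkage, crypto_ops_t, and
crypto_provider_info_t initializers were. The fixes below either add the
nested braces explicitly (the extra brace levels on dca_crypto_ops2 and the
provider-info structures mirror the union/struct nesting those SPI types use
for versioning, e.g. the cou.cou_v1 union member visible in the diff) or
switch to designated initializers, which name each member and avoid the
warning altogether. A minimal sketch of the warning and both fixes, using
hypothetical types rather than the real SPI structures:

	/* An outer aggregate with a nested aggregate member. */
	struct inner {
		int	a;
		int	b;
	};
	struct outer {
		int		tag;
		struct inner	in;	/* nested aggregate */
	};

	/* Valid C, but -Wmissing-braces warns: the inner struct is unbraced. */
	struct outer flat = { 1, 2, 3 };

	/* Fix 1: brace the nested member (cf. the modlinkage hunk below). */
	struct outer braced = { 1, { 2, 3 } };

	/* Fix 2: designated initializers (cf. the dca_crypto_ops1 hunk). */
	struct outer designated = {
		.tag = 1,
		.in = { .a = 2, .b = 3 },
	};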
--- old/usr/src/uts/common/crypto/io/dca.c
+++ new/usr/src/uts/common/crypto/io/dca.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27
28 28 /*
29 29 * Deimos - cryptographic acceleration based upon Broadcom 582x.
30 30 */
31 31
32 32 #include <sys/types.h>
33 33 #include <sys/modctl.h>
34 34 #include <sys/conf.h>
35 35 #include <sys/devops.h>
36 36 #include <sys/ddi.h>
37 37 #include <sys/sunddi.h>
38 38 #include <sys/cmn_err.h>
39 39 #include <sys/varargs.h>
40 40 #include <sys/file.h>
41 41 #include <sys/stat.h>
42 42 #include <sys/kmem.h>
43 43 #include <sys/ioccom.h>
44 44 #include <sys/open.h>
45 45 #include <sys/cred.h>
46 46 #include <sys/kstat.h>
47 47 #include <sys/strsun.h>
48 48 #include <sys/note.h>
49 49 #include <sys/crypto/common.h>
50 50 #include <sys/crypto/spi.h>
51 51 #include <sys/ddifm.h>
52 52 #include <sys/fm/protocol.h>
53 53 #include <sys/fm/util.h>
54 54 #include <sys/fm/io/ddi.h>
55 55 #include <sys/crypto/dca.h>
56 56
57 57 /*
58 58 * Core Deimos driver.
59 59 */
60 60
61 61 static void dca_enlist2(dca_listnode_t *, dca_listnode_t *,
62 62 kmutex_t *);
63 63 static void dca_rmlist2(dca_listnode_t *node, kmutex_t *);
64 64 static dca_listnode_t *dca_delist2(dca_listnode_t *q, kmutex_t *);
65 65 static void dca_free_context_list(dca_t *dca);
66 66 static int dca_free_context_low(crypto_ctx_t *ctx);
67 67 static int dca_attach(dev_info_t *, ddi_attach_cmd_t);
68 68 static int dca_detach(dev_info_t *, ddi_detach_cmd_t);
69 69 static int dca_suspend(dca_t *);
70 70 static int dca_resume(dca_t *);
71 71 static int dca_init(dca_t *);
72 72 static int dca_reset(dca_t *, int);
73 73 static int dca_initworklist(dca_t *, dca_worklist_t *);
74 74 static void dca_uninit(dca_t *);
75 75 static void dca_initq(dca_listnode_t *);
76 76 static void dca_enqueue(dca_listnode_t *, dca_listnode_t *);
77 77 static dca_listnode_t *dca_dequeue(dca_listnode_t *);
78 78 static dca_listnode_t *dca_unqueue(dca_listnode_t *);
79 79 static dca_request_t *dca_newreq(dca_t *);
80 80 static dca_work_t *dca_getwork(dca_t *, int);
81 81 static void dca_freework(dca_work_t *);
82 82 static dca_work_t *dca_newwork(dca_t *);
83 83 static void dca_destroywork(dca_work_t *);
84 84 static void dca_schedule(dca_t *, int);
85 85 static void dca_reclaim(dca_t *, int);
86 86 static uint_t dca_intr(char *);
87 87 static void dca_failure(dca_t *, ddi_fault_location_t,
88 88 dca_fma_eclass_t index, uint64_t, int, char *, ...);
89 89 static void dca_jobtimeout(void *);
90 90 static int dca_drain(dca_t *);
91 91 static void dca_undrain(dca_t *);
92 92 static void dca_rejectjobs(dca_t *);
93 93
94 94 #ifdef SCHEDDELAY
95 95 static void dca_schedtimeout(void *);
96 96 #endif
97 97
98 98 /*
99 99 * We want these inlined for performance.
100 100 */
101 101 #ifndef DEBUG
102 102 #pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
103 103 #pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
104 104 #pragma inline(dca_reverse, dca_length)
105 105 #endif
106 106
107 107 /*
108 108 * Device operations.
109 109 */
110 110 static struct dev_ops devops = {
111 111 DEVO_REV, /* devo_rev */
112 112 0, /* devo_refcnt */
113 113 nodev, /* devo_getinfo */
114 114 nulldev, /* devo_identify */
115 115 nulldev, /* devo_probe */
116 116 dca_attach, /* devo_attach */
117 117 dca_detach, /* devo_detach */
118 118 nodev, /* devo_reset */
119 119 NULL, /* devo_cb_ops */
120 120 NULL, /* devo_bus_ops */
121 121 ddi_power, /* devo_power */
122 122 ddi_quiesce_not_supported, /* devo_quiesce */
123 123 };
124 124
125 125 #define IDENT "PCI Crypto Accelerator"
126 126 #define IDENT_SYM "Crypto Accel Sym 2.0"
127 127 #define IDENT_ASYM "Crypto Accel Asym 2.0"
128 128
129 129 /* Space-padded, will be filled in dynamically during registration */
130 130 #define IDENT3 "PCI Crypto Accelerator Mod 2.0"
131 131
132 132 #define VENDOR "Sun Microsystems, Inc."
133 133
134 134 #define STALETIME (30 * SECOND)
135 135
136 136 #define crypto_prov_notify crypto_provider_notification
137 137 /* A 28 char function name doesn't leave much line space */
138 138
139 139 /*
140 140 * Module linkage.
141 141 */
142 142 static struct modldrv modldrv = {
143 143 &mod_driverops, /* drv_modops */
144 144 IDENT, /* drv_linkinfo */
145 145 &devops, /* drv_dev_ops */
146 146 };
147 147
148 148 extern struct mod_ops mod_cryptoops;
149 149
150 150 static struct modlcrypto modlcrypto = {
151 151 &mod_cryptoops,
152 152 IDENT3
153 153 };
154 154
155 155 static struct modlinkage modlinkage = {
156 156 MODREV_1, /* ml_rev */
157 - &modldrv, /* ml_linkage */
158 - &modlcrypto,
159 - NULL
157 + { &modldrv, /* ml_linkage */
158 + &modlcrypto,
159 + NULL }
160 160 };
161 161
162 162 /*
163 163 * CSPI information (entry points, provider info, etc.)
164 164 */
165 165
166 166 /* Mechanisms for the symmetric cipher provider */
167 167 static crypto_mech_info_t dca_mech_info_tab1[] = {
168 168 /* DES-CBC */
169 169 {SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
170 170 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
171 171 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
172 172 DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
173 173 /* 3DES-CBC */
174 174 {SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
175 175 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
176 176 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
177 177 DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
178 178 };
179 179
180 180 /* Mechanisms for the asymmetric cipher provider */
181 181 static crypto_mech_info_t dca_mech_info_tab2[] = {
182 182 /* DSA */
183 183 {SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
184 184 CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
185 185 CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
186 186 CRYPTO_BYTES2BITS(DSA_MIN_KEY_LEN),
187 187 CRYPTO_BYTES2BITS(DSA_MAX_KEY_LEN),
188 188 CRYPTO_KEYSIZE_UNIT_IN_BITS},
189 189
190 190 /* RSA */
191 191 {SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
192 192 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
193 193 CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
194 194 CRYPTO_FG_VERIFY_RECOVER |
195 195 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
196 196 CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
197 197 CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
198 198 CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
199 199 CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
200 200 CRYPTO_KEYSIZE_UNIT_IN_BITS},
201 201 {SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
202 202 CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
203 203 CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
204 204 CRYPTO_FG_VERIFY_RECOVER |
205 205 CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
206 206 CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
207 207 CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
208 208 CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
209 209 CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
210 210 CRYPTO_KEYSIZE_UNIT_IN_BITS}
211 211 };
212 212
213 213 static void dca_provider_status(crypto_provider_handle_t, uint_t *);
214 214
215 215 static crypto_control_ops_t dca_control_ops = {
216 216 dca_provider_status
217 217 };
218 218
219 219 static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
220 220 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
221 221 static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
222 222 crypto_req_handle_t);
223 223 static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
224 224 crypto_data_t *, crypto_req_handle_t);
225 225 static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
226 226 crypto_req_handle_t);
227 227 static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
228 228 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
229 229 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
230 230
231 231 static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
232 232 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
233 233 static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
234 234 crypto_req_handle_t);
235 235 static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
236 236 crypto_data_t *, crypto_req_handle_t);
237 237 static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
238 238 crypto_req_handle_t);
239 239 static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
240 240 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
241 241 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
242 242
243 243 static crypto_cipher_ops_t dca_cipher_ops = {
244 244 dca_encrypt_init,
245 245 dca_encrypt,
246 246 dca_encrypt_update,
247 247 dca_encrypt_final,
248 248 dca_encrypt_atomic,
249 249 dca_decrypt_init,
250 250 dca_decrypt,
251 251 dca_decrypt_update,
252 252 dca_decrypt_final,
253 253 dca_decrypt_atomic
254 254 };
255 255
256 256 static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
257 257 crypto_spi_ctx_template_t, crypto_req_handle_t);
258 258 static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
259 259 crypto_req_handle_t);
260 260 static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
261 261 crypto_req_handle_t);
262 262 static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
263 263 crypto_req_handle_t);
264 264 static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
265 265 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
266 266 crypto_spi_ctx_template_t, crypto_req_handle_t);
267 267 static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
268 268 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
269 269 static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
270 270 crypto_req_handle_t);
271 271 static int dca_sign_recover_atomic(crypto_provider_handle_t,
272 272 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
273 273 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
274 274
275 275 static crypto_sign_ops_t dca_sign_ops = {
276 276 dca_sign_init,
277 277 dca_sign,
278 278 dca_sign_update,
279 279 dca_sign_final,
280 280 dca_sign_atomic,
281 281 dca_sign_recover_init,
282 282 dca_sign_recover,
283 283 dca_sign_recover_atomic
284 284 };
285 285
286 286 static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
287 287 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
288 288 static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
289 289 crypto_req_handle_t);
290 290 static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
291 291 crypto_req_handle_t);
292 292 static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
293 293 crypto_req_handle_t);
294 294 static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
295 295 crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
296 296 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
297 297 static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
298 298 crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
299 299 static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
300 300 crypto_data_t *, crypto_req_handle_t);
301 301 static int dca_verify_recover_atomic(crypto_provider_handle_t,
302 302 crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
303 303 crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
304 304
305 305 static crypto_verify_ops_t dca_verify_ops = {
306 306 dca_verify_init,
307 307 dca_verify,
308 308 dca_verify_update,
309 309 dca_verify_final,
310 310 dca_verify_atomic,
311 311 dca_verify_recover_init,
312 312 dca_verify_recover,
313 313 dca_verify_recover_atomic
314 314 };
315 315
316 316 static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
317 317 uchar_t *, size_t, crypto_req_handle_t);
318 318
319 319 static crypto_random_number_ops_t dca_random_number_ops = {
320 320 NULL,
321 321 dca_generate_random
322 322 };
323 323
324 324 static int ext_info_sym(crypto_provider_handle_t prov,
325 325 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
326 326 static int ext_info_asym(crypto_provider_handle_t prov,
327 327 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
328 328 static int ext_info_base(crypto_provider_handle_t prov,
329 329 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);
330 330
331 331 static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
332 332 ext_info_sym, /* ext_info */
333 333 NULL, /* init_token */
334 334 NULL, /* init_pin */
335 335 NULL /* set_pin */
336 336 };
337 337
338 338 static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
339 339 ext_info_asym, /* ext_info */
340 340 NULL, /* init_token */
341 341 NULL, /* init_pin */
342 342 NULL /* set_pin */
343 343 };
344 344
345 345 int dca_free_context(crypto_ctx_t *);
346 346
347 347 static crypto_ctx_ops_t dca_ctx_ops = {
348 348 NULL,
349 349 dca_free_context
350 350 };
351 351
352 352 /* Operations for the symmetric cipher provider */
353 353 static crypto_ops_t dca_crypto_ops1 = {
354 - &dca_control_ops,
355 - NULL, /* digest_ops */
356 - &dca_cipher_ops,
357 - NULL, /* mac_ops */
358 - NULL, /* sign_ops */
359 - NULL, /* verify_ops */
360 - NULL, /* dual_ops */
361 - NULL, /* cipher_mac_ops */
362 - NULL, /* random_number_ops */
363 - NULL, /* session_ops */
364 - NULL, /* object_ops */
365 - NULL, /* key_ops */
366 - &dca_provmanage_ops_1, /* management_ops */
367 - &dca_ctx_ops
354 + .co_control_ops = &dca_control_ops,
355 + .co_digest_ops = NULL,
356 + .co_cipher_ops = &dca_cipher_ops,
357 + .co_mac_ops = NULL,
358 + .co_sign_ops = NULL,
359 + .co_verify_ops = NULL,
360 + .co_dual_ops = NULL,
361 + .co_dual_cipher_mac_ops = NULL,
362 + .co_random_ops = NULL,
363 + .co_session_ops = NULL,
364 + .co_object_ops = NULL,
365 + .co_key_ops = NULL,
366 + .co_provider_ops = &dca_provmanage_ops_1,
367 + .co_ctx_ops = &dca_ctx_ops
368 368 };
369 369
370 370 /* Operations for the asymmetric cipher provider */
371 -static crypto_ops_t dca_crypto_ops2 = {
371 +static crypto_ops_t dca_crypto_ops2 = { .cou.cou_v1 = {
372 372 &dca_control_ops,
373 373 NULL, /* digest_ops */
374 374 &dca_cipher_ops,
375 375 NULL, /* mac_ops */
376 376 &dca_sign_ops,
377 377 &dca_verify_ops,
378 378 NULL, /* dual_ops */
379 379 NULL, /* cipher_mac_ops */
380 380 &dca_random_number_ops,
381 381 NULL, /* session_ops */
382 382 NULL, /* object_ops */
383 383 NULL, /* key_ops */
384 384 &dca_provmanage_ops_2, /* management_ops */
385 385 &dca_ctx_ops
386 -};
386 +}};
387 387
388 388 /* Provider information for the symmetric cipher provider */
389 -static crypto_provider_info_t dca_prov_info1 = {
389 +static crypto_provider_info_t dca_prov_info1 = {{{{
390 390 CRYPTO_SPI_VERSION_1,
391 391 NULL, /* pi_provider_description */
392 392 CRYPTO_HW_PROVIDER,
393 - NULL, /* pi_provider_dev */
393 + { NULL }, /* pi_provider_dev */
394 394 NULL, /* pi_provider_handle */
395 395 &dca_crypto_ops1,
396 396 sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t),
397 397 dca_mech_info_tab1,
398 398 0, /* pi_logical_provider_count */
399 399 NULL /* pi_logical_providers */
400 -};
400 +}}}};
401 401
402 402 /* Provider information for the asymmetric cipher provider */
403 -static crypto_provider_info_t dca_prov_info2 = {
403 +static crypto_provider_info_t dca_prov_info2 = {{{{
404 404 CRYPTO_SPI_VERSION_1,
405 405 NULL, /* pi_provider_description */
406 406 CRYPTO_HW_PROVIDER,
407 - NULL, /* pi_provider_dev */
407 + { NULL }, /* pi_provider_dev */
408 408 NULL, /* pi_provider_handle */
409 409 &dca_crypto_ops2,
410 410 sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t),
411 411 dca_mech_info_tab2,
412 412 0, /* pi_logical_provider_count */
413 413 NULL /* pi_logical_providers */
414 -};
414 +}}}};
415 415
416 416 /* Convenience macros */
417 417 #define DCA_SOFTC_FROM_CTX(ctx) ((dca_t *)(ctx)->cc_provider)
418 418 #define DCA_MECH_FROM_CTX(ctx) \
419 419 (((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)
420 420
421 421 static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
422 422 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
423 423 dca_chain_t *head, int *n_chain);
424 424 static uint64_t dca_ena(uint64_t ena);
425 425 static caddr_t dca_bufdaddr_out(crypto_data_t *data);
426 426 static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
427 427 static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
428 428 dca_fma_eclass_t eclass_index);
429 429
430 430 static void dca_fma_init(dca_t *dca);
431 431 static void dca_fma_fini(dca_t *dca);
432 432 static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
433 433 const void *impl_data);
434 434
435 435
436 436 static dca_device_t dca_devices[] = {
437 437 /* Broadcom vanilla variants */
438 438 { 0x14e4, 0x5820, "Broadcom 5820" },
439 439 { 0x14e4, 0x5821, "Broadcom 5821" },
440 440 { 0x14e4, 0x5822, "Broadcom 5822" },
441 441 { 0x14e4, 0x5825, "Broadcom 5825" },
 442 442 	/* Sun-specific OEM'd variants */
443 443 { 0x108e, 0x5454, "SCA" },
444 444 { 0x108e, 0x5455, "SCA 1000" },
445 445 { 0x108e, 0x5457, "SCA 500" },
446 446 /* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
447 447 { 0x108e, 0x1, "SCA 500" },
448 448 };
449 449
450 450 /*
451 451 * Device attributes.
452 452 */
453 453 static struct ddi_device_acc_attr dca_regsattr = {
454 454 DDI_DEVICE_ATTR_V1,
455 455 DDI_STRUCTURE_LE_ACC,
456 456 DDI_STRICTORDER_ACC,
457 457 DDI_FLAGERR_ACC
458 458 };
459 459
460 460 static struct ddi_device_acc_attr dca_devattr = {
461 461 DDI_DEVICE_ATTR_V0,
462 462 DDI_STRUCTURE_LE_ACC,
463 463 DDI_STRICTORDER_ACC
464 464 };
465 465
466 466 #if !defined(i386) && !defined(__i386)
467 467 static struct ddi_device_acc_attr dca_bufattr = {
468 468 DDI_DEVICE_ATTR_V0,
469 469 DDI_NEVERSWAP_ACC,
470 470 DDI_STRICTORDER_ACC
471 471 };
472 472 #endif
473 473
474 474 static struct ddi_dma_attr dca_dmaattr = {
475 475 DMA_ATTR_V0, /* dma_attr_version */
476 476 0x0, /* dma_attr_addr_lo */
477 477 0xffffffffUL, /* dma_attr_addr_hi */
478 478 0x00ffffffUL, /* dma_attr_count_max */
479 479 0x40, /* dma_attr_align */
480 480 0x40, /* dma_attr_burstsizes */
481 481 0x1, /* dma_attr_minxfer */
482 482 0x00ffffffUL, /* dma_attr_maxxfer */
483 483 0xffffffffUL, /* dma_attr_seg */
484 484 #if defined(i386) || defined(__i386) || defined(__amd64)
485 485 512, /* dma_attr_sgllen */
486 486 #else
487 487 1, /* dma_attr_sgllen */
488 488 #endif
489 489 1, /* dma_attr_granular */
490 490 DDI_DMA_FLAGERR /* dma_attr_flags */
491 491 };
492 492
493 493 static void *dca_state = NULL;
494 494 int dca_mindma = 2500;
495 495
496 496 /*
497 497 * FMA eclass string definitions. Note that these string arrays must be
498 498 * consistent with the dca_fma_eclass_t enum.
499 499 */
500 500 static char *dca_fma_eclass_sca1000[] = {
501 501 "sca1000.hw.device",
502 502 "sca1000.hw.timeout",
503 503 "sca1000.none"
504 504 };
505 505
506 506 static char *dca_fma_eclass_sca500[] = {
507 507 "sca500.hw.device",
508 508 "sca500.hw.timeout",
509 509 "sca500.none"
510 510 };
511 511
512 512 /*
513 513 * DDI entry points.
514 514 */
515 515 int
516 516 _init(void)
517 517 {
518 518 int rv;
519 519
520 520 DBG(NULL, DMOD, "dca: in _init");
521 521
522 522 if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
523 523 /* this should *never* happen! */
524 524 return (rv);
525 525 }
526 526
527 527 if ((rv = mod_install(&modlinkage)) != 0) {
528 528 /* cleanup here */
529 529 ddi_soft_state_fini(&dca_state);
530 530 return (rv);
531 531 }
532 532
533 533 return (0);
534 534 }
535 535
536 536 int
537 537 _fini(void)
538 538 {
539 539 int rv;
540 540
541 541 DBG(NULL, DMOD, "dca: in _fini");
542 542
543 543 if ((rv = mod_remove(&modlinkage)) == 0) {
544 544 /* cleanup here */
545 545 ddi_soft_state_fini(&dca_state);
546 546 }
547 547 return (rv);
548 548 }
549 549
550 550 int
551 551 _info(struct modinfo *modinfop)
552 552 {
553 553 DBG(NULL, DMOD, "dca: in _info");
554 554
555 555 return (mod_info(&modlinkage, modinfop));
556 556 }
557 557
558 558 int
559 559 dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
560 560 {
561 561 ddi_acc_handle_t pci;
562 562 int instance;
563 563 ddi_iblock_cookie_t ibc;
564 564 int intr_added = 0;
565 565 dca_t *dca;
566 566 ushort_t venid;
567 567 ushort_t devid;
568 568 ushort_t revid;
569 569 ushort_t subsysid;
570 570 ushort_t subvenid;
571 571 int i;
572 572 int ret;
573 573 char ID[64];
574 574 static char *unknowndev = "Unknown device";
575 575
576 576 #if DEBUG
577 577 /* these are only used for debugging */
578 578 ushort_t pcicomm;
579 579 ushort_t pcistat;
580 580 uchar_t cachelinesz;
581 581 uchar_t mingnt;
582 582 uchar_t maxlat;
583 583 uchar_t lattmr;
584 584 #endif
585 585
586 586 instance = ddi_get_instance(dip);
587 587
588 588 DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);
589 589
590 590 switch (cmd) {
591 591 case DDI_RESUME:
592 592 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
 593 593 			dca_diperror(dip, "no soft state in attach");
594 594 return (DDI_FAILURE);
595 595 }
596 596 /* assumption: we won't be DDI_DETACHed until we return */
597 597 return (dca_resume(dca));
598 598 case DDI_ATTACH:
599 599 break;
600 600 default:
601 601 return (DDI_FAILURE);
602 602 }
603 603
604 604 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
605 605 dca_diperror(dip, "slot does not support PCI bus-master");
606 606 return (DDI_FAILURE);
607 607 }
608 608
609 609 if (ddi_intr_hilevel(dip, 0) != 0) {
610 610 dca_diperror(dip, "hilevel interrupts not supported");
611 611 return (DDI_FAILURE);
612 612 }
613 613
614 614 if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
615 615 dca_diperror(dip, "unable to setup PCI config handle");
616 616 return (DDI_FAILURE);
617 617 }
618 618
619 619 /* common PCI attributes */
620 620 venid = pci_config_get16(pci, PCI_VENID);
621 621 devid = pci_config_get16(pci, PCI_DEVID);
622 622 revid = pci_config_get8(pci, PCI_REVID);
623 623 subvenid = pci_config_get16(pci, PCI_SUBVENID);
624 624 subsysid = pci_config_get16(pci, PCI_SUBSYSID);
625 625
626 626 /*
627 627 * Broadcom-specific timings.
628 628 * We disable these timers/counters since they can cause
 629 629 	 * spurious failures when the bus is just a little bit
 630 630 	 * slow, or busy.
631 631 */
632 632 pci_config_put8(pci, PCI_TRDYTO, 0);
633 633 pci_config_put8(pci, PCI_RETRIES, 0);
634 634
635 635 /* initialize PCI access settings */
636 636 pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
637 637 PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);
638 638
639 639 /* set up our PCI latency timer */
640 640 pci_config_put8(pci, PCI_LATTMR, 0x40);
641 641
642 642 #if DEBUG
643 643 /* read registers (for debugging) */
644 644 pcicomm = pci_config_get16(pci, PCI_COMM);
645 645 pcistat = pci_config_get16(pci, PCI_STATUS);
646 646 cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
647 647 mingnt = pci_config_get8(pci, PCI_MINGNT);
648 648 maxlat = pci_config_get8(pci, PCI_MAXLAT);
649 649 lattmr = pci_config_get8(pci, PCI_LATTMR);
650 650 #endif
651 651
652 652 pci_config_teardown(&pci);
653 653
654 654 if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
655 655 dca_diperror(dip, "unable to get iblock cookie");
656 656 return (DDI_FAILURE);
657 657 }
658 658
659 659 if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
660 660 dca_diperror(dip, "unable to allocate soft state");
661 661 return (DDI_FAILURE);
662 662 }
663 663
664 664 dca = ddi_get_soft_state(dca_state, instance);
665 665 ASSERT(dca != NULL);
666 666 dca->dca_dip = dip;
667 667 WORKLIST(dca, MCR1)->dwl_prov = NULL;
668 668 WORKLIST(dca, MCR2)->dwl_prov = NULL;
669 669 /* figure pagesize */
670 670 dca->dca_pagesize = ddi_ptob(dip, 1);
671 671
672 672 /*
673 673 * Search for the device in our supported devices table. This
674 674 * is here for two reasons. First, we want to ensure that
675 675 * only Sun-qualified (and presumably Sun-labeled) devices can
676 676 * be used with this driver. Second, some devices have
677 677 * specific differences. E.g. the 5821 has support for a
678 678 * special mode of RC4, deeper queues, power management, and
679 679 * other changes. Also, the export versions of some of these
680 680 * chips don't support RC4 or 3DES, so we catch that here.
681 681 *
682 682 * Note that we only look at the upper nibble of the device
683 683 * id, which is used to distinguish export vs. domestic
684 684 * versions of the chip. (The lower nibble is used for
685 685 * stepping information.)
686 686 */
687 687 for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
688 688 /*
689 689 * Try to match the subsystem information first.
690 690 */
691 691 if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
692 692 subsysid && (subsysid == dca_devices[i].dd_device_id)) {
693 693 dca->dca_model = dca_devices[i].dd_model;
694 694 dca->dca_devid = dca_devices[i].dd_device_id;
695 695 break;
696 696 }
697 697 /*
698 698 * Failing that, try the generic vendor and device id.
699 699 * Even if we find a match, we keep searching anyway,
700 700 * since we would prefer to find a match based on the
701 701 * subsystem ids.
702 702 */
703 703 if ((venid == dca_devices[i].dd_vendor_id) &&
704 704 (devid == dca_devices[i].dd_device_id)) {
705 705 dca->dca_model = dca_devices[i].dd_model;
706 706 dca->dca_devid = dca_devices[i].dd_device_id;
707 707 }
708 708 }
 709 709 	/* try to handle an unrecognized device */
710 710 if (dca->dca_model == NULL) {
711 711 dca->dca_model = unknowndev;
712 712 dca_error(dca, "device not recognized, not supported");
713 713 DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
714 714 i, venid, devid, revid);
715 715 }
716 716
717 717 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
718 718 dca->dca_model) != DDI_SUCCESS) {
719 719 dca_error(dca, "unable to create description property");
720 720 return (DDI_FAILURE);
721 721 }
722 722
723 723 DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
724 724 pcicomm, pcistat, cachelinesz);
725 725 DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
726 726 mingnt, maxlat, lattmr);
727 727
728 728 /*
729 729 * initialize locks, etc.
730 730 */
731 731 (void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);
732 732
733 733 /* use RNGSHA1 by default */
734 734 if (ddi_getprop(DDI_DEV_T_ANY, dip,
735 735 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
736 736 dca->dca_flags |= DCA_RNGSHA1;
737 737 }
738 738
739 739 /* initialize FMA */
740 740 dca_fma_init(dca);
741 741
742 742 /* initialize some key data structures */
743 743 if (dca_init(dca) != DDI_SUCCESS) {
744 744 goto failed;
745 745 }
746 746
747 747 /* initialize kstats */
748 748 dca_ksinit(dca);
749 749
750 750 /* setup access to registers */
751 751 if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
752 752 0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
753 753 dca_error(dca, "unable to map registers");
754 754 goto failed;
755 755 }
756 756
757 757 DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
758 758 DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
759 759 DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
760 760 DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
761 761 DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));
762 762
763 763 /* reset the chip */
764 764 if (dca_reset(dca, 0) < 0) {
765 765 goto failed;
766 766 }
767 767
768 768 /* initialize the chip */
769 769 PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
770 770 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
771 771 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
772 772 goto failed;
773 773 }
774 774
775 775 /* add the interrupt */
776 776 if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
777 777 (void *)dca) != DDI_SUCCESS) {
778 778 DBG(dca, DWARN, "ddi_add_intr failed");
779 779 goto failed;
780 780 } else {
781 781 intr_added = 1;
782 782 }
783 783
784 784 /* enable interrupts on the device */
785 785 /*
786 786 * XXX: Note, 5820A1 errata indicates that this may clobber
787 787 * bits 24 and 23, which affect the speed of the RNG. Since
788 788 * we always want to run in full-speed mode, this should be
789 789 * harmless.
790 790 */
791 791 if (dca->dca_devid == 0x5825) {
792 792 /* for 5825 - increase the DMA read size */
793 793 SETBIT(dca, CSR_DMACTL,
794 794 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
795 795 } else {
796 796 SETBIT(dca, CSR_DMACTL,
797 797 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
798 798 }
799 799 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
800 800 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
801 801 goto failed;
802 802 }
803 803
804 804 /* register MCR1 with the crypto framework */
805 805 /* Be careful not to exceed 32 chars */
806 806 (void) sprintf(ID, "%s/%d %s",
807 807 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
808 808 dca_prov_info1.pi_provider_description = ID;
809 809 dca_prov_info1.pi_provider_dev.pd_hw = dip;
810 810 dca_prov_info1.pi_provider_handle = dca;
811 811 if ((ret = crypto_register_provider(&dca_prov_info1,
812 812 &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
813 813 cmn_err(CE_WARN,
814 814 "crypto_register_provider() failed (%d) for MCR1", ret);
815 815 goto failed;
816 816 }
817 817
818 818 /* register MCR2 with the crypto framework */
819 819 /* Be careful not to exceed 32 chars */
820 820 (void) sprintf(ID, "%s/%d %s",
821 821 ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
822 822 dca_prov_info2.pi_provider_description = ID;
823 823 dca_prov_info2.pi_provider_dev.pd_hw = dip;
824 824 dca_prov_info2.pi_provider_handle = dca;
825 825 if ((ret = crypto_register_provider(&dca_prov_info2,
826 826 &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
827 827 cmn_err(CE_WARN,
828 828 "crypto_register_provider() failed (%d) for MCR2", ret);
829 829 goto failed;
830 830 }
831 831
832 832 crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
833 833 CRYPTO_PROVIDER_READY);
834 834 crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
835 835 CRYPTO_PROVIDER_READY);
836 836
837 837 /* Initialize the local random number pool for this instance */
838 838 if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
839 839 goto failed;
840 840 }
841 841
842 842 mutex_enter(&dca->dca_intrlock);
843 843 dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
844 844 drv_usectohz(SECOND));
845 845 mutex_exit(&dca->dca_intrlock);
846 846
847 847 ddi_set_driver_private(dip, (caddr_t)dca);
848 848
849 849 ddi_report_dev(dip);
850 850
851 851 if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
852 852 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
853 853 }
854 854
855 855 return (DDI_SUCCESS);
856 856
857 857 failed:
858 858 /* unregister from the crypto framework */
859 859 if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
860 860 (void) crypto_unregister_provider(
861 861 WORKLIST(dca, MCR1)->dwl_prov);
862 862 }
863 863 if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
864 864 (void) crypto_unregister_provider(
865 865 WORKLIST(dca, MCR2)->dwl_prov);
866 866 }
867 867 if (intr_added) {
868 868 CLRBIT(dca, CSR_DMACTL,
869 869 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
870 870 /* unregister intr handler */
871 871 ddi_remove_intr(dip, 0, dca->dca_icookie);
872 872 }
873 873 if (dca->dca_regs_handle) {
874 874 ddi_regs_map_free(&dca->dca_regs_handle);
875 875 }
876 876 if (dca->dca_intrstats) {
877 877 kstat_delete(dca->dca_intrstats);
878 878 }
879 879 if (dca->dca_ksp) {
880 880 kstat_delete(dca->dca_ksp);
881 881 }
882 882 dca_uninit(dca);
883 883
884 884 /* finalize FMA */
885 885 dca_fma_fini(dca);
886 886
887 887 mutex_destroy(&dca->dca_intrlock);
888 888 ddi_soft_state_free(dca_state, instance);
889 889 return (DDI_FAILURE);
890 890
891 891 }
892 892
893 893 int
894 894 dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
895 895 {
896 896 int instance;
897 897 dca_t *dca;
898 898 timeout_id_t tid;
899 899
900 900 instance = ddi_get_instance(dip);
901 901
902 902 DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);
903 903
904 904 switch (cmd) {
905 905 case DDI_SUSPEND:
906 906 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
907 907 dca_diperror(dip, "no soft state in detach");
908 908 return (DDI_FAILURE);
909 909 }
910 910 /* assumption: we won't be DDI_DETACHed until we return */
911 911 return (dca_suspend(dca));
912 912
913 913 case DDI_DETACH:
914 914 break;
915 915 default:
916 916 return (DDI_FAILURE);
917 917 }
918 918
919 919 if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
920 920 dca_diperror(dip, "no soft state in detach");
921 921 return (DDI_FAILURE);
922 922 }
923 923
924 924 /*
925 925 * Unregister from kCF.
926 926 * This needs to be done at the beginning of detach.
927 927 */
928 928 if (WORKLIST(dca, MCR1)->dwl_prov != NULL) {
929 929 if (crypto_unregister_provider(
930 930 WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) {
931 931 dca_error(dca, "unable to unregister MCR1 from kcf");
932 932 return (DDI_FAILURE);
933 933 }
934 934 }
935 935
936 936 if (WORKLIST(dca, MCR2)->dwl_prov != NULL) {
937 937 if (crypto_unregister_provider(
938 938 WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) {
939 939 dca_error(dca, "unable to unregister MCR2 from kcf");
940 940 return (DDI_FAILURE);
941 941 }
942 942 }
943 943
944 944 /*
945 945 * Cleanup the private context list. Once the
946 946 * crypto_unregister_provider returns, it is safe to do so.
947 947 */
948 948 dca_free_context_list(dca);
949 949
950 950 /* Cleanup the local random number pool */
951 951 dca_random_fini(dca);
952 952
953 953 /* send any jobs in the waitq back to kCF */
954 954 dca_rejectjobs(dca);
955 955
956 956 /* untimeout the timeouts */
957 957 mutex_enter(&dca->dca_intrlock);
958 958 tid = dca->dca_jobtid;
959 959 dca->dca_jobtid = 0;
960 960 mutex_exit(&dca->dca_intrlock);
961 961 if (tid) {
962 962 (void) untimeout(tid);
963 963 }
964 964
965 965 /* disable device interrupts */
966 966 CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
967 967
968 968 /* unregister interrupt handlers */
969 969 ddi_remove_intr(dip, 0, dca->dca_icookie);
970 970
971 971 /* release our regs handle */
972 972 ddi_regs_map_free(&dca->dca_regs_handle);
973 973
974 974 /* toss out kstats */
975 975 if (dca->dca_intrstats) {
976 976 kstat_delete(dca->dca_intrstats);
977 977 }
978 978 if (dca->dca_ksp) {
979 979 kstat_delete(dca->dca_ksp);
980 980 }
981 981
982 982 mutex_destroy(&dca->dca_intrlock);
983 983 dca_uninit(dca);
984 984
985 985 /* finalize FMA */
986 986 dca_fma_fini(dca);
987 987
988 988 ddi_soft_state_free(dca_state, instance);
989 989
990 990 return (DDI_SUCCESS);
991 991 }
992 992
993 993 int
994 994 dca_resume(dca_t *dca)
995 995 {
996 996 ddi_acc_handle_t pci;
997 997
998 998 if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
999 999 dca_error(dca, "unable to setup PCI config handle");
1000 1000 return (DDI_FAILURE);
1001 1001 }
1002 1002
1003 1003 /*
1004 1004 * Reprogram registers in PCI configuration space.
1005 1005 */
1006 1006
1007 1007 /* Broadcom-specific timers -- we disable them. */
1008 1008 pci_config_put8(pci, PCI_TRDYTO, 0);
1009 1009 pci_config_put8(pci, PCI_RETRIES, 0);
1010 1010
1011 1011 /* initialize PCI access settings */
1012 1012 pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
1013 1013 PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);
1014 1014
1015 1015 /* set up our PCI latency timer */
1016 1016 pci_config_put8(pci, PCI_LATTMR, 0x40);
1017 1017
1018 1018 pci_config_teardown(&pci);
1019 1019
1020 1020 if (dca_reset(dca, 0) < 0) {
1021 1021 dca_error(dca, "unable to reset device during resume");
1022 1022 return (DDI_FAILURE);
1023 1023 }
1024 1024
1025 1025 /*
1026 1026 * Now restore the card-specific CSRs.
1027 1027 */
1028 1028
1029 1029 /* restore endianness settings */
1030 1030 PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
1031 1031 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
1032 1032 DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
1033 1033 return (DDI_FAILURE);
1034 1034
1035 1035 /* restore interrupt enables */
1036 1036 if (dca->dca_devid == 0x5825) {
1037 1037 /* for 5825 set 256 byte read size to improve performance */
1038 1038 SETBIT(dca, CSR_DMACTL,
1039 1039 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
1040 1040 } else {
1041 1041 SETBIT(dca, CSR_DMACTL,
1042 1042 DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
1043 1043 }
1044 1044 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
1045 1045 DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
1046 1046 return (DDI_FAILURE);
1047 1047
1048 1048 /* resume scheduling jobs on the device */
1049 1049 dca_undrain(dca);
1050 1050
1051 1051 return (DDI_SUCCESS);
1052 1052 }
1053 1053
1054 1054 int
1055 1055 dca_suspend(dca_t *dca)
1056 1056 {
1057 1057 if ((dca_drain(dca)) != 0) {
1058 1058 return (DDI_FAILURE);
1059 1059 }
1060 1060 if (dca_reset(dca, 0) < 0) {
1061 1061 dca_error(dca, "unable to reset device during suspend");
1062 1062 return (DDI_FAILURE);
1063 1063 }
1064 1064 return (DDI_SUCCESS);
1065 1065 }
1066 1066
1067 1067 /*
1068 1068 * Hardware access stuff.
1069 1069 */
1070 1070 int
1071 1071 dca_reset(dca_t *dca, int failreset)
1072 1072 {
1073 1073 int i;
1074 1074
1075 1075 if (dca->dca_regs_handle == NULL) {
1076 1076 return (-1);
1077 1077 }
1078 1078
1079 1079 PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
1080 1080 if (!failreset) {
1081 1081 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
1082 1082 DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
1083 1083 return (-1);
1084 1084 }
1085 1085
1086 1086 /* now wait for a reset */
1087 1087 for (i = 1; i < 100; i++) {
1088 1088 uint32_t dmactl;
1089 1089 drv_usecwait(100);
1090 1090 dmactl = GETCSR(dca, CSR_DMACTL);
1091 1091 if (!failreset) {
1092 1092 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
1093 1093 DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
1094 1094 return (-1);
1095 1095 }
1096 1096 if ((dmactl & DMACTL_RESET) == 0) {
1097 1097 DBG(dca, DCHATTY, "reset in %d usec", i * 100);
1098 1098 return (0);
1099 1099 }
1100 1100 }
1101 1101 if (!failreset) {
1102 1102 dca_failure(dca, DDI_DEVICE_FAULT,
1103 1103 DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
1104 1104 "timeout waiting for reset after %d usec", i * 100);
1105 1105 }
1106 1106 return (-1);
1107 1107 }
1108 1108
1109 1109 int
1110 1110 dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
1111 1111 {
1112 1112 int i;
1113 1113 int reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);
1114 1114
1115 1115 /*
1116 1116 * Set up work queue.
1117 1117 */
1118 1118 mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
1119 1119 mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
1120 1120 dca->dca_icookie);
1121 1121 mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
1122 1122 cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);
1123 1123
1124 1124 mutex_enter(&wlp->dwl_lock);
1125 1125
1126 1126 dca_initq(&wlp->dwl_freereqs);
1127 1127 dca_initq(&wlp->dwl_waitq);
1128 1128 dca_initq(&wlp->dwl_freework);
1129 1129 dca_initq(&wlp->dwl_runq);
1130 1130
1131 1131 for (i = 0; i < MAXWORK; i++) {
1132 1132 dca_work_t *workp;
1133 1133
1134 1134 if ((workp = dca_newwork(dca)) == NULL) {
1135 1135 dca_error(dca, "unable to allocate work");
1136 1136 mutex_exit(&wlp->dwl_lock);
1137 1137 return (DDI_FAILURE);
1138 1138 }
1139 1139 workp->dw_wlp = wlp;
1140 1140 dca_freework(workp);
1141 1141 }
1142 1142 mutex_exit(&wlp->dwl_lock);
1143 1143
1144 1144 for (i = 0; i < reqprealloc; i++) {
1145 1145 dca_request_t *reqp;
1146 1146
1147 1147 if ((reqp = dca_newreq(dca)) == NULL) {
1148 1148 dca_error(dca, "unable to allocate request");
1149 1149 return (DDI_FAILURE);
1150 1150 }
1151 1151 reqp->dr_dca = dca;
1152 1152 reqp->dr_wlp = wlp;
1153 1153 dca_freereq(reqp);
1154 1154 }
1155 1155 return (DDI_SUCCESS);
1156 1156 }
1157 1157
1158 1158 int
1159 1159 dca_init(dca_t *dca)
1160 1160 {
1161 1161 dca_worklist_t *wlp;
1162 1162
1163 1163 /* Initialize the private context list and the corresponding lock. */
1164 1164 mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
1165 1165 dca_initq(&dca->dca_ctx_list);
1166 1166
1167 1167 /*
1168 1168 * MCR1 algorithms.
1169 1169 */
1170 1170 wlp = WORKLIST(dca, MCR1);
1171 1171 (void) sprintf(wlp->dwl_name, "dca%d:mcr1",
1172 1172 ddi_get_instance(dca->dca_dip));
1173 1173 wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
1174 1174 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1175 1175 "mcr1_lowater", MCR1LOWATER);
1176 1176 wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
1177 1177 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1178 1178 "mcr1_hiwater", MCR1HIWATER);
1179 1179 wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
1180 1180 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1181 1181 "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
1182 1182 wlp->dwl_dca = dca;
1183 1183 wlp->dwl_mcr = MCR1;
1184 1184 if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
1185 1185 return (DDI_FAILURE);
1186 1186 }
1187 1187
1188 1188 /*
1189 1189 * MCR2 algorithms.
1190 1190 */
1191 1191 wlp = WORKLIST(dca, MCR2);
1192 1192 (void) sprintf(wlp->dwl_name, "dca%d:mcr2",
1193 1193 ddi_get_instance(dca->dca_dip));
1194 1194 wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
1195 1195 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1196 1196 "mcr2_lowater", MCR2LOWATER);
1197 1197 wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
1198 1198 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1199 1199 "mcr2_hiwater", MCR2HIWATER);
1200 1200 wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
1201 1201 dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
1202 1202 "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
1203 1203 wlp->dwl_dca = dca;
1204 1204 wlp->dwl_mcr = MCR2;
1205 1205 if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
1206 1206 return (DDI_FAILURE);
1207 1207 }
1208 1208 return (DDI_SUCCESS);
1209 1209 }
1210 1210
1211 1211 /*
1212 1212 * Uninitialize worklists. This routine should only be called when no
1213 1213 * active jobs (hence DMA mappings) exist. One way to ensure this is
1214 1214 * to unregister from kCF before calling this routine. (This is done
1215 1215 * e.g. in detach(9e).)
1216 1216 */
1217 1217 void
1218 1218 dca_uninit(dca_t *dca)
1219 1219 {
1220 1220 int mcr;
1221 1221
1222 1222 mutex_destroy(&dca->dca_ctx_list_lock);
1223 1223
1224 1224 for (mcr = MCR1; mcr <= MCR2; mcr++) {
1225 1225 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1226 1226 dca_work_t *workp;
1227 1227 dca_request_t *reqp;
1228 1228
1229 1229 if (dca->dca_regs_handle == NULL) {
1230 1230 continue;
1231 1231 }
1232 1232
1233 1233 mutex_enter(&wlp->dwl_lock);
1234 1234 while ((workp = dca_getwork(dca, mcr)) != NULL) {
1235 1235 dca_destroywork(workp);
1236 1236 }
1237 1237 mutex_exit(&wlp->dwl_lock);
1238 1238 while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
1239 1239 dca_destroyreq(reqp);
1240 1240 }
1241 1241
1242 1242 mutex_destroy(&wlp->dwl_lock);
1243 1243 mutex_destroy(&wlp->dwl_freereqslock);
1244 1244 mutex_destroy(&wlp->dwl_freelock);
1245 1245 cv_destroy(&wlp->dwl_cv);
1246 1246 wlp->dwl_prov = NULL;
1247 1247 }
1248 1248 }
1249 1249
1250 1250 static void
1251 1251 dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
1252 1252 {
1253 1253 if (!q || !node)
1254 1254 return;
1255 1255
1256 1256 mutex_enter(lock);
1257 1257 node->dl_next2 = q;
1258 1258 node->dl_prev2 = q->dl_prev2;
1259 1259 node->dl_next2->dl_prev2 = node;
1260 1260 node->dl_prev2->dl_next2 = node;
1261 1261 mutex_exit(lock);
1262 1262 }
1263 1263
1264 1264 static void
1265 1265 dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
1266 1266 {
1267 1267 if (!node)
1268 1268 return;
1269 1269
1270 1270 mutex_enter(lock);
1271 1271 node->dl_next2->dl_prev2 = node->dl_prev2;
1272 1272 node->dl_prev2->dl_next2 = node->dl_next2;
1273 1273 node->dl_next2 = NULL;
1274 1274 node->dl_prev2 = NULL;
1275 1275 mutex_exit(lock);
1276 1276 }
1277 1277
1278 1278 static dca_listnode_t *
1279 1279 dca_delist2(dca_listnode_t *q, kmutex_t *lock)
1280 1280 {
1281 1281 dca_listnode_t *node;
1282 1282
1283 1283 mutex_enter(lock);
1284 1284 if ((node = q->dl_next2) == q) {
1285 1285 mutex_exit(lock);
1286 1286 return (NULL);
1287 1287 }
1288 1288
1289 1289 node->dl_next2->dl_prev2 = node->dl_prev2;
1290 1290 node->dl_prev2->dl_next2 = node->dl_next2;
1291 1291 node->dl_next2 = NULL;
1292 1292 node->dl_prev2 = NULL;
1293 1293 mutex_exit(lock);
1294 1294
1295 1295 return (node);
1296 1296 }
1297 1297
1298 1298 void
1299 1299 dca_initq(dca_listnode_t *q)
1300 1300 {
1301 1301 q->dl_next = q;
1302 1302 q->dl_prev = q;
1303 1303 q->dl_next2 = q;
1304 1304 q->dl_prev2 = q;
1305 1305 }
1306 1306
1307 1307 void
1308 1308 dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
1309 1309 {
1310 1310 /*
1311 1311 * Enqueue submits at the "tail" of the list, i.e. just
1312 1312 * behind the sentinel.
1313 1313 */
1314 1314 node->dl_next = q;
1315 1315 node->dl_prev = q->dl_prev;
1316 1316 node->dl_next->dl_prev = node;
1317 1317 node->dl_prev->dl_next = node;
1318 1318 }
1319 1319
1320 1320 void
1321 1321 dca_rmqueue(dca_listnode_t *node)
1322 1322 {
1323 1323 node->dl_next->dl_prev = node->dl_prev;
1324 1324 node->dl_prev->dl_next = node->dl_next;
1325 1325 node->dl_next = NULL;
1326 1326 node->dl_prev = NULL;
1327 1327 }
1328 1328
1329 1329 dca_listnode_t *
1330 1330 dca_dequeue(dca_listnode_t *q)
1331 1331 {
1332 1332 dca_listnode_t *node;
1333 1333 /*
1334 1334 * Dequeue takes from the "head" of the list, i.e. just after
1335 1335 * the sentinel.
1336 1336 */
1337 1337 if ((node = q->dl_next) == q) {
1338 1338 /* queue is empty */
1339 1339 return (NULL);
1340 1340 }
1341 1341 dca_rmqueue(node);
1342 1342 return (node);
1343 1343 }
1344 1344
 1345 1345 /* the opposite of dequeue: it takes things off in LIFO order */
1346 1346 dca_listnode_t *
1347 1347 dca_unqueue(dca_listnode_t *q)
1348 1348 {
1349 1349 dca_listnode_t *node;
1350 1350 /*
1351 1351 * unqueue takes from the "tail" of the list, i.e. just before
1352 1352 * the sentinel.
1353 1353 */
1354 1354 if ((node = q->dl_prev) == q) {
1355 1355 /* queue is empty */
1356 1356 return (NULL);
1357 1357 }
1358 1358 dca_rmqueue(node);
1359 1359 return (node);
1360 1360 }
1361 1361
1362 1362 dca_listnode_t *
1363 1363 dca_peekqueue(dca_listnode_t *q)
1364 1364 {
1365 1365 dca_listnode_t *node;
1366 1366
1367 1367 if ((node = q->dl_next) == q) {
1368 1368 return (NULL);
1369 1369 } else {
1370 1370 return (node);
1371 1371 }
1372 1372 }
1373 1373
1374 1374 /*
1375 1375 * Interrupt service routine.
1376 1376 */
1377 1377 uint_t
1378 1378 dca_intr(char *arg)
1379 1379 {
1380 1380 dca_t *dca = (dca_t *)arg;
1381 1381 uint32_t status;
1382 1382
1383 1383 mutex_enter(&dca->dca_intrlock);
1384 1384 status = GETCSR(dca, CSR_DMASTAT);
1385 1385 PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
1386 1386 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
1387 1387 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
1388 1388 mutex_exit(&dca->dca_intrlock);
1389 1389 return ((uint_t)DDI_FAILURE);
1390 1390 }
1391 1391
1392 1392 DBG(dca, DINTR, "interrupted, status = 0x%x!", status);
1393 1393
1394 1394 if ((status & DMASTAT_INTERRUPTS) == 0) {
1395 1395 /* increment spurious interrupt kstat */
1396 1396 if (dca->dca_intrstats) {
1397 1397 KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
1398 1398 }
1399 1399 mutex_exit(&dca->dca_intrlock);
1400 1400 return (DDI_INTR_UNCLAIMED);
1401 1401 }
1402 1402
1403 1403 if (dca->dca_intrstats) {
1404 1404 KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
1405 1405 }
1406 1406 if (status & DMASTAT_MCR1INT) {
1407 1407 DBG(dca, DINTR, "MCR1 interrupted");
1408 1408 mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
1409 1409 dca_schedule(dca, MCR1);
1410 1410 dca_reclaim(dca, MCR1);
1411 1411 mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
1412 1412 }
1413 1413
1414 1414 if (status & DMASTAT_MCR2INT) {
1415 1415 DBG(dca, DINTR, "MCR2 interrupted");
1416 1416 mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
1417 1417 dca_schedule(dca, MCR2);
1418 1418 dca_reclaim(dca, MCR2);
1419 1419 mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
1420 1420 }
1421 1421
1422 1422 if (status & DMASTAT_ERRINT) {
1423 1423 uint32_t erraddr;
1424 1424 erraddr = GETCSR(dca, CSR_DMAEA);
1425 1425 mutex_exit(&dca->dca_intrlock);
1426 1426
1427 1427 /*
1428 1428 * bit 1 of the error address indicates failure during
1429 1429 * read if set, during write otherwise.
1430 1430 */
1431 1431 dca_failure(dca, DDI_DEVICE_FAULT,
1432 1432 DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
1433 1433 "DMA master access error %s address 0x%x",
1434 1434 erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
1435 1435 return (DDI_INTR_CLAIMED);
1436 1436 }
1437 1437
1438 1438 mutex_exit(&dca->dca_intrlock);
1439 1439
1440 1440 return (DDI_INTR_CLAIMED);
1441 1441 }
1442 1442
1443 1443 /*
1444 1444 * Reverse a string of bytes from s1 into s2. The reversal happens
1445 1445 * from the tail of s1. If len1 < len2, then null bytes will be
1446 1446 * padded to the end of s2. If len2 < len1, then (presumably null)
1447 1447 * bytes will be dropped from the start of s1.
1448 1448 *
1449 1449 * The rationale here is that when s1 (source) is shorter, then we
1450 1450 * are reversing from big-endian ordering, into device ordering, and
1451 1451 * want to add some extra nulls to the tail (MSB) side of the device.
1452 1452 *
1453 1453 * Similarly, when s2 (dest) is shorter, then we are truncating what
1454 1454 * are presumably null MSB bits from the device.
1455 1455 *
1456 1456 * There is an expectation when reversing from the device back into
1457 1457 * big-endian, that the number of bytes to reverse and the target size
1458 1458 * will match, and no truncation or padding occurs.
1459 1459 */
1460 1460 void
1461 1461 dca_reverse(void *s1, void *s2, int len1, int len2)
1462 1462 {
1463 1463 caddr_t src, dst;
1464 1464
1465 1465 if (len1 == 0) {
1466 1466 if (len2) {
1467 1467 bzero(s2, len2);
1468 1468 }
1469 1469 return;
1470 1470 }
1471 1471 src = (caddr_t)s1 + len1 - 1;
1472 1472 dst = s2;
1473 1473 while ((src >= (caddr_t)s1) && (len2)) {
1474 1474 *dst++ = *src--;
1475 1475 len2--;
1476 1476 }
1477 1477 while (len2 > 0) {
1478 1478 *dst++ = 0;
1479 1479 len2--;
1480 1480 }
1481 1481 }
1482 1482
1483 1483 uint16_t
1484 1484 dca_padfull(int num)
1485 1485 {
1486 1486 if (num <= 512) {
1487 1487 return (BITS2BYTES(512));
1488 1488 }
1489 1489 if (num <= 768) {
1490 1490 return (BITS2BYTES(768));
1491 1491 }
1492 1492 if (num <= 1024) {
1493 1493 return (BITS2BYTES(1024));
1494 1494 }
1495 1495 if (num <= 1536) {
1496 1496 return (BITS2BYTES(1536));
1497 1497 }
1498 1498 if (num <= 2048) {
1499 1499 return (BITS2BYTES(2048));
1500 1500 }
1501 1501 return (0);
1502 1502 }
1503 1503
1504 1504 uint16_t
1505 1505 dca_padhalf(int num)
1506 1506 {
1507 1507 if (num <= 256) {
1508 1508 return (BITS2BYTES(256));
1509 1509 }
1510 1510 if (num <= 384) {
1511 1511 return (BITS2BYTES(384));
1512 1512 }
1513 1513 if (num <= 512) {
1514 1514 return (BITS2BYTES(512));
1515 1515 }
1516 1516 if (num <= 768) {
1517 1517 return (BITS2BYTES(768));
1518 1518 }
1519 1519 if (num <= 1024) {
1520 1520 return (BITS2BYTES(1024));
1521 1521 }
1522 1522 return (0);
1523 1523 }
1524 1524
1525 1525 dca_work_t *
1526 1526 dca_newwork(dca_t *dca)
1527 1527 {
1528 1528 dca_work_t *workp;
1529 1529 size_t size;
1530 1530 ddi_dma_cookie_t c;
1531 1531 unsigned nc;
1532 1532 int rv;
1533 1533
1534 1534 workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);
1535 1535
1536 1536 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
1537 1537 DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
1538 1538 if (rv != 0) {
1539 1539 dca_error(dca, "unable to alloc MCR DMA handle");
1540 1540 dca_destroywork(workp);
1541 1541 return (NULL);
1542 1542 }
1543 1543
1544 1544 rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
1545 1545 ROUNDUP(MCR_SIZE, dca->dca_pagesize),
1546 1546 &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
1547 1547 &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
1548 1548 if (rv != 0) {
1549 1549 dca_error(dca, "unable to alloc MCR DMA memory");
1550 1550 dca_destroywork(workp);
1551 1551 return (NULL);
1552 1552 }
1553 1553
1554 1554 rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
1555 1555 workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
1556 1556 DDI_DMA_SLEEP, NULL, &c, &nc);
1557 1557 if (rv != DDI_DMA_MAPPED) {
1558 1558 dca_error(dca, "unable to map MCR DMA memory");
1559 1559 dca_destroywork(workp);
1560 1560 return (NULL);
1561 1561 }
1562 1562
1563 1563 workp->dw_mcr_paddr = c.dmac_address;
1564 1564 return (workp);
1565 1565 }
1566 1566
1567 1567 void
1568 1568 dca_destroywork(dca_work_t *workp)
1569 1569 {
1570 1570 if (workp->dw_mcr_paddr) {
1571 1571 (void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
1572 1572 }
1573 1573 if (workp->dw_mcr_acch) {
1574 1574 ddi_dma_mem_free(&workp->dw_mcr_acch);
1575 1575 }
1576 1576 if (workp->dw_mcr_dmah) {
1577 1577 ddi_dma_free_handle(&workp->dw_mcr_dmah);
1578 1578 }
1579 1579 kmem_free(workp, sizeof (dca_work_t));
1580 1580 }
1581 1581
1582 1582 dca_request_t *
1583 1583 dca_newreq(dca_t *dca)
1584 1584 {
1585 1585 dca_request_t *reqp;
1586 1586 size_t size;
1587 1587 ddi_dma_cookie_t c;
1588 1588 unsigned nc;
1589 1589 int rv;
1590 1590 int n_chain = 0;
1591 1591
1592 1592 size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;
1593 1593
1594 1594 reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);
1595 1595
1596 1596 reqp->dr_dca = dca;
1597 1597
1598 1598 /*
1599 1599 * Setup the DMA region for the context and descriptors.
1600 1600 */
1601 1601 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
1602 1602 NULL, &reqp->dr_ctx_dmah);
1603 1603 if (rv != DDI_SUCCESS) {
1604 1604 dca_error(dca, "failure allocating request DMA handle");
1605 1605 dca_destroyreq(reqp);
1606 1606 return (NULL);
1607 1607 }
1608 1608
1609 1609 /* for driver hardening, allocate in whole pages */
1610 1610 rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
1611 1611 ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
1612 1612 DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
1613 1613 &reqp->dr_ctx_acch);
1614 1614 if (rv != DDI_SUCCESS) {
1615 1615 dca_error(dca, "unable to alloc request DMA memory");
1616 1616 dca_destroyreq(reqp);
1617 1617 return (NULL);
1618 1618 }
1619 1619
1620 1620 rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
1621 1621 reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
1622 1622 DDI_DMA_SLEEP, 0, &c, &nc);
1623 1623 if (rv != DDI_DMA_MAPPED) {
1624 1624 dca_error(dca, "failed binding request DMA handle");
1625 1625 dca_destroyreq(reqp);
1626 1626 return (NULL);
1627 1627 }
1628 1628 reqp->dr_ctx_paddr = c.dmac_address;
1629 1629
1630 1630 reqp->dr_dma_size = size;
1631 1631
1632 1632 /*
1633 1633 * Set up the dma for our scratch/shared buffers.
1634 1634 */
1635 1635 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
1636 1636 DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
1637 1637 if (rv != DDI_SUCCESS) {
1638 1638 dca_error(dca, "failure allocating ibuf DMA handle");
1639 1639 dca_destroyreq(reqp);
1640 1640 return (NULL);
1641 1641 }
1642 1642 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
1643 1643 DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
1644 1644 if (rv != DDI_SUCCESS) {
1645 1645 dca_error(dca, "failure allocating obuf DMA handle");
1646 1646 dca_destroyreq(reqp);
1647 1647 return (NULL);
1648 1648 }
1649 1649
1650 1650 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
1651 1651 DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
1652 1652 if (rv != DDI_SUCCESS) {
1653 1653 dca_error(dca, "failure allocating chain_in DMA handle");
1654 1654 dca_destroyreq(reqp);
1655 1655 return (NULL);
1656 1656 }
1657 1657
1658 1658 rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
1659 1659 DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
1660 1660 if (rv != DDI_SUCCESS) {
1661 1661 dca_error(dca, "failure allocating chain_out DMA handle");
1662 1662 dca_destroyreq(reqp);
1663 1663 return (NULL);
1664 1664 }
1665 1665
1666 1666 /*
1667 1667 * for driver hardening, allocate in whole pages.
1668 1668 */
1669 1669 size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
1670 1670 #if defined(i386) || defined(__i386)
1671 1671 /*
1672 1672 * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
1673 1673 	 * may fail on the x86 platform if a physically contiguous memory chunk
1674 1674 	 * cannot be found. In initial testing we did not see the performance
1675 1675 	 * degradation that was observed on Sparc.
1676 1676 */
1677 1677 if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
1678 1678 dca_error(dca, "unable to alloc request ibuf memory");
1679 1679 dca_destroyreq(reqp);
1680 1680 return (NULL);
1681 1681 }
1682 1682 if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
1683 1683 dca_error(dca, "unable to alloc request obuf memory");
1684 1684 dca_destroyreq(reqp);
1685 1685 return (NULL);
1686 1686 }
1687 1687 #else
1688 1688 /*
1689 1689 	 * We could use kmem_alloc on Sparc too. However, it gives worse
1690 1690 	 * performance when transferring more than one page of data. For example,
1691 1691 	 * using 4 threads, 12032-byte data and 3DES on a 900MHz Sparc system,
1692 1692 * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for
1693 1693 * the same throughput.
1694 1694 */
1695 1695 rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
1696 1696 size, &dca_bufattr,
1697 1697 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
1698 1698 &size, &reqp->dr_ibuf_acch);
1699 1699 if (rv != DDI_SUCCESS) {
1700 1700 dca_error(dca, "unable to alloc request DMA memory");
1701 1701 dca_destroyreq(reqp);
1702 1702 return (NULL);
1703 1703 }
1704 1704
1705 1705 rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
1706 1706 size, &dca_bufattr,
1707 1707 DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
1708 1708 &size, &reqp->dr_obuf_acch);
1709 1709 if (rv != DDI_SUCCESS) {
1710 1710 dca_error(dca, "unable to alloc request DMA memory");
1711 1711 dca_destroyreq(reqp);
1712 1712 return (NULL);
1713 1713 }
1714 1714 #endif
1715 1715
1716 1716 /* Skip the used portion in the context page */
1717 1717 reqp->dr_offset = CTX_MAXLENGTH;
1718 1718 if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
1719 1719 reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
1720 1720 DDI_DMA_WRITE | DDI_DMA_STREAMING,
1721 1721 &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
1722 1722 (void) dca_destroyreq(reqp);
1723 1723 return (NULL);
1724 1724 }
1725 1725 reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
1726 1726 /* Skip the space used by the input buffer */
1727 1727 reqp->dr_offset += DESC_SIZE * n_chain;
1728 1728
1729 1729 if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
1730 1730 reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
1731 1731 DDI_DMA_READ | DDI_DMA_STREAMING,
1732 1732 &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
1733 1733 (void) dca_destroyreq(reqp);
1734 1734 return (NULL);
1735 1735 }
1736 1736 reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
1737 1737 /* Skip the space used by the output buffer */
1738 1738 reqp->dr_offset += DESC_SIZE * n_chain;
1739 1739
1740 1740 DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
1741 1741 reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
1742 1742 return (reqp);
1743 1743 }
1744 1744
1745 1745 void
1746 1746 dca_destroyreq(dca_request_t *reqp)
1747 1747 {
1748 1748 #if defined(i386) || defined(__i386)
1749 1749 dca_t *dca = reqp->dr_dca;
1750 1750 size_t size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
1751 1751 #endif
1752 1752
1753 1753 /*
1754 1754 * Clean up DMA for the context structure.
1755 1755 */
1756 1756 if (reqp->dr_ctx_paddr) {
1757 1757 (void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
1758 1758 }
1759 1759
1760 1760 if (reqp->dr_ctx_acch) {
1761 1761 ddi_dma_mem_free(&reqp->dr_ctx_acch);
1762 1762 }
1763 1763
1764 1764 if (reqp->dr_ctx_dmah) {
1765 1765 ddi_dma_free_handle(&reqp->dr_ctx_dmah);
1766 1766 }
1767 1767
1768 1768 /*
1769 1769 * Clean up DMA for the scratch buffer.
1770 1770 */
1771 1771 #if defined(i386) || defined(__i386)
1772 1772 if (reqp->dr_ibuf_dmah) {
1773 1773 (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
1774 1774 ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
1775 1775 }
1776 1776 if (reqp->dr_obuf_dmah) {
1777 1777 (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
1778 1778 ddi_dma_free_handle(&reqp->dr_obuf_dmah);
1779 1779 }
1780 1780
1781 1781 kmem_free(reqp->dr_ibuf_kaddr, size);
1782 1782 kmem_free(reqp->dr_obuf_kaddr, size);
1783 1783 #else
1784 1784 if (reqp->dr_ibuf_paddr) {
1785 1785 (void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
1786 1786 }
1787 1787 if (reqp->dr_obuf_paddr) {
1788 1788 (void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
1789 1789 }
1790 1790
1791 1791 if (reqp->dr_ibuf_acch) {
1792 1792 ddi_dma_mem_free(&reqp->dr_ibuf_acch);
1793 1793 }
1794 1794 if (reqp->dr_obuf_acch) {
1795 1795 ddi_dma_mem_free(&reqp->dr_obuf_acch);
1796 1796 }
1797 1797
1798 1798 if (reqp->dr_ibuf_dmah) {
1799 1799 ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
1800 1800 }
1801 1801 if (reqp->dr_obuf_dmah) {
1802 1802 ddi_dma_free_handle(&reqp->dr_obuf_dmah);
1803 1803 }
1804 1804 #endif
1805 1805 /*
1806 1806 	 * These two DMA handles should have been unbound in
1807 1807 	 * the dca_unbindchains() function.
1808 1808 */
1809 1809 if (reqp->dr_chain_in_dmah) {
1810 1810 ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
1811 1811 }
1812 1812 if (reqp->dr_chain_out_dmah) {
1813 1813 ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
1814 1814 }
1815 1815
1816 1816 kmem_free(reqp, sizeof (dca_request_t));
1817 1817 }
1818 1818
1819 1819 dca_work_t *
1820 1820 dca_getwork(dca_t *dca, int mcr)
1821 1821 {
1822 1822 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1823 1823 dca_work_t *workp;
1824 1824
1825 1825 mutex_enter(&wlp->dwl_freelock);
1826 1826 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
1827 1827 mutex_exit(&wlp->dwl_freelock);
1828 1828 if (workp) {
1829 1829 int nreqs;
1830 1830 bzero(workp->dw_mcr_kaddr, 8);
1831 1831
1832 1832 /* clear out old requests */
1833 1833 for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
1834 1834 workp->dw_reqs[nreqs] = NULL;
1835 1835 }
1836 1836 }
1837 1837 return (workp);
1838 1838 }
1839 1839
1840 1840 void
1841 1841 dca_freework(dca_work_t *workp)
1842 1842 {
1843 1843 mutex_enter(&workp->dw_wlp->dwl_freelock);
1844 1844 dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
1845 1845 mutex_exit(&workp->dw_wlp->dwl_freelock);
1846 1846 }
1847 1847
1848 1848 dca_request_t *
1849 1849 dca_getreq(dca_t *dca, int mcr, int tryhard)
1850 1850 {
1851 1851 dca_worklist_t *wlp = WORKLIST(dca, mcr);
1852 1852 dca_request_t *reqp;
1853 1853
1854 1854 mutex_enter(&wlp->dwl_freereqslock);
1855 1855 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
1856 1856 mutex_exit(&wlp->dwl_freereqslock);
1857 1857 if (reqp) {
1858 1858 reqp->dr_flags = 0;
1859 1859 reqp->dr_callback = NULL;
1860 1860 } else if (tryhard) {
1861 1861 /*
1862 1862 		 * Failed to get a free one; try an allocation the hard way.
1863 1863 * XXX: Kstat desired here.
1864 1864 */
1865 1865 if ((reqp = dca_newreq(dca)) != NULL) {
1866 1866 reqp->dr_wlp = wlp;
1867 1867 reqp->dr_dca = dca;
1868 1868 reqp->dr_flags = 0;
1869 1869 reqp->dr_callback = NULL;
1870 1870 }
1871 1871 }
1872 1872 return (reqp);
1873 1873 }
1874 1874
1875 1875 void
1876 1876 dca_freereq(dca_request_t *reqp)
1877 1877 {
1878 1878 reqp->dr_kcf_req = NULL;
1879 1879 if (!(reqp->dr_flags & DR_NOCACHE)) {
1880 1880 mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
1881 1881 dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
1882 1882 (dca_listnode_t *)reqp);
1883 1883 mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
1884 1884 }
1885 1885 }
1886 1886
1887 1887 /*
1888 1888 * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer
1889 1889 * is mapped to a single physical address. On x86, a user buffer is mapped
1890 1890 * to multiple physical addresses. These physical addresses are chained
1891 1891 * using the method specified in Broadcom BCM5820 specification.
1892 1892 */
1893 1893 int
1894 1894 dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
1895 1895 {
1896 1896 int rv;
1897 1897 caddr_t kaddr;
1898 1898 uint_t flags;
1899 1899 int n_chain = 0;
1900 1900
1901 1901 if (reqp->dr_flags & DR_INPLACE) {
1902 1902 flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
1903 1903 } else {
1904 1904 flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
1905 1905 }
1906 1906
1907 1907 /* first the input */
1908 1908 if (incnt) {
1909 1909 if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
1910 1910 DBG(NULL, DWARN, "unrecognised crypto data format");
1911 1911 return (DDI_FAILURE);
1912 1912 }
1913 1913 if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
1914 1914 kaddr, reqp->dr_chain_in_dmah, flags,
1915 1915 &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
1916 1916 (void) dca_unbindchains(reqp);
1917 1917 return (rv);
1918 1918 }
1919 1919
1920 1920 /*
1921 1921 * The offset and length are altered by the calling routine
1922 1922 		 * The offset and length are altered by the calling routine:
1923 1923 * reqp->dr_in->cd_length -= incnt;
1924 1924 */
1925 1925 /* Save the first one in the chain for MCR */
1926 1926 reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
1927 1927 reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
1928 1928 reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
1929 1929 } else {
1930 1930 reqp->dr_in_paddr = NULL;
1931 1931 reqp->dr_in_next = 0;
1932 1932 reqp->dr_in_len = 0;
1933 1933 }
1934 1934
1935 1935 if (reqp->dr_flags & DR_INPLACE) {
1936 1936 reqp->dr_out_paddr = reqp->dr_in_paddr;
1937 1937 reqp->dr_out_len = reqp->dr_in_len;
1938 1938 reqp->dr_out_next = reqp->dr_in_next;
1939 1939 return (DDI_SUCCESS);
1940 1940 }
1941 1941
1942 1942 /* then the output */
1943 1943 if (outcnt) {
1944 1944 flags = DDI_DMA_READ | DDI_DMA_STREAMING;
1945 1945 if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
1946 1946 DBG(NULL, DWARN, "unrecognised crypto data format");
1947 1947 (void) dca_unbindchains(reqp);
1948 1948 return (DDI_FAILURE);
1949 1949 }
1950 1950 rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
1951 1951 n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
1952 1952 flags, &reqp->dr_chain_out_head, &n_chain);
1953 1953 if (rv != DDI_SUCCESS) {
1954 1954 (void) dca_unbindchains(reqp);
1955 1955 return (DDI_FAILURE);
1956 1956 }
1957 1957
1958 1958 /* Save the first one in the chain for MCR */
1959 1959 reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
1960 1960 reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
1961 1961 reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
1962 1962 } else {
1963 1963 reqp->dr_out_paddr = NULL;
1964 1964 reqp->dr_out_next = 0;
1965 1965 reqp->dr_out_len = 0;
1966 1966 }
1967 1967
1968 1968 return (DDI_SUCCESS);
1969 1969 }
1970 1970
1971 1971 /*
1972 1972 * Unbind the user buffers from the DMA handles.
1973 1973 */
1974 1974 int
1975 1975 dca_unbindchains(dca_request_t *reqp)
1976 1976 {
1977 1977 int rv = DDI_SUCCESS;
1978 1978 int rv1 = DDI_SUCCESS;
1979 1979
1980 1980 /* Clear the input chain */
1981 1981 if (reqp->dr_chain_in_head.dc_buffer_paddr != NULL) {
1982 1982 (void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
1983 1983 reqp->dr_chain_in_head.dc_buffer_paddr = 0;
1984 1984 }
1985 1985
1986 1986 if (reqp->dr_flags & DR_INPLACE) {
1987 1987 return (rv);
1988 1988 }
1989 1989
1990 1990 /* Clear the output chain */
1991 1991 if (reqp->dr_chain_out_head.dc_buffer_paddr != NULL) {
1992 1992 (void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
1993 1993 reqp->dr_chain_out_head.dc_buffer_paddr = 0;
1994 1994 }
1995 1995
1996 1996 return ((rv != DDI_SUCCESS)? rv : rv1);
1997 1997 }
1998 1998
1999 1999 /*
2000 2000  * Build either the input chain or the output chain. It is a single-item
2001 2001  * chain on Sparc, and possibly a multiple-item chain on x86.
2002 2002 */
2003 2003 static int
2004 2004 dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
2005 2005 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
2006 2006 dca_chain_t *head, int *n_chain)
2007 2007 {
2008 2008 ddi_dma_cookie_t c;
2009 2009 uint_t nc;
2010 2010 int rv;
2011 2011 caddr_t chain_kaddr_pre;
2012 2012 caddr_t chain_kaddr;
2013 2013 uint32_t chain_paddr;
2014 2014 int i;
2015 2015
2016 2016 /* Advance past the context structure to the starting address */
2017 2017 chain_paddr = reqp->dr_ctx_paddr + dr_offset;
2018 2018 chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;
2019 2019
2020 2020 /*
2021 2021 * Bind the kernel address to the DMA handle. On x86, the actual
2022 2022 * buffer is mapped into multiple physical addresses. On Sparc,
2023 2023 * the actual buffer is mapped into a single address.
2024 2024 */
2025 2025 rv = ddi_dma_addr_bind_handle(handle,
2026 2026 NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
2027 2027 if (rv != DDI_DMA_MAPPED) {
2028 2028 return (DDI_FAILURE);
2029 2029 }
2030 2030
2031 2031 (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
2032 2032 if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
2033 2033 DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
2034 2034 reqp->destroy = TRUE;
2035 2035 return (rv);
2036 2036 }
2037 2037
2038 2038 *n_chain = nc;
2039 2039
2040 2040 /* Setup the data buffer chain for DMA transfer */
2041 2041 chain_kaddr_pre = NULL;
2042 2042 head->dc_buffer_paddr = 0;
2043 2043 head->dc_next_paddr = 0;
2044 2044 head->dc_buffer_length = 0;
2045 2045 for (i = 0; i < nc; i++) {
2046 2046 /* PIO */
2047 2047 PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
2048 2048 PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
2049 2049 PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);
2050 2050
2051 2051 /* Remember the head of the chain */
2052 2052 if (head->dc_buffer_paddr == 0) {
2053 2053 head->dc_buffer_paddr = c.dmac_address;
2054 2054 head->dc_buffer_length = c.dmac_size;
2055 2055 }
2056 2056
2057 2057 /* Link to the previous one if one exists */
2058 2058 if (chain_kaddr_pre) {
2059 2059 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
2060 2060 chain_paddr);
2061 2061 if (head->dc_next_paddr == 0)
2062 2062 head->dc_next_paddr = chain_paddr;
2063 2063 }
2064 2064 chain_kaddr_pre = chain_kaddr;
2065 2065
2066 2066 /* Maintain pointers */
2067 2067 chain_paddr += DESC_SIZE;
2068 2068 chain_kaddr += DESC_SIZE;
2069 2069
2070 2070 /* Retrieve the next cookie if there is one */
2071 2071 if (i < nc-1)
2072 2072 ddi_dma_nextcookie(handle, &c);
2073 2073 }
2074 2074
2075 2075 /* Set the next pointer in the last entry to NULL */
2076 2076 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);
2077 2077
2078 2078 return (DDI_SUCCESS);
2079 2079 }
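/*
 * Each iteration of the loop above emits one BCM582x-style data descriptor
 * per DMA cookie. Judging from the PUTDESC32/PUTDESC16 calls, a descriptor
 * carries a 32-bit buffer address, a 16-bit reserved field, a 16-bit
 * length, and a 32-bit physical pointer to the next descriptor. The struct
 * below is only a visualization inferred from those calls; the authoritative
 * field offsets are the DESC_* constants in dca.h, not this struct, and the
 * DCA_EXAMPLES guard is hypothetical.
 */
#ifdef DCA_EXAMPLES
typedef struct example_desc {
	uint32_t	ed_bufaddr;	/* DESC_BUFADDR: physical buffer */
	uint16_t	ed_rsvd;	/* DESC_RSVD: always zeroed */
	uint16_t	ed_length;	/* DESC_LENGTH: buffer byte count */
	uint32_t	ed_next;	/* DESC_NEXT: next descriptor, 0 at tail */
} example_desc_t;
#endif	/* DCA_EXAMPLES */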
2080 2080
2081 2081 /*
2082 2082 * Schedule some work.
2083 2083 */
2084 2084 int
2085 2085 dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
2086 2086 {
2087 2087 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2088 2088
2089 2089 mutex_enter(&wlp->dwl_lock);
2090 2090
2091 2091 DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
2092 2092 reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
2093 2093 reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
2094 2094 DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
2095 2095 reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
2096 2096 /* sync out the entire context and descriptor chains */
2097 2097 (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2098 2098 if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
2099 2099 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2100 2100 reqp->destroy = TRUE;
2101 2101 mutex_exit(&wlp->dwl_lock);
2102 2102 return (CRYPTO_DEVICE_ERROR);
2103 2103 }
2104 2104
2105 2105 dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
2106 2106 wlp->dwl_count++;
2107 2107 wlp->dwl_lastsubmit = ddi_get_lbolt();
2108 2108 reqp->dr_wlp = wlp;
2109 2109
2110 2110 if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
2111 2111 /* we are fully loaded now, let kCF know */
2112 2112
2113 2113 wlp->dwl_flowctl++;
2114 2114 wlp->dwl_busy = 1;
2115 2115
2116 2116 crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
2117 2117 }
2118 2118
2119 2119 if (dosched) {
2120 2120 #ifdef SCHEDDELAY
2121 2121 /* possibly wait for more work to arrive */
2122 2122 if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
2123 2123 dca_schedule(dca, mcr);
2124 2124 } else if (!wlp->dwl_schedtid) {
2125 2125 /* wait 1 msec for more work before doing it */
2126 2126 wlp->dwl_schedtid = timeout(dca_schedtimeout,
2127 2127 (void *)wlp, drv_usectohz(MSEC));
2128 2128 }
2129 2129 #else
2130 2130 dca_schedule(dca, mcr);
2131 2131 #endif
2132 2132 }
2133 2133 mutex_exit(&wlp->dwl_lock);
2134 2134
2135 2135 return (CRYPTO_QUEUED);
2136 2136 }
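/*
 * dca_start() above and dca_schedule() below together implement two-level
 * watermark flow control: when the wait queue grows to dwl_hiwater the
 * provider is marked busy, and when it drains back to dwl_lowater it is
 * marked ready again. A condensed sketch of that pattern follows; the
 * function name and the DCA_EXAMPLES guard are illustrative only.
 */
#ifdef DCA_EXAMPLES
static void
example_flowctl(dca_worklist_t *wlp, int enqueued)
{
	ASSERT(mutex_owned(&wlp->dwl_lock));

	if (enqueued) {
		if (wlp->dwl_count == wlp->dwl_hiwater && !wlp->dwl_busy) {
			/* crossed the high watermark: throttle kCF */
			wlp->dwl_busy = 1;
			crypto_prov_notify(wlp->dwl_prov,
			    CRYPTO_PROVIDER_BUSY);
		}
	} else {
		if (wlp->dwl_count == wlp->dwl_lowater && wlp->dwl_busy) {
			/* drained to the low watermark: resume kCF */
			wlp->dwl_busy = 0;
			crypto_prov_notify(wlp->dwl_prov,
			    CRYPTO_PROVIDER_READY);
		}
	}
}
#endif	/* DCA_EXAMPLES */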
2137 2137
2138 2138 void
2139 2139 dca_schedule(dca_t *dca, int mcr)
2140 2140 {
2141 2141 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2142 2142 int csr;
2143 2143 int full;
2144 2144 uint32_t status;
2145 2145
2146 2146 ASSERT(mutex_owned(&wlp->dwl_lock));
2147 2147 /*
2148 2148 * If the card is draining or has an outstanding failure,
2149 2149 * don't schedule any more work on it right now
2150 2150 */
2151 2151 if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
2152 2152 return;
2153 2153 }
2154 2154
2155 2155 if (mcr == MCR2) {
2156 2156 csr = CSR_MCR2;
2157 2157 full = DMASTAT_MCR2FULL;
2158 2158 } else {
2159 2159 csr = CSR_MCR1;
2160 2160 full = DMASTAT_MCR1FULL;
2161 2161 }
2162 2162
2163 2163 for (;;) {
2164 2164 dca_work_t *workp;
2165 2165 uint32_t offset;
2166 2166 int nreqs;
2167 2167
2168 2168 status = GETCSR(dca, CSR_DMASTAT);
2169 2169 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
2170 2170 DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
2171 2171 return;
2172 2172
2173 2173 if ((status & full) != 0)
2174 2174 break;
2175 2175
2176 2176 #ifdef SCHEDDELAY
2177 2177 /* if there isn't enough to do, don't bother now */
2178 2178 if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
2179 2179 (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
2180 2180 drv_usectohz(MSEC)))) {
2181 2181 /* wait a bit longer... */
2182 2182 if (wlp->dwl_schedtid == 0) {
2183 2183 wlp->dwl_schedtid = timeout(dca_schedtimeout,
2184 2184 (void *)wlp, drv_usectohz(MSEC));
2185 2185 }
2186 2186 return;
2187 2187 }
2188 2188 #endif
2189 2189
2190 2190 /* grab a work structure */
2191 2191 workp = dca_getwork(dca, mcr);
2192 2192
2193 2193 if (workp == NULL) {
2194 2194 /*
2195 2195 			 * In this case there must be work ready to be
2196 2196 			 * reclaimed, since the chip can hold fewer
2197 2197 			 * outstanding jobs than exist in total.
2198 2198 */
2199 2199 dca_reclaim(dca, mcr);
2200 2200 continue;
2201 2201 }
2202 2202
2203 2203 nreqs = 0;
2204 2204 offset = MCR_CTXADDR;
2205 2205
2206 2206 while (nreqs < wlp->dwl_reqspermcr) {
2207 2207 dca_request_t *reqp;
2208 2208
2209 2209 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
2210 2210 if (reqp == NULL) {
2211 2211 /* nothing left to process */
2212 2212 break;
2213 2213 }
2214 2214 /*
2215 2215 * Update flow control.
2216 2216 */
2217 2217 wlp->dwl_count--;
2218 2218 if ((wlp->dwl_count == wlp->dwl_lowater) &&
2219 2219 (wlp->dwl_busy)) {
2220 2220 wlp->dwl_busy = 0;
2221 2221 crypto_prov_notify(wlp->dwl_prov,
2222 2222 CRYPTO_PROVIDER_READY);
2223 2223 }
2224 2224
2225 2225 /*
2226 2226 * Context address.
2227 2227 */
2228 2228 PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
2229 2229 offset += 4;
2230 2230
2231 2231 /*
2232 2232 * Input chain.
2233 2233 */
2234 2234 /* input buffer address */
2235 2235 PUTMCR32(workp, offset, reqp->dr_in_paddr);
2236 2236 offset += 4;
2237 2237 /* next input buffer entry */
2238 2238 PUTMCR32(workp, offset, reqp->dr_in_next);
2239 2239 offset += 4;
2240 2240 /* input buffer length */
2241 2241 PUTMCR16(workp, offset, reqp->dr_in_len);
2242 2242 offset += 2;
2243 2243 /* zero the reserved field */
2244 2244 PUTMCR16(workp, offset, 0);
2245 2245 offset += 2;
2246 2246
2247 2247 /*
2248 2248 * Overall length.
2249 2249 */
2250 2250 /* reserved field */
2251 2251 PUTMCR16(workp, offset, 0);
2252 2252 offset += 2;
2253 2253 /* total packet length */
2254 2254 PUTMCR16(workp, offset, reqp->dr_pkt_length);
2255 2255 offset += 2;
2256 2256
2257 2257 /*
2258 2258 * Output chain.
2259 2259 */
2260 2260 /* output buffer address */
2261 2261 PUTMCR32(workp, offset, reqp->dr_out_paddr);
2262 2262 offset += 4;
2263 2263 /* next output buffer entry */
2264 2264 PUTMCR32(workp, offset, reqp->dr_out_next);
2265 2265 offset += 4;
2266 2266 /* output buffer length */
2267 2267 PUTMCR16(workp, offset, reqp->dr_out_len);
2268 2268 offset += 2;
2269 2269 /* zero the reserved field */
2270 2270 PUTMCR16(workp, offset, 0);
2271 2271 offset += 2;
2272 2272
2273 2273 /*
2274 2274 * Note submission.
2275 2275 */
2276 2276 workp->dw_reqs[nreqs] = reqp;
2277 2277 nreqs++;
2278 2278 }
2279 2279
2280 2280 if (nreqs == 0) {
2281 2281 /* nothing in the queue! */
2282 2282 dca_freework(workp);
2283 2283 return;
2284 2284 }
2285 2285
2286 2286 wlp->dwl_submit++;
2287 2287
2288 2288 PUTMCR16(workp, MCR_FLAGS, 0);
2289 2289 PUTMCR16(workp, MCR_COUNT, nreqs);
2290 2290
2291 2291 DBG(dca, DCHATTY,
2292 2292 "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
2293 2293 workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
2294 2294 nreqs, mcr);
2295 2295
2296 2296 workp->dw_lbolt = ddi_get_lbolt();
2297 2297 /* Make sure MCR is synced out to device. */
2298 2298 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
2299 2299 DDI_DMA_SYNC_FORDEV);
2300 2300 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
2301 2301 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2302 2302 dca_destroywork(workp);
2303 2303 return;
2304 2304 }
2305 2305
2306 2306 PUTCSR(dca, csr, workp->dw_mcr_paddr);
2307 2307 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
2308 2308 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2309 2309 dca_destroywork(workp);
2310 2310 return;
2311 2311 } else {
2312 2312 dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
2313 2313 }
2314 2314
2315 2315 DBG(dca, DCHATTY, "posted");
2316 2316 }
2317 2317 }
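/*
 * Summing the offset increments in the packing loop above shows that each
 * request occupies a fixed 32-byte record in the MCR, following a header
 * that holds MCR_FLAGS and MCR_COUNT. The struct below is a hypothetical
 * view of one such record, derived purely from the offset arithmetic in
 * this file rather than from the BCM582x data sheet; the DCA_EXAMPLES
 * guard is illustrative.
 */
#ifdef DCA_EXAMPLES
typedef struct example_mcr_req {
	uint32_t	emr_ctx_paddr;	/* context physical address */
	uint32_t	emr_in_paddr;	/* first input buffer */
	uint32_t	emr_in_next;	/* next input descriptor */
	uint16_t	emr_in_len;	/* input buffer length */
	uint16_t	emr_rsvd0;	/* zeroed */
	uint16_t	emr_rsvd1;	/* zeroed */
	uint16_t	emr_pkt_len;	/* total packet length */
	uint32_t	emr_out_paddr;	/* first output buffer */
	uint32_t	emr_out_next;	/* next output descriptor */
	uint16_t	emr_out_len;	/* output buffer length */
	uint16_t	emr_rsvd2;	/* zeroed */
} example_mcr_req_t;			/* 32 bytes per request */
#endif	/* DCA_EXAMPLES */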
2318 2318
2319 2319 /*
2320 2320 * Reclaim completed work, called in interrupt context.
2321 2321 */
2322 2322 void
2323 2323 dca_reclaim(dca_t *dca, int mcr)
2324 2324 {
2325 2325 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2326 2326 dca_work_t *workp;
2327 2327 ushort_t flags;
2328 2328 int nreclaimed = 0;
2329 2329 int i;
2330 2330
2331 2331 DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
2332 2332 ASSERT(mutex_owned(&wlp->dwl_lock));
2333 2333 /*
2334 2334 * For each MCR in the submitted (runq), we check to see if
2335 2335 * it has been processed. If so, then we note each individual
2336 2336 	 * job in the MCR, and do the completion processing for
2337 2337 	 * each such job.
2338 2338 */
2339 2339 for (;;) {
2340 2340
2341 2341 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2342 2342 if (workp == NULL) {
2343 2343 break;
2344 2344 }
2345 2345
2346 2346 /* only sync the MCR flags, since that's all we need */
2347 2347 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
2348 2348 DDI_DMA_SYNC_FORKERNEL);
2349 2349 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
2350 2350 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2351 2351 dca_rmqueue((dca_listnode_t *)workp);
2352 2352 dca_destroywork(workp);
2353 2353 return;
2354 2354 }
2355 2355
2356 2356 flags = GETMCR16(workp, MCR_FLAGS);
2357 2357 if ((flags & MCRFLAG_FINISHED) == 0) {
2358 2358 /* chip is still working on it */
2359 2359 DBG(dca, DRECLAIM,
2360 2360 "chip still working on it (MCR%d)", mcr);
2361 2361 break;
2362 2362 }
2363 2363
2364 2364 		/* it's really for us, so remove it from the queue */
2365 2365 dca_rmqueue((dca_listnode_t *)workp);
2366 2366
2367 2367 /* if we were draining, signal on the cv */
2368 2368 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2369 2369 cv_signal(&wlp->dwl_cv);
2370 2370 }
2371 2371
2372 2372 /* update statistics, done under the lock */
2373 2373 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2374 2374 dca_request_t *reqp = workp->dw_reqs[i];
2375 2375 if (reqp == NULL) {
2376 2376 continue;
2377 2377 }
2378 2378 if (reqp->dr_byte_stat >= 0) {
2379 2379 dca->dca_stats[reqp->dr_byte_stat] +=
2380 2380 reqp->dr_pkt_length;
2381 2381 }
2382 2382 if (reqp->dr_job_stat >= 0) {
2383 2383 dca->dca_stats[reqp->dr_job_stat]++;
2384 2384 }
2385 2385 }
2386 2386 mutex_exit(&wlp->dwl_lock);
2387 2387
2388 2388 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2389 2389 dca_request_t *reqp = workp->dw_reqs[i];
2390 2390
2391 2391 if (reqp == NULL) {
2392 2392 continue;
2393 2393 }
2394 2394
2395 2395 /* Do the callback. */
2396 2396 workp->dw_reqs[i] = NULL;
2397 2397 dca_done(reqp, CRYPTO_SUCCESS);
2398 2398
2399 2399 nreclaimed++;
2400 2400 }
2401 2401
2402 2402 /* now we can release the work */
2403 2403 dca_freework(workp);
2404 2404
2405 2405 mutex_enter(&wlp->dwl_lock);
2406 2406 }
2407 2407 DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
2408 2408 }
2409 2409
2410 2410 int
2411 2411 dca_length(crypto_data_t *cdata)
2412 2412 {
2413 2413 return (cdata->cd_length);
2414 2414 }
2415 2415
2416 2416 /*
2417 2417 * This is the callback function called from the interrupt when a kCF job
2418 2418 * completes. It does some driver-specific things, and then calls the
2419 2419 * kCF-provided callback. Finally, it cleans up the state for the work
2420 2420 * request and drops the reference count to allow for DR.
2421 2421 */
2422 2422 void
2423 2423 dca_done(dca_request_t *reqp, int err)
2424 2424 {
2425 2425 uint64_t ena = 0;
2426 2426
2427 2427 /* unbind any chains we were using */
2428 2428 if (dca_unbindchains(reqp) != DDI_SUCCESS) {
2429 2429 /* DMA failure */
2430 2430 ena = dca_ena(ena);
2431 2431 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
2432 2432 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
2433 2433 "fault on buffer DMA handle");
2434 2434 if (err == CRYPTO_SUCCESS) {
2435 2435 err = CRYPTO_DEVICE_ERROR;
2436 2436 }
2437 2437 }
2438 2438
2439 2439 if (reqp->dr_callback != NULL) {
2440 2440 reqp->dr_callback(reqp, err);
2441 2441 } else {
2442 2442 dca_freereq(reqp);
2443 2443 }
2444 2444 }
2445 2445
2446 2446 /*
2447 2447 * Call this when a failure is detected. It will reset the chip,
2448 2448 * log a message, alert kCF, and mark jobs in the runq as failed.
2449 2449 */
2450 2450 /* ARGSUSED */
2451 2451 void
2452 2452 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
2453 2453 uint64_t ena, int errno, char *mess, ...)
2454 2454 {
2455 2455 va_list ap;
2456 2456 char buf[256];
2457 2457 int mcr;
2458 2458 char *eclass;
2459 2459 int have_mutex;
2460 2460
2461 2461 va_start(ap, mess);
2462 2462 (void) vsprintf(buf, mess, ap);
2463 2463 va_end(ap);
2464 2464
2465 2465 eclass = dca_fma_eclass_string(dca->dca_model, index);
2466 2466
2467 2467 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
2468 2468 index != DCA_FM_ECLASS_NONE) {
2469 2469 ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
2470 2470 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2471 2471 FM_EREPORT_VERS0, NULL);
2472 2472
2473 2473 /* Report the impact of the failure to the DDI. */
2474 2474 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
2475 2475 } else {
2476 2476 /* Just log the error string to the message log */
2477 2477 dca_error(dca, buf);
2478 2478 }
2479 2479
2480 2480 /*
2481 2481 * Indicate a failure (keeps schedule from running).
2482 2482 */
2483 2483 dca->dca_flags |= DCA_FAILED;
2484 2484
2485 2485 /*
2486 2486 	 * Reset the chip. As a side effect, this should also disable
2487 2487 	 * all interrupts from the device.
2488 2488 */
2489 2489 (void) dca_reset(dca, 1);
2490 2490
2491 2491 /*
2492 2492 * Report the failure to kCF.
2493 2493 */
2494 2494 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2495 2495 if (WORKLIST(dca, mcr)->dwl_prov) {
2496 2496 crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
2497 2497 CRYPTO_PROVIDER_FAILED);
2498 2498 }
2499 2499 }
2500 2500
2501 2501 /*
2502 2502 * Return jobs not sent to hardware back to kCF.
2503 2503 */
2504 2504 dca_rejectjobs(dca);
2505 2505
2506 2506 /*
2507 2507 * From this point on, no new work should be arriving, and the
2508 2508 * chip should not be doing any active DMA.
2509 2509 */
2510 2510
2511 2511 /*
2512 2512 * Now find all the work submitted to the device and fail
2513 2513 * them.
2514 2514 */
2515 2515 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2516 2516 dca_worklist_t *wlp;
2517 2517 int i;
2518 2518
2519 2519 wlp = WORKLIST(dca, mcr);
2520 2520
2521 2521 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2522 2522 continue;
2523 2523 }
2524 2524 for (;;) {
2525 2525 dca_work_t *workp;
2526 2526
2527 2527 have_mutex = mutex_tryenter(&wlp->dwl_lock);
2528 2528 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
2529 2529 if (workp == NULL) {
2530 2530 if (have_mutex)
2531 2531 mutex_exit(&wlp->dwl_lock);
2532 2532 break;
2533 2533 }
2534 2534 mutex_exit(&wlp->dwl_lock);
2535 2535
2536 2536 /*
2537 2537 * Free up requests
2538 2538 */
2539 2539 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2540 2540 dca_request_t *reqp = workp->dw_reqs[i];
2541 2541 if (reqp) {
2542 2542 dca_done(reqp, errno);
2543 2543 workp->dw_reqs[i] = NULL;
2544 2544 }
2545 2545 }
2546 2546
2547 2547 mutex_enter(&wlp->dwl_lock);
2548 2548 /*
2549 2549 * If waiting to drain, signal on the waiter.
2550 2550 */
2551 2551 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2552 2552 cv_signal(&wlp->dwl_cv);
2553 2553 }
2554 2554
2555 2555 /*
2556 2556 * Return the work and request structures to
2557 2557 * the free pool.
2558 2558 */
2559 2559 dca_freework(workp);
2560 2560 if (have_mutex)
2561 2561 mutex_exit(&wlp->dwl_lock);
2562 2562 }
2563 2563 }
2564 2564
2565 2565 }
2566 2566
2567 2567 #ifdef SCHEDDELAY
2568 2568 /*
2569 2569 * Reschedule worklist as needed.
2570 2570 */
2571 2571 void
2572 2572 dca_schedtimeout(void *arg)
2573 2573 {
2574 2574 dca_worklist_t *wlp = (dca_worklist_t *)arg;
2575 2575 mutex_enter(&wlp->dwl_lock);
2576 2576 wlp->dwl_schedtid = 0;
2577 2577 dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
2578 2578 mutex_exit(&wlp->dwl_lock);
2579 2579 }
2580 2580 #endif
2581 2581
2582 2582 /*
2583 2583 * Check for stalled jobs.
2584 2584 */
2585 2585 void
2586 2586 dca_jobtimeout(void *arg)
2587 2587 {
2588 2588 int mcr;
2589 2589 dca_t *dca = (dca_t *)arg;
2590 2590 int hung = 0;
2591 2591
2592 2592 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2593 2593 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2594 2594 dca_work_t *workp;
2595 2595 clock_t when;
2596 2596
2597 2597 mutex_enter(&wlp->dwl_lock);
2598 2598 when = ddi_get_lbolt();
2599 2599
2600 2600 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2601 2601 if (workp == NULL) {
2602 2602 /* nothing sitting in the queue */
2603 2603 mutex_exit(&wlp->dwl_lock);
2604 2604 continue;
2605 2605 }
2606 2606
2607 2607 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
2608 2608 /* request has been queued for less than STALETIME */
2609 2609 mutex_exit(&wlp->dwl_lock);
2610 2610 continue;
2611 2611 }
2612 2612
2613 2613 /* job has been sitting around for over 1 second, badness */
2614 2614 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
2615 2615 mcr);
2616 2616
2617 2617 /* put it back in the queue, until we reset the chip */
2618 2618 hung++;
2619 2619 mutex_exit(&wlp->dwl_lock);
2620 2620 }
2621 2621
2622 2622 if (hung) {
2623 2623 dca_failure(dca, DDI_DEVICE_FAULT,
2624 2624 DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
2625 2625 		    "timeout processing job.");
2626 2626 }
2627 2627
2628 2628 /* reschedule ourself */
2629 2629 mutex_enter(&dca->dca_intrlock);
2630 2630 if (dca->dca_jobtid == 0) {
2631 2631 /* timeout has been canceled, prior to DR */
2632 2632 mutex_exit(&dca->dca_intrlock);
2633 2633 return;
2634 2634 }
2635 2635
2636 2636 /* check again in 1 second */
2637 2637 dca->dca_jobtid = timeout(dca_jobtimeout, arg,
2638 2638 drv_usectohz(SECOND));
2639 2639 mutex_exit(&dca->dca_intrlock);
2640 2640 }
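/*
 * dca_jobtimeout() above uses the common self-rescheduling timeout idiom:
 * the timeout ID is checked and re-armed under dca_intrlock, and a zeroed
 * ID is the signal that DR has canceled the cycle. A minimal sketch of
 * just the idiom (the function name and DCA_EXAMPLES guard are
 * illustrative):
 */
#ifdef DCA_EXAMPLES
static void
example_periodic(void *arg)
{
	dca_t *dca = (dca_t *)arg;

	/* ... do the periodic work here ... */

	mutex_enter(&dca->dca_intrlock);
	if (dca->dca_jobtid == 0) {
		/* canceled; do not re-arm */
		mutex_exit(&dca->dca_intrlock);
		return;
	}
	/* re-arm for another pass one second from now */
	dca->dca_jobtid = timeout(example_periodic, arg,
	    drv_usectohz(SECOND));
	mutex_exit(&dca->dca_intrlock);
}
#endif	/* DCA_EXAMPLES */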
2641 2641
2642 2642 /*
2643 2643  * This returns all jobs to kCF. It assumes that processing
2644 2644 * on the worklist has halted.
2645 2645 */
2646 2646 void
2647 2647 dca_rejectjobs(dca_t *dca)
2648 2648 {
2649 2649 int mcr;
2650 2650 int have_mutex;
2651 2651 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2652 2652 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2653 2653 dca_request_t *reqp;
2654 2654
2655 2655 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2656 2656 continue;
2657 2657 }
2658 2658 have_mutex = mutex_tryenter(&wlp->dwl_lock);
2659 2659 for (;;) {
2660 2660 reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
2661 2661 if (reqp == NULL) {
2662 2662 break;
2663 2663 }
2664 2664 /* update flow control */
2665 2665 wlp->dwl_count--;
2666 2666 if ((wlp->dwl_count == wlp->dwl_lowater) &&
2667 2667 (wlp->dwl_busy)) {
2668 2668 wlp->dwl_busy = 0;
2669 2669 crypto_prov_notify(wlp->dwl_prov,
2670 2670 CRYPTO_PROVIDER_READY);
2671 2671 }
2672 2672 mutex_exit(&wlp->dwl_lock);
2673 2673
2674 2674 (void) dca_unbindchains(reqp);
2675 2675 reqp->dr_callback(reqp, EAGAIN);
2676 2676 mutex_enter(&wlp->dwl_lock);
2677 2677 }
2678 2678 if (have_mutex)
2679 2679 mutex_exit(&wlp->dwl_lock);
2680 2680 }
2681 2681 }
2682 2682
2683 2683 int
2684 2684 dca_drain(dca_t *dca)
2685 2685 {
2686 2686 int mcr;
2687 2687 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2688 2688 #ifdef SCHEDDELAY
2689 2689 timeout_id_t tid;
2690 2690 #endif
2691 2691 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2692 2692
2693 2693 mutex_enter(&wlp->dwl_lock);
2694 2694 wlp->dwl_drain = 1;
2695 2695
2696 2696 /* give it up to a second to drain from the chip */
2697 2697 if (!QEMPTY(&wlp->dwl_runq)) {
2698 2698 (void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
2699 2699 drv_usectohz(STALETIME), TR_CLOCK_TICK);
2700 2700
2701 2701 if (!QEMPTY(&wlp->dwl_runq)) {
2702 2702 dca_error(dca, "unable to drain device");
2703 2703 mutex_exit(&wlp->dwl_lock);
2704 2704 dca_undrain(dca);
2705 2705 return (EBUSY);
2706 2706 }
2707 2707 }
2708 2708
2709 2709 #ifdef SCHEDDELAY
2710 2710 tid = wlp->dwl_schedtid;
2711 2711 mutex_exit(&wlp->dwl_lock);
2712 2712
2713 2713 /*
2714 2714 * untimeout outside the lock -- this is safe because we
2715 2715 * have set the drain flag, so dca_schedule() will not
2716 2716 * reschedule another timeout
2717 2717 */
2718 2718 if (tid) {
2719 2719 untimeout(tid);
2720 2720 }
2721 2721 #else
2722 2722 mutex_exit(&wlp->dwl_lock);
2723 2723 #endif
2724 2724 }
2725 2725 return (0);
2726 2726 }
2727 2727
2728 2728 void
2729 2729 dca_undrain(dca_t *dca)
2730 2730 {
2731 2731 int mcr;
2732 2732
2733 2733 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2734 2734 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2735 2735 mutex_enter(&wlp->dwl_lock);
2736 2736 wlp->dwl_drain = 0;
2737 2737 dca_schedule(dca, mcr);
2738 2738 mutex_exit(&wlp->dwl_lock);
2739 2739 }
2740 2740 }
2741 2741
2742 2742 /*
2743 2743 * Duplicate the crypto_data_t structure, but point to the original
2744 2744 * buffers.
2745 2745 */
2746 2746 int
2747 2747 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
2748 2748 {
2749 2749 ninput->cd_format = input->cd_format;
2750 2750 ninput->cd_offset = input->cd_offset;
2751 2751 ninput->cd_length = input->cd_length;
2752 2752 ninput->cd_miscdata = input->cd_miscdata;
2753 2753
2754 2754 switch (input->cd_format) {
2755 2755 case CRYPTO_DATA_RAW:
2756 2756 ninput->cd_raw.iov_base = input->cd_raw.iov_base;
2757 2757 ninput->cd_raw.iov_len = input->cd_raw.iov_len;
2758 2758 break;
2759 2759
2760 2760 case CRYPTO_DATA_UIO:
2761 2761 ninput->cd_uio = input->cd_uio;
2762 2762 break;
2763 2763
2764 2764 case CRYPTO_DATA_MBLK:
2765 2765 ninput->cd_mp = input->cd_mp;
2766 2766 break;
2767 2767
2768 2768 default:
2769 2769 DBG(NULL, DWARN,
2770 2770 "dca_dupcrypto: unrecognised crypto data format");
2771 2771 return (CRYPTO_FAILED);
2772 2772 }
2773 2773
2774 2774 return (CRYPTO_SUCCESS);
2775 2775 }
2776 2776
2777 2777 /*
2778 2778 * Performs validation checks on the input and output data structures.
2779 2779 */
2780 2780 int
2781 2781 dca_verifyio(crypto_data_t *input, crypto_data_t *output)
2782 2782 {
2783 2783 int rv = CRYPTO_SUCCESS;
2784 2784
2785 2785 switch (input->cd_format) {
2786 2786 case CRYPTO_DATA_RAW:
2787 2787 break;
2788 2788
2789 2789 case CRYPTO_DATA_UIO:
2790 2790 		/* we support only kernel buffers */
2791 2791 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
2792 2792 DBG(NULL, DWARN, "non kernel input uio buffer");
2793 2793 rv = CRYPTO_ARGUMENTS_BAD;
2794 2794 }
2795 2795 break;
2796 2796
2797 2797 case CRYPTO_DATA_MBLK:
2798 2798 break;
2799 2799
2800 2800 default:
2801 2801 DBG(NULL, DWARN, "unrecognised input crypto data format");
2802 2802 rv = CRYPTO_ARGUMENTS_BAD;
2803 2803 }
2804 2804
2805 2805 switch (output->cd_format) {
2806 2806 case CRYPTO_DATA_RAW:
2807 2807 break;
2808 2808
2809 2809 case CRYPTO_DATA_UIO:
2810 2810 		/* we support only kernel buffers */
2811 2811 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
2812 2812 DBG(NULL, DWARN, "non kernel output uio buffer");
2813 2813 rv = CRYPTO_ARGUMENTS_BAD;
2814 2814 }
2815 2815 break;
2816 2816
2817 2817 case CRYPTO_DATA_MBLK:
2818 2818 break;
2819 2819
2820 2820 default:
2821 2821 DBG(NULL, DWARN, "unrecognised output crypto data format");
2822 2822 rv = CRYPTO_ARGUMENTS_BAD;
2823 2823 }
2824 2824
2825 2825 return (rv);
2826 2826 }
2827 2827
2828 2828 /*
2829 2829 * data: source crypto_data_t struct
2830 2830 * off: offset into the source before commencing copy
2831 2831 * count: the amount of data to copy
2832 2832 * dest: destination buffer
2833 2833 */
2834 2834 int
2835 2835 dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
2836 2836 {
2837 2837 int rv = CRYPTO_SUCCESS;
2838 2838 uio_t *uiop;
2839 2839 uint_t vec_idx;
2840 2840 size_t cur_len;
2841 2841 mblk_t *mp;
2842 2842
2843 2843 if (count == 0) {
2844 2844 /* We don't want anything so we're done. */
2845 2845 return (rv);
2846 2846 }
2847 2847
2848 2848 /*
2849 2849 * Sanity check that we haven't specified a length greater than the
2850 2850 	 * offset-adjusted size of the buffer.
2851 2851 */
2852 2852 if (count > (data->cd_length - off)) {
2853 2853 return (CRYPTO_DATA_LEN_RANGE);
2854 2854 }
2855 2855
2856 2856 /* Add the internal crypto_data offset to the requested offset. */
2857 2857 off += data->cd_offset;
2858 2858
2859 2859 switch (data->cd_format) {
2860 2860 case CRYPTO_DATA_RAW:
2861 2861 bcopy(data->cd_raw.iov_base + off, dest, count);
2862 2862 break;
2863 2863
2864 2864 case CRYPTO_DATA_UIO:
2865 2865 /*
2866 2866 * Jump to the first iovec containing data to be
2867 2867 * processed.
2868 2868 */
2869 2869 uiop = data->cd_uio;
2870 2870 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
2871 2871 off >= uiop->uio_iov[vec_idx].iov_len;
2872 2872 off -= uiop->uio_iov[vec_idx++].iov_len)
2873 2873 ;
2874 2874 if (vec_idx == uiop->uio_iovcnt) {
2875 2875 /*
2876 2876 * The caller specified an offset that is larger than
2877 2877 * the total size of the buffers it provided.
2878 2878 */
2879 2879 return (CRYPTO_DATA_LEN_RANGE);
2880 2880 }
2881 2881
2882 2882 /*
2883 2883 * Now process the iovecs.
2884 2884 */
2885 2885 while (vec_idx < uiop->uio_iovcnt && count > 0) {
2886 2886 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
2887 2887 off, count);
2888 2888 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
2889 2889 cur_len);
2890 2890 count -= cur_len;
2891 2891 dest += cur_len;
2892 2892 vec_idx++;
2893 2893 off = 0;
2894 2894 }
2895 2895
2896 2896 if (vec_idx == uiop->uio_iovcnt && count > 0) {
2897 2897 /*
2898 2898 			 * The end of the specified iovecs was reached but
2899 2899 * the length requested could not be processed
2900 2900 * (requested to digest more data than it provided).
2901 2901 */
2902 2902 return (CRYPTO_DATA_LEN_RANGE);
2903 2903 }
2904 2904 break;
2905 2905
2906 2906 case CRYPTO_DATA_MBLK:
2907 2907 /*
2908 2908 * Jump to the first mblk_t containing data to be processed.
2909 2909 */
2910 2910 for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
2911 2911 off -= MBLKL(mp), mp = mp->b_cont)
2912 2912 ;
2913 2913 if (mp == NULL) {
2914 2914 /*
2915 2915 * The caller specified an offset that is larger than
2916 2916 * the total size of the buffers it provided.
2917 2917 */
2918 2918 return (CRYPTO_DATA_LEN_RANGE);
2919 2919 }
2920 2920
2921 2921 /*
2922 2922 * Now do the processing on the mblk chain.
2923 2923 */
2924 2924 while (mp != NULL && count > 0) {
2925 2925 cur_len = min(MBLKL(mp) - off, count);
2926 2926 bcopy((char *)(mp->b_rptr + off), dest, cur_len);
2927 2927 count -= cur_len;
2928 2928 dest += cur_len;
2929 2929 mp = mp->b_cont;
2930 2930 off = 0;
2931 2931 }
2932 2932
2933 2933 if (mp == NULL && count > 0) {
2934 2934 /*
2935 2935 * The end of the mblk was reached but the length
2936 2936 			 * requested could not be processed (requested to
2937 2937 * digest more data than it provided).
2938 2938 */
2939 2939 return (CRYPTO_DATA_LEN_RANGE);
2940 2940 }
2941 2941 break;
2942 2942
2943 2943 default:
2944 2944 DBG(NULL, DWARN, "unrecognised crypto data format");
2945 2945 rv = CRYPTO_ARGUMENTS_BAD;
2946 2946 }
2947 2947 return (rv);
2948 2948 }
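/*
 * A typical use of dca_getbufbytes() is to peel a fixed-size prefix (for
 * instance an IV) out of a crypto_data_t regardless of whether the data
 * arrived as RAW, UIO or MBLK. A hedged usage sketch, with a hypothetical
 * helper name and the illustrative DCA_EXAMPLES guard:
 */
#ifdef DCA_EXAMPLES
static int
example_get_iv(crypto_data_t *data, uchar_t *iv, int ivlen)
{
	/*
	 * Copy ivlen bytes starting at the buffer's current position
	 * (offset 0 relative to cd_offset); cd_offset/cd_length are
	 * not advanced by dca_getbufbytes().
	 */
	return (dca_getbufbytes(data, 0, ivlen, iv));
}
#endif	/* DCA_EXAMPLES */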
2949 2949
2950 2950
2951 2951 /*
2952 2952 * Performs the input, output or hard scatter/gather checks on the specified
2953 2953 * crypto_data_t struct. Returns true if the data is scatter/gather in nature
2954 2954  * crypto_data_t struct. Returns true if the data is scatter/gather in
2955 2955  * nature, i.e., it fails the test.
2956 2956 int
2957 2957 dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
2958 2958 {
2959 2959 uio_t *uiop;
2960 2960 mblk_t *mp;
2961 2961 int rv = FALSE;
2962 2962
2963 2963 switch (val) {
2964 2964 case DCA_SG_CONTIG:
2965 2965 /*
2966 2966 * Check for a contiguous data buffer.
2967 2967 */
2968 2968 switch (data->cd_format) {
2969 2969 case CRYPTO_DATA_RAW:
2970 2970 /* Contiguous in nature */
2971 2971 break;
2972 2972
2973 2973 case CRYPTO_DATA_UIO:
2974 2974 if (data->cd_uio->uio_iovcnt > 1)
2975 2975 rv = TRUE;
2976 2976 break;
2977 2977
2978 2978 case CRYPTO_DATA_MBLK:
2979 2979 mp = data->cd_mp;
2980 2980 if (mp->b_cont != NULL)
2981 2981 rv = TRUE;
2982 2982 break;
2983 2983
2984 2984 default:
2985 2985 DBG(NULL, DWARN, "unrecognised crypto data format");
2986 2986 }
2987 2987 break;
2988 2988
2989 2989 case DCA_SG_WALIGN:
2990 2990 /*
2991 2991 * Check for a contiguous data buffer that is 32-bit word
2992 2992 		 * aligned and whose size is a multiple of the word size.
2993 2993 */
2994 2994 switch (data->cd_format) {
2995 2995 case CRYPTO_DATA_RAW:
2996 2996 if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
2997 2997 ((uintptr_t)data->cd_raw.iov_base %
2998 2998 sizeof (uint32_t))) {
2999 2999 rv = TRUE;
3000 3000 }
3001 3001 break;
3002 3002
3003 3003 case CRYPTO_DATA_UIO:
3004 3004 uiop = data->cd_uio;
3005 3005 if (uiop->uio_iovcnt > 1) {
3006 3006 return (TRUE);
3007 3007 }
3008 3008 /* So there is only one iovec */
3009 3009 if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
3010 3010 ((uintptr_t)uiop->uio_iov[0].iov_base %
3011 3011 sizeof (uint32_t))) {
3012 3012 rv = TRUE;
3013 3013 }
3014 3014 break;
3015 3015
3016 3016 case CRYPTO_DATA_MBLK:
3017 3017 mp = data->cd_mp;
3018 3018 if (mp->b_cont != NULL) {
3019 3019 return (TRUE);
3020 3020 }
3021 3021 /* So there is only one mblk in the chain */
3022 3022 if ((MBLKL(mp) % sizeof (uint32_t)) ||
3023 3023 ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
3024 3024 rv = TRUE;
3025 3025 }
3026 3026 break;
3027 3027
3028 3028 default:
3029 3029 DBG(NULL, DWARN, "unrecognised crypto data format");
3030 3030 }
3031 3031 break;
3032 3032
3033 3033 case DCA_SG_PALIGN:
3034 3034 /*
3035 3035 		 * Check that the data buffer is page aligned and that its
3036 3036 		 * size is a multiple of the page size.
3037 3037 */
3038 3038 switch (data->cd_format) {
3039 3039 case CRYPTO_DATA_RAW:
3040 3040 if ((data->cd_length % dca->dca_pagesize) ||
3041 3041 ((uintptr_t)data->cd_raw.iov_base %
3042 3042 dca->dca_pagesize)) {
3043 3043 rv = TRUE;
3044 3044 }
3045 3045 break;
3046 3046
3047 3047 case CRYPTO_DATA_UIO:
3048 3048 uiop = data->cd_uio;
3049 3049 if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
3050 3050 ((uintptr_t)uiop->uio_iov[0].iov_base %
3051 3051 dca->dca_pagesize)) {
3052 3052 rv = TRUE;
3053 3053 }
3054 3054 break;
3055 3055
3056 3056 case CRYPTO_DATA_MBLK:
3057 3057 mp = data->cd_mp;
3058 3058 if ((MBLKL(mp) % dca->dca_pagesize) ||
3059 3059 ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
3060 3060 rv = TRUE;
3061 3061 }
3062 3062 break;
3063 3063
3064 3064 default:
3065 3065 DBG(NULL, DWARN, "unrecognised crypto data format");
3066 3066 }
3067 3067 break;
3068 3068
3069 3069 default:
3070 3070 DBG(NULL, DWARN, "unrecognised scatter/gather param type");
3071 3071 }
3072 3072
3073 3073 return (rv);
3074 3074 }
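/*
 * Callers would typically use dca_sgcheck() to decide between DMA-ing a
 * caller buffer directly and falling back to the preallocated scratch
 * buffers: if the data is scatter/gather (or misaligned) by the requested
 * criterion, it must be copied first. A sketch under that assumption,
 * with a hypothetical helper name and guard:
 */
#ifdef DCA_EXAMPLES
static int
example_needs_copy(dca_t *dca, crypto_data_t *in, crypto_data_t *out)
{
	/* in this sketch, word alignment is required for direct DMA */
	return (dca_sgcheck(dca, in, DCA_SG_WALIGN) ||
	    dca_sgcheck(dca, out, DCA_SG_WALIGN));
}
#endif	/* DCA_EXAMPLES */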
3075 3075
3076 3076 /*
3077 3077 * Increments the cd_offset and decrements the cd_length as the data is
3078 3078 * gathered from the crypto_data_t struct.
3079 3079 * The data is reverse-copied into the dest buffer if the flag is true.
3080 3080 */
3081 3081 int
3082 3082 dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
3083 3083 {
3084 3084 int rv = CRYPTO_SUCCESS;
3085 3085 uint_t vec_idx;
3086 3086 uio_t *uiop;
3087 3087 off_t off = in->cd_offset;
3088 3088 size_t cur_len;
3089 3089 mblk_t *mp;
3090 3090
3091 3091 switch (in->cd_format) {
3092 3092 case CRYPTO_DATA_RAW:
3093 3093 if (count > in->cd_length) {
3094 3094 /*
3095 3095 * The caller specified a length greater than the
3096 3096 * size of the buffer.
3097 3097 */
3098 3098 return (CRYPTO_DATA_LEN_RANGE);
3099 3099 }
3100 3100 if (reverse)
3101 3101 dca_reverse(in->cd_raw.iov_base + off, dest, count,
3102 3102 count);
3103 3103 else
3104 3104 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
3105 3105 in->cd_offset += count;
3106 3106 in->cd_length -= count;
3107 3107 break;
3108 3108
3109 3109 case CRYPTO_DATA_UIO:
3110 3110 /*
3111 3111 * Jump to the first iovec containing data to be processed.
3112 3112 */
3113 3113 uiop = in->cd_uio;
3114 3114 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3115 3115 off >= uiop->uio_iov[vec_idx].iov_len;
3116 3116 off -= uiop->uio_iov[vec_idx++].iov_len)
3117 3117 ;
3118 3118 if (vec_idx == uiop->uio_iovcnt) {
3119 3119 /*
3120 3120 * The caller specified an offset that is larger than
3121 3121 * the total size of the buffers it provided.
3122 3122 */
3123 3123 return (CRYPTO_DATA_LEN_RANGE);
3124 3124 }
3125 3125
3126 3126 /*
3127 3127 * Now process the iovecs.
3128 3128 */
3129 3129 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3130 3130 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3131 3131 off, count);
3132 3132 count -= cur_len;
3133 3133 if (reverse) {
3134 3134 /* Fill the dest buffer from the end */
3135 3135 dca_reverse(uiop->uio_iov[vec_idx].iov_base +
3136 3136 off, dest+count, cur_len, cur_len);
3137 3137 } else {
3138 3138 bcopy(uiop->uio_iov[vec_idx].iov_base + off,
3139 3139 dest, cur_len);
3140 3140 dest += cur_len;
3141 3141 }
3142 3142 in->cd_offset += cur_len;
3143 3143 in->cd_length -= cur_len;
3144 3144 vec_idx++;
3145 3145 off = 0;
3146 3146 }
3147 3147
3148 3148 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3149 3149 /*
3150 3150 			 * The end of the specified iovecs was reached but
3151 3151 * the length requested could not be processed
3152 3152 * (requested to digest more data than it provided).
3153 3153 */
3154 3154 return (CRYPTO_DATA_LEN_RANGE);
3155 3155 }
3156 3156 break;
3157 3157
3158 3158 case CRYPTO_DATA_MBLK:
3159 3159 /*
3160 3160 * Jump to the first mblk_t containing data to be processed.
3161 3161 */
3162 3162 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
3163 3163 off -= MBLKL(mp), mp = mp->b_cont)
3164 3164 ;
3165 3165 if (mp == NULL) {
3166 3166 /*
3167 3167 * The caller specified an offset that is larger than
3168 3168 * the total size of the buffers it provided.
3169 3169 */
3170 3170 return (CRYPTO_DATA_LEN_RANGE);
3171 3171 }
3172 3172
3173 3173 /*
3174 3174 * Now do the processing on the mblk chain.
3175 3175 */
3176 3176 while (mp != NULL && count > 0) {
3177 3177 cur_len = min(MBLKL(mp) - off, count);
3178 3178 count -= cur_len;
3179 3179 if (reverse) {
3180 3180 /* Fill the dest buffer from the end */
3181 3181 dca_reverse((char *)(mp->b_rptr + off),
3182 3182 dest+count, cur_len, cur_len);
3183 3183 } else {
3184 3184 bcopy((char *)(mp->b_rptr + off), dest,
3185 3185 cur_len);
3186 3186 dest += cur_len;
3187 3187 }
3188 3188 in->cd_offset += cur_len;
3189 3189 in->cd_length -= cur_len;
3190 3190 mp = mp->b_cont;
3191 3191 off = 0;
3192 3192 }
3193 3193
3194 3194 if (mp == NULL && count > 0) {
3195 3195 /*
3196 3196 * The end of the mblk was reached but the length
3197 3197 			 * requested could not be processed (requested to
3198 3198 * digest more data than it provided).
3199 3199 */
3200 3200 return (CRYPTO_DATA_LEN_RANGE);
3201 3201 }
3202 3202 break;
3203 3203
3204 3204 default:
3205 3205 DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
3206 3206 rv = CRYPTO_ARGUMENTS_BAD;
3207 3207 }
3208 3208 return (rv);
3209 3209 }
3210 3210
3211 3211 /*
3212 3212 * Increments the cd_offset and decrements the cd_length as the data is
3213 3213 * gathered from the crypto_data_t struct.
3214 3214 */
3215 3215 int
3216 3216 dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
3217 3217 int count)
3218 3218 {
3219 3219 int rv = CRYPTO_SUCCESS;
3220 3220 caddr_t baddr;
3221 3221 uint_t vec_idx;
3222 3222 uio_t *uiop;
3223 3223 off_t off = in->cd_offset;
3224 3224 size_t cur_len;
3225 3225 mblk_t *mp;
3226 3226
3227 3227 /* Process the residual first */
3228 3228 if (*residlen > 0) {
3229 3229 uint_t num = min(count, *residlen);
3230 3230 bcopy(resid, dest, num);
3231 3231 *residlen -= num;
3232 3232 if (*residlen > 0) {
3233 3233 /*
3234 3234 * Requested amount 'count' is less than what's in
3235 3235 * the residual, so shuffle any remaining resid to
3236 3236 * the front.
3237 3237 */
3238 3238 baddr = resid + num;
3239 3239 bcopy(baddr, resid, *residlen);
3240 3240 }
3241 3241 dest += num;
3242 3242 count -= num;
3243 3243 }
3244 3244
3245 3245 /* Now process what's in the crypto_data_t structs */
3246 3246 switch (in->cd_format) {
3247 3247 case CRYPTO_DATA_RAW:
3248 3248 if (count > in->cd_length) {
3249 3249 /*
3250 3250 * The caller specified a length greater than the
3251 3251 * size of the buffer.
3252 3252 */
3253 3253 return (CRYPTO_DATA_LEN_RANGE);
3254 3254 }
3255 3255 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
3256 3256 in->cd_offset += count;
3257 3257 in->cd_length -= count;
3258 3258 break;
3259 3259
3260 3260 case CRYPTO_DATA_UIO:
3261 3261 /*
3262 3262 * Jump to the first iovec containing data to be processed.
3263 3263 */
3264 3264 uiop = in->cd_uio;
3265 3265 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3266 3266 off >= uiop->uio_iov[vec_idx].iov_len;
3267 3267 off -= uiop->uio_iov[vec_idx++].iov_len)
3268 3268 ;
3269 3269 if (vec_idx == uiop->uio_iovcnt) {
3270 3270 /*
3271 3271 * The caller specified an offset that is larger than
3272 3272 * the total size of the buffers it provided.
3273 3273 */
3274 3274 return (CRYPTO_DATA_LEN_RANGE);
3275 3275 }
3276 3276
3277 3277 /*
3278 3278 * Now process the iovecs.
3279 3279 */
3280 3280 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3281 3281 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3282 3282 off, count);
3283 3283 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
3284 3284 cur_len);
3285 3285 count -= cur_len;
3286 3286 dest += cur_len;
3287 3287 in->cd_offset += cur_len;
3288 3288 in->cd_length -= cur_len;
3289 3289 vec_idx++;
3290 3290 off = 0;
3291 3291 }
3292 3292
3293 3293 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3294 3294 /*
3295 3295 			 * The end of the specified iovecs was reached but
3296 3296 * the length requested could not be processed
3297 3297 * (requested to digest more data than it provided).
3298 3298 */
3299 3299 return (CRYPTO_DATA_LEN_RANGE);
3300 3300 }
3301 3301 break;
3302 3302
3303 3303 case CRYPTO_DATA_MBLK:
3304 3304 /*
3305 3305 * Jump to the first mblk_t containing data to be processed.
3306 3306 */
3307 3307 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
3308 3308 off -= MBLKL(mp), mp = mp->b_cont)
3309 3309 ;
3310 3310 if (mp == NULL) {
3311 3311 /*
3312 3312 * The caller specified an offset that is larger than
3313 3313 * the total size of the buffers it provided.
3314 3314 */
3315 3315 return (CRYPTO_DATA_LEN_RANGE);
3316 3316 }
3317 3317
3318 3318 /*
3319 3319 * Now do the processing on the mblk chain.
3320 3320 */
3321 3321 while (mp != NULL && count > 0) {
3322 3322 cur_len = min(MBLKL(mp) - off, count);
3323 3323 bcopy((char *)(mp->b_rptr + off), dest, cur_len);
3324 3324 count -= cur_len;
3325 3325 dest += cur_len;
3326 3326 in->cd_offset += cur_len;
3327 3327 in->cd_length -= cur_len;
3328 3328 mp = mp->b_cont;
3329 3329 off = 0;
3330 3330 }
3331 3331
3332 3332 if (mp == NULL && count > 0) {
3333 3333 /*
3334 3334 * The end of the mblk was reached but the length
3335 3335 * requested could not be processed, (requested to
3336 3336 			 * requested could not be processed (requested to
3337 3337 */
3338 3338 return (CRYPTO_DATA_LEN_RANGE);
3339 3339 }
3340 3340 break;
3341 3341
3342 3342 default:
3343 3343 DBG(NULL, DWARN,
3344 3344 "dca_resid_gather: unrecognised crypto data format");
3345 3345 rv = CRYPTO_ARGUMENTS_BAD;
3346 3346 }
3347 3347 return (rv);
3348 3348 }
3349 3349
3350 3350 /*
3351 3351 * Appends the data to the crypto_data_t struct increasing cd_length.
3352 3352 * cd_offset is left unchanged.
3353 3353 * Data is reverse-copied if the flag is TRUE.
3354 3354 */
3355 3355 int
3356 3356 dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
3357 3357 {
3358 3358 int rv = CRYPTO_SUCCESS;
3359 3359 off_t offset = out->cd_offset + out->cd_length;
3360 3360 uint_t vec_idx;
3361 3361 uio_t *uiop;
3362 3362 size_t cur_len;
3363 3363 mblk_t *mp;
3364 3364
3365 3365 switch (out->cd_format) {
3366 3366 case CRYPTO_DATA_RAW:
3367 3367 if (out->cd_raw.iov_len - offset < count) {
3368 3368 /* Trying to write out more than space available. */
3369 3369 return (CRYPTO_DATA_LEN_RANGE);
3370 3370 }
3371 3371 if (reverse)
3372 3372 dca_reverse((void*) src, out->cd_raw.iov_base + offset,
3373 3373 count, count);
3374 3374 else
3375 3375 bcopy(src, out->cd_raw.iov_base + offset, count);
3376 3376 out->cd_length += count;
3377 3377 break;
3378 3378
3379 3379 case CRYPTO_DATA_UIO:
3380 3380 /*
3381 3381 * Jump to the first iovec that can be written to.
3382 3382 */
3383 3383 uiop = out->cd_uio;
3384 3384 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3385 3385 offset >= uiop->uio_iov[vec_idx].iov_len;
3386 3386 offset -= uiop->uio_iov[vec_idx++].iov_len)
3387 3387 ;
3388 3388 if (vec_idx == uiop->uio_iovcnt) {
3389 3389 /*
3390 3390 * The caller specified an offset that is larger than
3391 3391 * the total size of the buffers it provided.
3392 3392 */
3393 3393 return (CRYPTO_DATA_LEN_RANGE);
3394 3394 }
3395 3395
3396 3396 /*
3397 3397 * Now process the iovecs.
3398 3398 */
3399 3399 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3400 3400 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3401 3401 offset, count);
3402 3402 count -= cur_len;
3403 3403 if (reverse) {
3404 3404 dca_reverse((void*) (src+count),
3405 3405 uiop->uio_iov[vec_idx].iov_base +
3406 3406 offset, cur_len, cur_len);
3407 3407 } else {
3408 3408 bcopy(src, uiop->uio_iov[vec_idx].iov_base +
3409 3409 offset, cur_len);
3410 3410 src += cur_len;
3411 3411 }
3412 3412 out->cd_length += cur_len;
3413 3413 vec_idx++;
3414 3414 offset = 0;
3415 3415 }
3416 3416
3417 3417 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3418 3418 /*
3419 3419 			 * The end of the specified iovecs was reached but
3420 3420 * the length requested could not be processed
3421 3421 * (requested to write more data than space provided).
3422 3422 */
3423 3423 return (CRYPTO_DATA_LEN_RANGE);
3424 3424 }
3425 3425 break;
3426 3426
3427 3427 case CRYPTO_DATA_MBLK:
3428 3428 /*
3429 3429 * Jump to the first mblk_t that can be written to.
3430 3430 */
3431 3431 for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
3432 3432 offset -= MBLKL(mp), mp = mp->b_cont)
3433 3433 ;
3434 3434 if (mp == NULL) {
3435 3435 /*
3436 3436 * The caller specified an offset that is larger than
3437 3437 * the total size of the buffers it provided.
3438 3438 */
3439 3439 return (CRYPTO_DATA_LEN_RANGE);
3440 3440 }
3441 3441
3442 3442 /*
3443 3443 * Now do the processing on the mblk chain.
3444 3444 */
3445 3445 while (mp != NULL && count > 0) {
3446 3446 cur_len = min(MBLKL(mp) - offset, count);
3447 3447 count -= cur_len;
3448 3448 if (reverse) {
3449 3449 dca_reverse((void*) (src+count),
3450 3450 (char *)(mp->b_rptr + offset), cur_len,
3451 3451 cur_len);
3452 3452 } else {
3453 3453 bcopy(src, (char *)(mp->b_rptr + offset),
3454 3454 cur_len);
3455 3455 src += cur_len;
3456 3456 }
3457 3457 out->cd_length += cur_len;
3458 3458 mp = mp->b_cont;
3459 3459 offset = 0;
3460 3460 }
3461 3461
3462 3462 if (mp == NULL && count > 0) {
3463 3463 /*
3464 3464 			 * The end of the mblk chain was reached but the
3465 3465 			 * requested length could not be processed (requested
3466 3466 			 * to write more data than space provided).
3467 3467 */
3468 3468 return (CRYPTO_DATA_LEN_RANGE);
3469 3469 }
3470 3470 break;
3471 3471
3472 3472 default:
3473 3473 DBG(NULL, DWARN, "unrecognised crypto data format");
3474 3474 rv = CRYPTO_ARGUMENTS_BAD;
3475 3475 }
3476 3476 return (rv);
3477 3477 }
3478 3478
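
Worth noting about dca_scatter above: output is appended at cd_offset + cd_length, and when `reverse` is set the source is consumed tail-first (`count` is decremented before `src + count` is taken), so a destination split across several iovecs or mblks still receives one contiguous byte-reversed image of the source. A small userland sketch of that tail-first reversal over two output segments (all names illustrative):

	#include <stdio.h>
	#include <stddef.h>

	/* Copy n bytes of src into dst in reverse byte order. */
	static void
	reverse_copy(const char *src, char *dst, size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++)
			dst[i] = src[n - 1 - i];
	}

	int
	main(void)
	{
		const char src[] = { '1', '2', '3', '4', '5' };
		char seg1[3], seg2[2];
		size_t count = sizeof (src);
		size_t cur;

		/* First output segment takes 3 bytes from the tail of src. */
		cur = sizeof (seg1);
		count -= cur;
		reverse_copy(src + count, seg1, cur);		/* "543" */

		/* Second segment takes the remaining 2 bytes. */
		cur = sizeof (seg2);
		count -= cur;
		reverse_copy(src + count, seg2, cur);		/* "21" */

		(void) printf("%.3s%.2s\n", seg1, seg2);	/* "54321" */
		return (0);
	}
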
3479 3479 /*
3480 3480 * Compare two byte arrays in reverse order.
3481 3481 * Return 0 if they are identical, 1 otherwise.
3482 3482 */
3483 3483 int
3484 3484 dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
3485 3485 {
3486 3486 int i;
3487 3487 caddr_t src, dst;
3488 3488
3489 3489 if (!n)
3490 3490 return (0);
3491 3491
3492 3492 src = ((caddr_t)s1) + n - 1;
3493 3493 dst = (caddr_t)s2;
3494 3494 for (i = 0; i < n; i++) {
3495 3495 if (*src != *dst)
3496 3496 return (1);
3497 3497 src--;
3498 3498 dst++;
3499 3499 }
3500 3500
3501 3501 return (0);
3502 3502 }
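
dca_bcmp_reverse walks s1 from its last byte and s2 from its first, so it answers "is s2 the byte-reversed image of s1?" without allocating a temporary. A quick userland check of the same walk (hypothetical harness, not driver code):

	#include <assert.h>
	#include <stddef.h>

	static int
	bcmp_reverse(const unsigned char *s1, const unsigned char *s2, size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++) {
			if (s1[n - 1 - i] != s2[i])
				return (1);
		}
		return (0);
	}

	int
	main(void)
	{
		unsigned char be[] = { 0x01, 0x02, 0x03 };
		unsigned char le[] = { 0x03, 0x02, 0x01 };

		assert(bcmp_reverse(be, le, 3) == 0);	/* reversed images match */
		assert(bcmp_reverse(be, be, 3) == 1);	/* non-palindrome differs */
		return (0);
	}
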
3503 3503
3504 3504
3505 3505 /*
3506 3506 * This calculates the size of a bignum in bits, specifically not counting
3507 3507 * leading zero bits. This size calculation must be done *before* any
3508 3508  * endian reversal takes place (i.e., the numbers are in absolute
3509 3509  * big-endian order).
3510 3510 */
3511 3511 int
3512 3512 dca_bitlen(unsigned char *bignum, int bytelen)
3513 3513 {
3514 3514 unsigned char msbyte;
3515 3515 int i, j;
3516 3516
3517 3517 for (i = 0; i < bytelen - 1; i++) {
3518 3518 if (bignum[i] != 0) {
3519 3519 break;
3520 3520 }
3521 3521 }
3522 3522 msbyte = bignum[i];
3523 3523 for (j = 8; j > 1; j--) {
3524 3524 if (msbyte & 0x80) {
3525 3525 break;
3526 3526 }
3527 3527 msbyte <<= 1;
3528 3528 }
3529 3529 return ((8 * (bytelen - i - 1)) + j);
3530 3530 }
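
dca_bitlen skips leading zero bytes and then counts down from 8 to find the most significant set bit of the first nonzero byte; for example, { 0x00, 0x1f } has a bit length of 5. One edge case worth knowing: an all-zero bignum is reported as 1 bit, not 0. A userland harness exercising the same logic (the helper is reproduced here only for the check):

	#include <assert.h>

	static int
	bitlen(const unsigned char *bignum, int bytelen)
	{
		unsigned char msbyte;
		int i, j;

		for (i = 0; i < bytelen - 1; i++) {
			if (bignum[i] != 0)
				break;
		}
		msbyte = bignum[i];
		for (j = 8; j > 1; j--) {
			if (msbyte & 0x80)
				break;
			msbyte <<= 1;
		}
		return ((8 * (bytelen - i - 1)) + j);
	}

	int
	main(void)
	{
		unsigned char a[] = { 0x00, 0x1f };	/* leading zero skipped */
		unsigned char b[] = { 0x80 };
		unsigned char z[] = { 0x00 };

		assert(bitlen(a, 2) == 5);
		assert(bitlen(b, 1) == 8);
		assert(bitlen(z, 1) == 1);	/* zero reports as 1 bit */
		return (0);
	}
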
3531 3531
3532 3532 /*
3533 3533  * This compares two bignums (in big-endian order). It ignores leading
3534 3534  * null bytes. The result semantics follow bcmp, memcmp, strcmp, etc.
3535 3535 */
3536 3536 int
3537 3537 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
3538 3538 {
3539 3539 while ((n1len > 1) && (*n1 == 0)) {
3540 3540 n1len--;
3541 3541 n1++;
3542 3542 }
3543 3543 while ((n2len > 1) && (*n2 == 0)) {
3544 3544 n2len--;
3545 3545 n2++;
3546 3546 }
3547 3547 if (n1len != n2len) {
3548 3548 return (n1len - n2len);
3549 3549 }
3550 3550 while ((n1len > 1) && (*n1 == *n2)) {
3551 3551 n1++;
3552 3552 n2++;
3553 3553 n1len--;
3554 3554 }
3555 3555 return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
3556 3556 }
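
Because dca_numcmp strips leading zero bytes from both operands first, numbers that differ only in zero padding compare equal, and once stripped the longer operand is necessarily the larger. A short userland check of those two properties (the helper mirrors the kernel routine above):

	#include <assert.h>

	static int
	numcmp(const unsigned char *n1, int n1len,
	    const unsigned char *n2, int n2len)
	{
		while ((n1len > 1) && (*n1 == 0)) {
			n1len--;
			n1++;
		}
		while ((n2len > 1) && (*n2 == 0)) {
			n2len--;
			n2++;
		}
		if (n1len != n2len)
			return (n1len - n2len);
		while ((n1len > 1) && (*n1 == *n2)) {
			n1++;
			n2++;
			n1len--;
		}
		return ((int)*n1 - (int)*n2);
	}

	int
	main(void)
	{
		unsigned char a[] = { 0x00, 0x00, 0x05 };
		unsigned char b[] = { 0x05 };
		unsigned char c[] = { 0x01, 0x00 };

		assert(numcmp(a, 3, b, 1) == 0);	/* padding ignored */
		assert(numcmp(c, 2, b, 1) > 0);		/* 0x0100 > 0x05 */
		return (0);
	}
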
3557 3557
3558 3558 /*
3559 3559 * Return array of key attributes.
3560 3560 */
3561 3561 crypto_object_attribute_t *
3562 3562 dca_get_key_attr(crypto_key_t *key)
3563 3563 {
3564 3564 if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
3565 3565 (key->ck_count == 0)) {
3566 3566 return (NULL);
3567 3567 }
3568 3568
3569 3569 return (key->ck_attrs);
3570 3570 }
3571 3571
3572 3572 /*
3573 3573  * If the attribute type exists, valp points to its 32-bit value.
3574 3574 */
3575 3575 int
3576 3576 dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
3577 3577 uint64_t atype, uint32_t *valp)
3578 3578 {
3579 3579 crypto_object_attribute_t *bap;
3580 3580
3581 3581 bap = dca_find_attribute(attrp, atnum, atype);
3582 3582 if (bap == NULL) {
3583 3583 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3584 3584 }
3585 3585
3586 3586 *valp = *bap->oa_value;
3587 3587
3588 3588 return (CRYPTO_SUCCESS);
3589 3589 }
3590 3590
3591 3591 /*
3592 3592  * If the attribute type exists, data contains the start address of the
3593 3593  * value, and numelems contains its length.
3594 3594 */
3595 3595 int
3596 3596 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
3597 3597 uint64_t atype, void **data, unsigned int *numelems)
3598 3598 {
3599 3599 crypto_object_attribute_t *bap;
3600 3600
3601 3601 bap = dca_find_attribute(attrp, atnum, atype);
3602 3602 if (bap == NULL) {
3603 3603 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3604 3604 }
3605 3605
3606 3606 *data = bap->oa_value;
3607 3607 *numelems = bap->oa_value_len;
3608 3608
3609 3609 return (CRYPTO_SUCCESS);
3610 3610 }
3611 3611
3612 3612 /*
3613 3613  * Finds the entry of the specified attribute type. If it is not found,
3614 3614  * dca_find_attribute returns NULL.
3615 3615 */
3616 3616 crypto_object_attribute_t *
3617 3617 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
3618 3618 uint64_t atype)
3619 3619 {
3620 3620 while (atnum) {
3621 3621 if (attrp->oa_type == atype)
3622 3622 return (attrp);
3623 3623 atnum--;
3624 3624 attrp++;
3625 3625 }
3626 3626 return (NULL);
3627 3627 }
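
The two lookup helpers above are thin wrappers over this linear search: a caller typically fetches the attribute array with dca_get_key_attr and then pulls individual fields out by type. A minimal userland sketch of that flow, with a simplified attribute struct and a made-up type constant (neither is the real kCF definition):

	#include <stdio.h>
	#include <stdint.h>

	struct attr {
		uint64_t	oa_type;
		void		*oa_value;
		unsigned int	oa_value_len;
	};

	#define	ATTR_MODULUS	0x120	/* illustrative type code only */

	static struct attr *
	find_attribute(struct attr *attrp, unsigned int atnum, uint64_t atype)
	{
		while (atnum) {
			if (attrp->oa_type == atype)
				return (attrp);
			atnum--;
			attrp++;
		}
		return (NULL);
	}

	int
	main(void)
	{
		unsigned char modulus[] = { 0xc3, 0x1a, 0x55 };
		struct attr attrs[] = {
			{ 0x100, NULL, 0 },
			{ ATTR_MODULUS, modulus, sizeof (modulus) },
		};
		struct attr *bap;

		bap = find_attribute(attrs, 2, ATTR_MODULUS);
		if (bap != NULL)
			(void) printf("modulus is %u bytes\n",
			    bap->oa_value_len);
		return (0);
	}
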
3628 3628
3629 3629 /*
3630 3630 * Return the address of the first data buffer. If the data format is
3631 3631 * unrecognised return NULL.
3632 3632 */
3633 3633 caddr_t
3634 3634 dca_bufdaddr(crypto_data_t *data)
3635 3635 {
3636 3636 switch (data->cd_format) {
3637 3637 case CRYPTO_DATA_RAW:
3638 3638 return (data->cd_raw.iov_base + data->cd_offset);
3639 3639 case CRYPTO_DATA_UIO:
3640 3640 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
3641 3641 case CRYPTO_DATA_MBLK:
3642 3642 return ((char *)data->cd_mp->b_rptr + data->cd_offset);
3643 3643 default:
3644 3644 DBG(NULL, DWARN,
3645 3645 "dca_bufdaddr: unrecognised crypto data format");
3646 3646 return (NULL);
3647 3647 }
3648 3648 }
3649 3649
3650 3650 static caddr_t
3651 3651 dca_bufdaddr_out(crypto_data_t *data)
3652 3652 {
3653 3653 size_t offset = data->cd_offset + data->cd_length;
3654 3654
3655 3655 switch (data->cd_format) {
3656 3656 case CRYPTO_DATA_RAW:
3657 3657 return (data->cd_raw.iov_base + offset);
3658 3658 case CRYPTO_DATA_UIO:
3659 3659 return (data->cd_uio->uio_iov[0].iov_base + offset);
3660 3660 case CRYPTO_DATA_MBLK:
3661 3661 return ((char *)data->cd_mp->b_rptr + offset);
3662 3662 default:
3663 3663 DBG(NULL, DWARN,
3664 3664 "dca_bufdaddr_out: unrecognised crypto data format");
3665 3665 return (NULL);
3666 3666 }
3667 3667 }
3668 3668
3669 3669 /*
3670 3670 * Control entry points.
3671 3671 */
3672 3672
3673 3673 /* ARGSUSED */
3674 3674 static void
3675 3675 dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
3676 3676 {
3677 3677 *status = CRYPTO_PROVIDER_READY;
3678 3678 }
3679 3679
3680 3680 /*
3681 3681 * Cipher (encrypt/decrypt) entry points.
3682 3682 */
3683 3683
3684 3684 /* ARGSUSED */
3685 3685 static int
3686 3686 dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3687 3687 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3688 3688 crypto_req_handle_t req)
3689 3689 {
3690 3690 int error = CRYPTO_FAILED;
3691 3691 dca_t *softc;
3692 3692
3693 3693 softc = DCA_SOFTC_FROM_CTX(ctx);
3694 3694 DBG(softc, DENTRY, "dca_encrypt_init: started");
3695 3695
3696 3696 /* check mechanism */
3697 3697 switch (mechanism->cm_type) {
3698 3698 case DES_CBC_MECH_INFO_TYPE:
3699 3699 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3700 3700 DR_ENCRYPT);
3701 3701 break;
3702 3702 case DES3_CBC_MECH_INFO_TYPE:
3703 3703 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3704 3704 DR_ENCRYPT | DR_TRIPLE);
3705 3705 break;
3706 3706 case RSA_PKCS_MECH_INFO_TYPE:
3707 3707 case RSA_X_509_MECH_INFO_TYPE:
3708 3708 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3709 3709 break;
3710 3710 default:
3711 3711 cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
3712 3712 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3713 3713 error = CRYPTO_MECHANISM_INVALID;
3714 3714 }
3715 3715
3716 3716 DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);
3717 3717
3718 3718 if (error == CRYPTO_SUCCESS)
3719 3719 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3720 3720 &softc->dca_ctx_list_lock);
3721 3721
3722 3722 return (error);
3723 3723 }
3724 3724
3725 3725 /* ARGSUSED */
3726 3726 static int
3727 3727 dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3728 3728 crypto_data_t *ciphertext, crypto_req_handle_t req)
3729 3729 {
3730 3730 int error = CRYPTO_FAILED;
3731 3731 dca_t *softc;
3732 3732
3733 3733 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3734 3734 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3735 3735
3736 3736 softc = DCA_SOFTC_FROM_CTX(ctx);
3737 3737 DBG(softc, DENTRY, "dca_encrypt: started");
3738 3738
3739 3739 /* handle inplace ops */
3740 3740 if (!ciphertext) {
3741 3741 dca_request_t *reqp = ctx->cc_provider_private;
3742 3742 reqp->dr_flags |= DR_INPLACE;
3743 3743 ciphertext = plaintext;
3744 3744 }
3745 3745
3746 3746 /* check mechanism */
3747 3747 switch (DCA_MECH_FROM_CTX(ctx)) {
3748 3748 case DES_CBC_MECH_INFO_TYPE:
3749 3749 error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
3750 3750 break;
3751 3751 case DES3_CBC_MECH_INFO_TYPE:
3752 3752 error = dca_3des(ctx, plaintext, ciphertext, req,
3753 3753 DR_ENCRYPT | DR_TRIPLE);
3754 3754 break;
3755 3755 case RSA_PKCS_MECH_INFO_TYPE:
3756 3756 case RSA_X_509_MECH_INFO_TYPE:
3757 3757 error = dca_rsastart(ctx, plaintext, ciphertext, req,
3758 3758 DCA_RSA_ENC);
3759 3759 break;
3760 3760 default:
3761 3761 /* Should never reach here */
3762 3762 cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
3763 3763 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3764 3764 error = CRYPTO_MECHANISM_INVALID;
3765 3765 }
3766 3766
3767 3767 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3768 3768 (error != CRYPTO_BUFFER_TOO_SMALL)) {
3769 3769 ciphertext->cd_length = 0;
3770 3770 }
3771 3771
3772 3772 DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);
3773 3773
3774 3774 return (error);
3775 3775 }
3776 3776
3777 3777 /* ARGSUSED */
3778 3778 static int
3779 3779 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3780 3780 crypto_data_t *ciphertext, crypto_req_handle_t req)
3781 3781 {
3782 3782 int error = CRYPTO_FAILED;
3783 3783 dca_t *softc;
3784 3784
3785 3785 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3786 3786 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3787 3787
3788 3788 softc = DCA_SOFTC_FROM_CTX(ctx);
3789 3789 DBG(softc, DENTRY, "dca_encrypt_update: started");
3790 3790
3791 3791 /* handle inplace ops */
3792 3792 if (!ciphertext) {
3793 3793 dca_request_t *reqp = ctx->cc_provider_private;
3794 3794 reqp->dr_flags |= DR_INPLACE;
3795 3795 ciphertext = plaintext;
3796 3796 }
3797 3797
3798 3798 /* check mechanism */
3799 3799 switch (DCA_MECH_FROM_CTX(ctx)) {
3800 3800 case DES_CBC_MECH_INFO_TYPE:
3801 3801 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3802 3802 DR_ENCRYPT);
3803 3803 break;
3804 3804 case DES3_CBC_MECH_INFO_TYPE:
3805 3805 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3806 3806 DR_ENCRYPT | DR_TRIPLE);
3807 3807 break;
3808 3808 default:
3809 3809 /* Should never reach here */
3810 3810 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
3811 3811 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3812 3812 error = CRYPTO_MECHANISM_INVALID;
3813 3813 }
3814 3814
3815 3815 DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);
3816 3816
3817 3817 return (error);
3818 3818 }
3819 3819
3820 3820 /* ARGSUSED */
3821 3821 static int
3822 3822 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3823 3823 crypto_req_handle_t req)
3824 3824 {
3825 3825 int error = CRYPTO_FAILED;
3826 3826 dca_t *softc;
3827 3827
3828 3828 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3829 3829 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3830 3830
3831 3831 softc = DCA_SOFTC_FROM_CTX(ctx);
3832 3832 DBG(softc, DENTRY, "dca_encrypt_final: started");
3833 3833
3834 3834 /* check mechanism */
3835 3835 switch (DCA_MECH_FROM_CTX(ctx)) {
3836 3836 case DES_CBC_MECH_INFO_TYPE:
3837 3837 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
3838 3838 break;
3839 3839 case DES3_CBC_MECH_INFO_TYPE:
3840 3840 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
3841 3841 break;
3842 3842 default:
3843 3843 /* Should never reach here */
3844 3844 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
3845 3845 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3846 3846 error = CRYPTO_MECHANISM_INVALID;
3847 3847 }
3848 3848
3849 3849 DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);
3850 3850
3851 3851 return (error);
3852 3852 }
3853 3853
3854 3854 /* ARGSUSED */
3855 3855 static int
3856 3856 dca_encrypt_atomic(crypto_provider_handle_t provider,
3857 3857 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
3858 3858 crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
3859 3859 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
3860 3860 {
3861 3861 int error = CRYPTO_FAILED;
3862 3862 dca_t *softc = (dca_t *)provider;
3863 3863
3864 3864 DBG(softc, DENTRY, "dca_encrypt_atomic: started");
3865 3865
3866 3866 if (ctx_template != NULL)
3867 3867 return (CRYPTO_ARGUMENTS_BAD);
3868 3868
3869 3869 /* handle inplace ops */
3870 3870 if (!ciphertext) {
3871 3871 ciphertext = plaintext;
3872 3872 }
3873 3873
3874 3874 /* check mechanism */
3875 3875 switch (mechanism->cm_type) {
3876 3876 case DES_CBC_MECH_INFO_TYPE:
3877 3877 error = dca_3desatomic(provider, session_id, mechanism, key,
3878 3878 plaintext, ciphertext, KM_SLEEP, req,
3879 3879 DR_ENCRYPT | DR_ATOMIC);
3880 3880 break;
3881 3881 case DES3_CBC_MECH_INFO_TYPE:
3882 3882 error = dca_3desatomic(provider, session_id, mechanism, key,
3883 3883 plaintext, ciphertext, KM_SLEEP, req,
3884 3884 DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
3885 3885 break;
3886 3886 case RSA_PKCS_MECH_INFO_TYPE:
3887 3887 case RSA_X_509_MECH_INFO_TYPE:
3888 3888 error = dca_rsaatomic(provider, session_id, mechanism, key,
3889 3889 plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
3890 3890 break;
3891 3891 default:
3892 3892 cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
3893 3893 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3894 3894 error = CRYPTO_MECHANISM_INVALID;
3895 3895 }
3896 3896
3897 3897 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
3898 3898 ciphertext->cd_length = 0;
3899 3899 }
3900 3900
3901 3901 DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);
3902 3902
3903 3903 return (error);
3904 3904 }
3905 3905
3906 3906 /* ARGSUSED */
3907 3907 static int
3908 3908 dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3909 3909 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3910 3910 crypto_req_handle_t req)
3911 3911 {
3912 3912 int error = CRYPTO_FAILED;
3913 3913 dca_t *softc;
3914 3914
3915 3915 softc = DCA_SOFTC_FROM_CTX(ctx);
3916 3916 DBG(softc, DENTRY, "dca_decrypt_init: started");
3917 3917
3918 3918 /* check mechanism */
3919 3919 switch (mechanism->cm_type) {
3920 3920 case DES_CBC_MECH_INFO_TYPE:
3921 3921 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3922 3922 DR_DECRYPT);
3923 3923 break;
3924 3924 case DES3_CBC_MECH_INFO_TYPE:
3925 3925 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3926 3926 DR_DECRYPT | DR_TRIPLE);
3927 3927 break;
3928 3928 case RSA_PKCS_MECH_INFO_TYPE:
3929 3929 case RSA_X_509_MECH_INFO_TYPE:
3930 3930 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3931 3931 break;
3932 3932 default:
3933 3933 cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
3934 3934 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3935 3935 error = CRYPTO_MECHANISM_INVALID;
3936 3936 }
3937 3937
3938 3938 DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);
3939 3939
3940 3940 if (error == CRYPTO_SUCCESS)
3941 3941 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3942 3942 &softc->dca_ctx_list_lock);
3943 3943
3944 3944 return (error);
3945 3945 }
3946 3946
3947 3947 /* ARGSUSED */
3948 3948 static int
3949 3949 dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3950 3950 crypto_data_t *plaintext, crypto_req_handle_t req)
3951 3951 {
3952 3952 int error = CRYPTO_FAILED;
3953 3953 dca_t *softc;
3954 3954
3955 3955 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3956 3956 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3957 3957
3958 3958 softc = DCA_SOFTC_FROM_CTX(ctx);
3959 3959 DBG(softc, DENTRY, "dca_decrypt: started");
3960 3960
3961 3961 /* handle inplace ops */
3962 3962 if (!plaintext) {
3963 3963 dca_request_t *reqp = ctx->cc_provider_private;
3964 3964 reqp->dr_flags |= DR_INPLACE;
3965 3965 plaintext = ciphertext;
3966 3966 }
3967 3967
3968 3968 /* check mechanism */
3969 3969 switch (DCA_MECH_FROM_CTX(ctx)) {
3970 3970 case DES_CBC_MECH_INFO_TYPE:
3971 3971 error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
3972 3972 break;
3973 3973 case DES3_CBC_MECH_INFO_TYPE:
3974 3974 error = dca_3des(ctx, ciphertext, plaintext, req,
3975 3975 DR_DECRYPT | DR_TRIPLE);
3976 3976 break;
3977 3977 case RSA_PKCS_MECH_INFO_TYPE:
3978 3978 case RSA_X_509_MECH_INFO_TYPE:
3979 3979 error = dca_rsastart(ctx, ciphertext, plaintext, req,
3980 3980 DCA_RSA_DEC);
3981 3981 break;
3982 3982 default:
3983 3983 /* Should never reach here */
3984 3984 cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
3985 3985 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3986 3986 error = CRYPTO_MECHANISM_INVALID;
3987 3987 }
3988 3988
3989 3989 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3990 3990 (error != CRYPTO_BUFFER_TOO_SMALL)) {
3991 3991 if (plaintext)
3992 3992 plaintext->cd_length = 0;
3993 3993 }
3994 3994
3995 3995 DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);
3996 3996
3997 3997 return (error);
3998 3998 }
3999 3999
4000 4000 /* ARGSUSED */
4001 4001 static int
4002 4002 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
4003 4003 crypto_data_t *plaintext, crypto_req_handle_t req)
4004 4004 {
4005 4005 int error = CRYPTO_FAILED;
4006 4006 dca_t *softc;
4007 4007
4008 4008 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4009 4009 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4010 4010
4011 4011 softc = DCA_SOFTC_FROM_CTX(ctx);
4012 4012 DBG(softc, DENTRY, "dca_decrypt_update: started");
4013 4013
4014 4014 /* handle inplace ops */
4015 4015 if (!plaintext) {
4016 4016 dca_request_t *reqp = ctx->cc_provider_private;
4017 4017 reqp->dr_flags |= DR_INPLACE;
4018 4018 plaintext = ciphertext;
4019 4019 }
4020 4020
4021 4021 /* check mechanism */
4022 4022 switch (DCA_MECH_FROM_CTX(ctx)) {
4023 4023 case DES_CBC_MECH_INFO_TYPE:
4024 4024 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4025 4025 DR_DECRYPT);
4026 4026 break;
4027 4027 case DES3_CBC_MECH_INFO_TYPE:
4028 4028 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4029 4029 DR_DECRYPT | DR_TRIPLE);
4030 4030 break;
4031 4031 default:
4032 4032 /* Should never reach here */
4033 4033 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
4034 4034 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4035 4035 error = CRYPTO_MECHANISM_INVALID;
4036 4036 }
4037 4037
4038 4038 DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);
4039 4039
4040 4040 return (error);
4041 4041 }
4042 4042
4043 4043 /* ARGSUSED */
4044 4044 static int
4045 4045 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
4046 4046 crypto_req_handle_t req)
4047 4047 {
4048 4048 int error = CRYPTO_FAILED;
4049 4049 dca_t *softc;
4050 4050
4051 4051 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4052 4052 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4053 4053
4054 4054 softc = DCA_SOFTC_FROM_CTX(ctx);
4055 4055 DBG(softc, DENTRY, "dca_decrypt_final: started");
4056 4056
4057 4057 /* check mechanism */
4058 4058 switch (DCA_MECH_FROM_CTX(ctx)) {
4059 4059 case DES_CBC_MECH_INFO_TYPE:
4060 4060 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
4061 4061 break;
4062 4062 case DES3_CBC_MECH_INFO_TYPE:
4063 4063 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
4064 4064 break;
4065 4065 default:
4066 4066 /* Should never reach here */
4067 4067 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
4068 4068 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4069 4069 error = CRYPTO_MECHANISM_INVALID;
4070 4070 }
4071 4071
4072 4072 DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);
4073 4073
4074 4074 return (error);
4075 4075 }
4076 4076
4077 4077 /* ARGSUSED */
4078 4078 static int
4079 4079 dca_decrypt_atomic(crypto_provider_handle_t provider,
4080 4080 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4081 4081 crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
4082 4082 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4083 4083 {
4084 4084 int error = CRYPTO_FAILED;
4085 4085 dca_t *softc = (dca_t *)provider;
4086 4086
4087 4087 DBG(softc, DENTRY, "dca_decrypt_atomic: started");
4088 4088
4089 4089 if (ctx_template != NULL)
4090 4090 return (CRYPTO_ARGUMENTS_BAD);
4091 4091
4092 4092 /* handle inplace ops */
4093 4093 if (!plaintext) {
4094 4094 plaintext = ciphertext;
4095 4095 }
4096 4096
4097 4097 /* check mechanism */
4098 4098 switch (mechanism->cm_type) {
4099 4099 case DES_CBC_MECH_INFO_TYPE:
4100 4100 error = dca_3desatomic(provider, session_id, mechanism, key,
4101 4101 ciphertext, plaintext, KM_SLEEP, req,
4102 4102 DR_DECRYPT | DR_ATOMIC);
4103 4103 break;
4104 4104 case DES3_CBC_MECH_INFO_TYPE:
4105 4105 error = dca_3desatomic(provider, session_id, mechanism, key,
4106 4106 ciphertext, plaintext, KM_SLEEP, req,
4107 4107 DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
4108 4108 break;
4109 4109 case RSA_PKCS_MECH_INFO_TYPE:
4110 4110 case RSA_X_509_MECH_INFO_TYPE:
4111 4111 error = dca_rsaatomic(provider, session_id, mechanism, key,
4112 4112 ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
4113 4113 break;
4114 4114 default:
4115 4115 cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
4116 4116 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4117 4117 error = CRYPTO_MECHANISM_INVALID;
4118 4118 }
4119 4119
4120 4120 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
4121 4121 plaintext->cd_length = 0;
4122 4122 }
4123 4123
4124 4124 DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);
4125 4125
4126 4126 return (error);
4127 4127 }
4128 4128
4129 4129 /*
4130 4130 * Sign entry points.
4131 4131 */
4132 4132
4133 4133 /* ARGSUSED */
4134 4134 static int
4135 4135 dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4136 4136 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4137 4137 crypto_req_handle_t req)
4138 4138 {
4139 4139 int error = CRYPTO_FAILED;
4140 4140 dca_t *softc;
4141 4141
4142 4142 softc = DCA_SOFTC_FROM_CTX(ctx);
4143 4143 DBG(softc, DENTRY, "dca_sign_init: started\n");
4144 4144
4145 4145 if (ctx_template != NULL)
4146 4146 return (CRYPTO_ARGUMENTS_BAD);
4147 4147
4148 4148 /* check mechanism */
4149 4149 switch (mechanism->cm_type) {
4150 4150 case RSA_PKCS_MECH_INFO_TYPE:
4151 4151 case RSA_X_509_MECH_INFO_TYPE:
4152 4152 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4153 4153 break;
4154 4154 case DSA_MECH_INFO_TYPE:
4155 4155 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4156 4156 DCA_DSA_SIGN);
4157 4157 break;
4158 4158 default:
4159 4159 cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
4160 4160 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4161 4161 error = CRYPTO_MECHANISM_INVALID;
4162 4162 }
4163 4163
4164 4164 DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);
4165 4165
4166 4166 if (error == CRYPTO_SUCCESS)
4167 4167 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4168 4168 &softc->dca_ctx_list_lock);
4169 4169
4170 4170 return (error);
4171 4171 }
4172 4172
4173 4173 static int
4174 4174 dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
4175 4175 crypto_data_t *signature, crypto_req_handle_t req)
4176 4176 {
4177 4177 int error = CRYPTO_FAILED;
4178 4178 dca_t *softc;
4179 4179
4180 4180 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4181 4181 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4182 4182
4183 4183 softc = DCA_SOFTC_FROM_CTX(ctx);
4184 4184 DBG(softc, DENTRY, "dca_sign: started\n");
4185 4185
4186 4186 /* check mechanism */
4187 4187 switch (DCA_MECH_FROM_CTX(ctx)) {
4188 4188 case RSA_PKCS_MECH_INFO_TYPE:
4189 4189 case RSA_X_509_MECH_INFO_TYPE:
4190 4190 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
4191 4191 break;
4192 4192 case DSA_MECH_INFO_TYPE:
4193 4193 error = dca_dsa_sign(ctx, data, signature, req);
4194 4194 break;
4195 4195 default:
4196 4196 cmn_err(CE_WARN, "dca_sign: unexpected mech type "
4197 4197 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4198 4198 error = CRYPTO_MECHANISM_INVALID;
4199 4199 }
4200 4200
4201 4201 DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);
4202 4202
4203 4203 return (error);
4204 4204 }
4205 4205
4206 4206 /* ARGSUSED */
4207 4207 static int
4208 4208 dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
4209 4209 crypto_req_handle_t req)
4210 4210 {
4211 4211 int error = CRYPTO_MECHANISM_INVALID;
4212 4212 dca_t *softc;
4213 4213
4214 4214 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4215 4215 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4216 4216
4217 4217 softc = DCA_SOFTC_FROM_CTX(ctx);
4218 4218 DBG(softc, DENTRY, "dca_sign_update: started\n");
4219 4219
4220 4220 cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
4221 4221 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4222 4222
4223 4223 DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);
4224 4224
4225 4225 return (error);
4226 4226 }
4227 4227
4228 4228 /* ARGSUSED */
4229 4229 static int
4230 4230 dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4231 4231 crypto_req_handle_t req)
4232 4232 {
4233 4233 int error = CRYPTO_MECHANISM_INVALID;
4234 4234 dca_t *softc;
4235 4235
4236 4236 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4237 4237 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4238 4238
4239 4239 softc = DCA_SOFTC_FROM_CTX(ctx);
4240 4240 DBG(softc, DENTRY, "dca_sign_final: started\n");
4241 4241
4242 4242 cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
4243 4243 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4244 4244
4245 4245 DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);
4246 4246
4247 4247 return (error);
4248 4248 }
4249 4249
4250 4250 static int
4251 4251 dca_sign_atomic(crypto_provider_handle_t provider,
4252 4252 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4253 4253 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4254 4254 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4255 4255 {
4256 4256 int error = CRYPTO_FAILED;
4257 4257 dca_t *softc = (dca_t *)provider;
4258 4258
4259 4259 DBG(softc, DENTRY, "dca_sign_atomic: started\n");
4260 4260
4261 4261 if (ctx_template != NULL)
4262 4262 return (CRYPTO_ARGUMENTS_BAD);
4263 4263
4264 4264 /* check mechanism */
4265 4265 switch (mechanism->cm_type) {
4266 4266 case RSA_PKCS_MECH_INFO_TYPE:
4267 4267 case RSA_X_509_MECH_INFO_TYPE:
4268 4268 error = dca_rsaatomic(provider, session_id, mechanism, key,
4269 4269 data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
4270 4270 break;
4271 4271 case DSA_MECH_INFO_TYPE:
4272 4272 error = dca_dsaatomic(provider, session_id, mechanism, key,
4273 4273 data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
4274 4274 break;
4275 4275 default:
4276 4276 cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
4277 4277 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4278 4278 error = CRYPTO_MECHANISM_INVALID;
4279 4279 }
4280 4280
4281 4281 DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);
4282 4282
4283 4283 return (error);
4284 4284 }
4285 4285
4286 4286 /* ARGSUSED */
4287 4287 static int
4288 4288 dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4289 4289 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4290 4290 crypto_req_handle_t req)
4291 4291 {
4292 4292 int error = CRYPTO_FAILED;
4293 4293 dca_t *softc;
4294 4294
4295 4295 softc = DCA_SOFTC_FROM_CTX(ctx);
4296 4296 DBG(softc, DENTRY, "dca_sign_recover_init: started\n");
4297 4297
4298 4298 if (ctx_template != NULL)
4299 4299 return (CRYPTO_ARGUMENTS_BAD);
4300 4300
4301 4301 /* check mechanism */
4302 4302 switch (mechanism->cm_type) {
4303 4303 case RSA_PKCS_MECH_INFO_TYPE:
4304 4304 case RSA_X_509_MECH_INFO_TYPE:
4305 4305 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4306 4306 break;
4307 4307 default:
4308 4308 cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
4309 4309 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4310 4310 error = CRYPTO_MECHANISM_INVALID;
4311 4311 }
4312 4312
4313 4313 DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);
4314 4314
4315 4315 if (error == CRYPTO_SUCCESS)
4316 4316 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4317 4317 &softc->dca_ctx_list_lock);
4318 4318
4319 4319 return (error);
4320 4320 }
4321 4321
4322 4322 static int
4323 4323 dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
4324 4324 crypto_data_t *signature, crypto_req_handle_t req)
4325 4325 {
4326 4326 int error = CRYPTO_FAILED;
4327 4327 dca_t *softc;
4328 4328
4329 4329 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4330 4330 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4331 4331
4332 4332 softc = DCA_SOFTC_FROM_CTX(ctx);
4333 4333 DBG(softc, DENTRY, "dca_sign_recover: started\n");
4334 4334
4335 4335 /* check mechanism */
4336 4336 switch (DCA_MECH_FROM_CTX(ctx)) {
4337 4337 case RSA_PKCS_MECH_INFO_TYPE:
4338 4338 case RSA_X_509_MECH_INFO_TYPE:
4339 4339 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
4340 4340 break;
4341 4341 default:
4342 4342 cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
4343 4343 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4344 4344 error = CRYPTO_MECHANISM_INVALID;
4345 4345 }
4346 4346
4347 4347 DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);
4348 4348
4349 4349 return (error);
4350 4350 }
4351 4351
4352 4352 static int
4353 4353 dca_sign_recover_atomic(crypto_provider_handle_t provider,
4354 4354 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4355 4355 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4356 4356 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4357 4357 {
4358 4358 int error = CRYPTO_FAILED;
4359 4359 dca_t *softc = (dca_t *)provider;
4360 4360
4361 4361 DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");
4362 4362
4363 4363 if (ctx_template != NULL)
4364 4364 return (CRYPTO_ARGUMENTS_BAD);
4365 4365
4366 4366 /* check mechanism */
4367 4367 switch (mechanism->cm_type) {
4368 4368 case RSA_PKCS_MECH_INFO_TYPE:
4369 4369 case RSA_X_509_MECH_INFO_TYPE:
4370 4370 error = dca_rsaatomic(provider, session_id, mechanism, key,
4371 4371 data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
4372 4372 break;
4373 4373 default:
4374 4374 cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
4375 4375 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4376 4376 error = CRYPTO_MECHANISM_INVALID;
4377 4377 }
4378 4378
4379 4379 DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);
4380 4380
4381 4381 return (error);
4382 4382 }
4383 4383
4384 4384 /*
4385 4385 * Verify entry points.
4386 4386 */
4387 4387
4388 4388 /* ARGSUSED */
4389 4389 static int
4390 4390 dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4391 4391 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4392 4392 crypto_req_handle_t req)
4393 4393 {
4394 4394 int error = CRYPTO_FAILED;
4395 4395 dca_t *softc;
4396 4396
4397 4397 softc = DCA_SOFTC_FROM_CTX(ctx);
4398 4398 DBG(softc, DENTRY, "dca_verify_init: started\n");
4399 4399
4400 4400 if (ctx_template != NULL)
4401 4401 return (CRYPTO_ARGUMENTS_BAD);
4402 4402
4403 4403 /* check mechanism */
4404 4404 switch (mechanism->cm_type) {
4405 4405 case RSA_PKCS_MECH_INFO_TYPE:
4406 4406 case RSA_X_509_MECH_INFO_TYPE:
4407 4407 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4408 4408 break;
4409 4409 case DSA_MECH_INFO_TYPE:
4410 4410 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4411 4411 DCA_DSA_VRFY);
4412 4412 break;
4413 4413 default:
4414 4414 cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
4415 4415 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4416 4416 error = CRYPTO_MECHANISM_INVALID;
4417 4417 }
4418 4418
4419 4419 DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);
4420 4420
4421 4421 if (error == CRYPTO_SUCCESS)
4422 4422 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4423 4423 &softc->dca_ctx_list_lock);
4424 4424
4425 4425 return (error);
4426 4426 }
4427 4427
4428 4428 static int
4429 4429 dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
4430 4430 crypto_req_handle_t req)
4431 4431 {
4432 4432 int error = CRYPTO_FAILED;
4433 4433 dca_t *softc;
4434 4434
4435 4435 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4436 4436 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4437 4437
4438 4438 softc = DCA_SOFTC_FROM_CTX(ctx);
4439 4439 DBG(softc, DENTRY, "dca_verify: started\n");
4440 4440
4441 4441 /* check mechanism */
4442 4442 switch (DCA_MECH_FROM_CTX(ctx)) {
4443 4443 case RSA_PKCS_MECH_INFO_TYPE:
4444 4444 case RSA_X_509_MECH_INFO_TYPE:
4445 4445 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
4446 4446 break;
4447 4447 case DSA_MECH_INFO_TYPE:
4448 4448 error = dca_dsa_verify(ctx, data, signature, req);
4449 4449 break;
4450 4450 default:
4451 4451 cmn_err(CE_WARN, "dca_verify: unexpected mech type "
4452 4452 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4453 4453 error = CRYPTO_MECHANISM_INVALID;
4454 4454 }
4455 4455
4456 4456 DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);
4457 4457
4458 4458 return (error);
4459 4459 }
4460 4460
4461 4461 /* ARGSUSED */
4462 4462 static int
4463 4463 dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
4464 4464 crypto_req_handle_t req)
4465 4465 {
4466 4466 int error = CRYPTO_MECHANISM_INVALID;
4467 4467 dca_t *softc;
4468 4468
4469 4469 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4470 4470 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4471 4471
4472 4472 softc = DCA_SOFTC_FROM_CTX(ctx);
4473 4473 DBG(softc, DENTRY, "dca_verify_update: started\n");
4474 4474
4475 4475 cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
4476 4476 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4477 4477
4478 4478 DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);
4479 4479
4480 4480 return (error);
4481 4481 }
4482 4482
4483 4483 /* ARGSUSED */
4484 4484 static int
4485 4485 dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4486 4486 crypto_req_handle_t req)
4487 4487 {
4488 4488 int error = CRYPTO_MECHANISM_INVALID;
4489 4489 dca_t *softc;
4490 4490
4491 4491 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4492 4492 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4493 4493
4494 4494 softc = DCA_SOFTC_FROM_CTX(ctx);
4495 4495 DBG(softc, DENTRY, "dca_verify_final: started\n");
4496 4496
4497 4497 cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
4498 4498 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4499 4499
4500 4500 DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);
4501 4501
4502 4502 return (error);
4503 4503 }
4504 4504
4505 4505 static int
4506 4506 dca_verify_atomic(crypto_provider_handle_t provider,
4507 4507 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4508 4508 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4509 4509 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4510 4510 {
4511 4511 int error = CRYPTO_FAILED;
4512 4512 dca_t *softc = (dca_t *)provider;
4513 4513
4514 4514 DBG(softc, DENTRY, "dca_verify_atomic: started\n");
4515 4515
4516 4516 if (ctx_template != NULL)
4517 4517 return (CRYPTO_ARGUMENTS_BAD);
4518 4518
4519 4519 /* check mechanism */
4520 4520 switch (mechanism->cm_type) {
4521 4521 case RSA_PKCS_MECH_INFO_TYPE:
4522 4522 case RSA_X_509_MECH_INFO_TYPE:
4523 4523 error = dca_rsaatomic(provider, session_id, mechanism, key,
4524 4524 signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
4525 4525 break;
4526 4526 case DSA_MECH_INFO_TYPE:
4527 4527 error = dca_dsaatomic(provider, session_id, mechanism, key,
4528 4528 data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
4529 4529 break;
4530 4530 default:
4531 4531 cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
4532 4532 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4533 4533 error = CRYPTO_MECHANISM_INVALID;
4534 4534 }
4535 4535
4536 4536 DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);
4537 4537
4538 4538 return (error);
4539 4539 }
4540 4540
4541 4541 /* ARGSUSED */
4542 4542 static int
4543 4543 dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4544 4544 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4545 4545 crypto_req_handle_t req)
4546 4546 {
4547 4547 int error = CRYPTO_MECHANISM_INVALID;
4548 4548 dca_t *softc;
4549 4549
4550 4550 softc = DCA_SOFTC_FROM_CTX(ctx);
4551 4551 DBG(softc, DENTRY, "dca_verify_recover_init: started\n");
4552 4552
4553 4553 if (ctx_template != NULL)
4554 4554 return (CRYPTO_ARGUMENTS_BAD);
4555 4555
4556 4556 /* check mechanism */
4557 4557 switch (mechanism->cm_type) {
4558 4558 case RSA_PKCS_MECH_INFO_TYPE:
4559 4559 case RSA_X_509_MECH_INFO_TYPE:
4560 4560 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4561 4561 break;
4562 4562 default:
4563 4563 cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
4564 4564 " 0x%llx\n", (unsigned long long)mechanism->cm_type);
4565 4565 }
4566 4566
4567 4567 DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);
4568 4568
4569 4569 if (error == CRYPTO_SUCCESS)
4570 4570 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4571 4571 &softc->dca_ctx_list_lock);
4572 4572
4573 4573 return (error);
4574 4574 }
4575 4575
4576 4576 static int
4577 4577 dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
4578 4578 crypto_data_t *data, crypto_req_handle_t req)
4579 4579 {
4580 4580 int error = CRYPTO_MECHANISM_INVALID;
4581 4581 dca_t *softc;
4582 4582
4583 4583 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4584 4584 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4585 4585
4586 4586 softc = DCA_SOFTC_FROM_CTX(ctx);
4587 4587 DBG(softc, DENTRY, "dca_verify_recover: started\n");
4588 4588
4589 4589 /* check mechanism */
4590 4590 switch (DCA_MECH_FROM_CTX(ctx)) {
4591 4591 case RSA_PKCS_MECH_INFO_TYPE:
4592 4592 case RSA_X_509_MECH_INFO_TYPE:
4593 4593 error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
4594 4594 break;
4595 4595 default:
4596 4596 cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
4597 4597 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4598 4598 }
4599 4599
4600 4600 DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);
4601 4601
4602 4602 return (error);
4603 4603 }
4604 4604
4605 4605 static int
4606 4606 dca_verify_recover_atomic(crypto_provider_handle_t provider,
4607 4607 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4608 4608 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4609 4609 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4610 4610 {
4611 4611 int error = CRYPTO_MECHANISM_INVALID;
4612 4612 dca_t *softc = (dca_t *)provider;
4613 4613
4614 4614 DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");
4615 4615
4616 4616 if (ctx_template != NULL)
4617 4617 return (CRYPTO_ARGUMENTS_BAD);
4618 4618
4619 4619 /* check mechanism */
4620 4620 switch (mechanism->cm_type) {
4621 4621 case RSA_PKCS_MECH_INFO_TYPE:
4622 4622 case RSA_X_509_MECH_INFO_TYPE:
4623 4623 error = dca_rsaatomic(provider, session_id, mechanism, key,
4624 4624 signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
4625 4625 break;
4626 4626 default:
4627 4627 cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
4628 4628 "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
4629 4629 error = CRYPTO_MECHANISM_INVALID;
4630 4630 }
4631 4631
4632 4632 DBG(softc, DENTRY,
4633 4633 "dca_verify_recover_atomic: done, err = 0x%x", error);
4634 4634
4635 4635 return (error);
4636 4636 }
4637 4637
4638 4638 /*
4639 4639 * Random number entry points.
4640 4640 */
4641 4641
4642 4642 /* ARGSUSED */
4643 4643 static int
4644 4644 dca_generate_random(crypto_provider_handle_t provider,
4645 4645 crypto_session_id_t session_id,
4646 4646 uchar_t *buf, size_t len, crypto_req_handle_t req)
4647 4647 {
4648 4648 int error = CRYPTO_FAILED;
4649 4649 dca_t *softc = (dca_t *)provider;
4650 4650
4651 4651 DBG(softc, DENTRY, "dca_generate_random: started");
4652 4652
4653 4653 error = dca_rng(softc, buf, len, req);
4654 4654
4655 4655 DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);
4656 4656
4657 4657 return (error);
4658 4658 }
4659 4659
4660 4660 /*
4661 4661 * Context management entry points.
4662 4662 */
4663 4663
4664 4664 int
4665 4665 dca_free_context(crypto_ctx_t *ctx)
4666 4666 {
4667 4667 int error = CRYPTO_SUCCESS;
4668 4668 dca_t *softc;
4669 4669
4670 4670 softc = DCA_SOFTC_FROM_CTX(ctx);
4671 4671 DBG(softc, DENTRY, "dca_free_context: entered");
4672 4672
4673 4673 if (ctx->cc_provider_private == NULL)
4674 4674 return (error);
4675 4675
4676 4676 dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);
4677 4677
4678 4678 error = dca_free_context_low(ctx);
4679 4679
4680 4680 DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);
4681 4681
4682 4682 return (error);
4683 4683 }
4684 4684
4685 4685 static int
4686 4686 dca_free_context_low(crypto_ctx_t *ctx)
4687 4687 {
4688 4688 int error = CRYPTO_SUCCESS;
4689 4689
4690 4690 /* check mechanism */
4691 4691 switch (DCA_MECH_FROM_CTX(ctx)) {
4692 4692 case DES_CBC_MECH_INFO_TYPE:
4693 4693 case DES3_CBC_MECH_INFO_TYPE:
4694 4694 dca_3desctxfree(ctx);
4695 4695 break;
4696 4696 case RSA_PKCS_MECH_INFO_TYPE:
4697 4697 case RSA_X_509_MECH_INFO_TYPE:
4698 4698 dca_rsactxfree(ctx);
4699 4699 break;
4700 4700 case DSA_MECH_INFO_TYPE:
4701 4701 dca_dsactxfree(ctx);
4702 4702 break;
4703 4703 default:
4704 4704 /* Should never reach here */
4705 4705 cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
4706 4706 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4707 4707 error = CRYPTO_MECHANISM_INVALID;
4708 4708 }
4709 4709
4710 4710 return (error);
4711 4711 }
4712 4712
4713 4713
4714 4714 /* Free any unfreed private contexts. This is called from detach. */
4715 4715 static void
4716 4716 dca_free_context_list(dca_t *dca)
4717 4717 {
4718 4718 dca_listnode_t *node;
4719 4719 crypto_ctx_t ctx;
4720 4720
4721 4721 (void) memset(&ctx, 0, sizeof (ctx));
4722 4722 ctx.cc_provider = dca;
4723 4723
4724 4724 while ((node = dca_delist2(&dca->dca_ctx_list,
4725 4725 &dca->dca_ctx_list_lock)) != NULL) {
4726 4726 ctx.cc_provider_private = node;
4727 4727 (void) dca_free_context_low(&ctx);
4728 4728 }
4729 4729 }
4730 4730
4731 4731 static int
4732 4732 ext_info_sym(crypto_provider_handle_t prov,
4733 4733 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4734 4734 {
4735 4735 return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
4736 4736 }
4737 4737
4738 4738 static int
4739 4739 ext_info_asym(crypto_provider_handle_t prov,
4740 4740 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
4741 4741 {
4742 4742 int rv;
4743 4743
4744 4744 rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
4745 4745 /* The asymmetric cipher slot supports random */
4746 4746 ext_info->ei_flags |= CRYPTO_EXTF_RNG;
4747 4747
4748 4748 return (rv);
4749 4749 }
4750 4750
4751 4751 /* ARGSUSED */
4752 4752 static int
4753 4753 ext_info_base(crypto_provider_handle_t prov,
4754 4754 crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
4755 4755 {
4756 4756 dca_t *dca = (dca_t *)prov;
4757 4757 int len;
4758 4758
4759 4759 /* Label */
4760 4760 (void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
4761 4761 ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
4762 4762 len = strlen((char *)ext_info->ei_label);
4763 4763 (void) memset(ext_info->ei_label + len, ' ',
4764 4764 CRYPTO_EXT_SIZE_LABEL - len);
4765 4765
4766 4766 /* Manufacturer ID */
4767 4767 (void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
4768 4768 DCA_MANUFACTURER_ID);
4769 4769 len = strlen((char *)ext_info->ei_manufacturerID);
4770 4770 (void) memset(ext_info->ei_manufacturerID + len, ' ',
4771 4771 CRYPTO_EXT_SIZE_MANUF - len);
4772 4772
4773 4773 /* Model */
4774 4774 	(void) sprintf((char *)ext_info->ei_model, "%s", dca->dca_model);
4775 4775
4776 4776 DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);
4777 4777
4778 4778 len = strlen((char *)ext_info->ei_model);
4779 4779 (void) memset(ext_info->ei_model + len, ' ',
4780 4780 CRYPTO_EXT_SIZE_MODEL - len);
4781 4781
4782 4782 /* Serial Number. Blank for Deimos */
4783 4783 (void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);
4784 4784
4785 4785 ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;
4786 4786
4787 4787 ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
4788 4788 ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
4789 4789 ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
4790 4790 ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
4791 4791 ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
4792 4792 ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
4793 4793 ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
4794 4794 ext_info->ei_hardware_version.cv_major = 0;
4795 4795 ext_info->ei_hardware_version.cv_minor = 0;
4796 4796 ext_info->ei_firmware_version.cv_major = 0;
4797 4797 ext_info->ei_firmware_version.cv_minor = 0;
4798 4798
4799 4799 	/* Time. Need not be supplied for a token without a clock. */
4800 4800 ext_info->ei_time[0] = '\000';
4801 4801
4802 4802 return (CRYPTO_SUCCESS);
4803 4803 }
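
ext_info_base fills PKCS#11-style fixed-width fields: write the text, then pad the remainder of the field with spaces, leaving no NUL terminator. A userland sketch of that padding idiom (the 32-byte width and strings are examples only; the driver uses CRYPTO_EXT_SIZE_LABEL and friends, and assumes the text fits the field):

	#include <stdio.h>
	#include <string.h>

	#define	EXAMPLE_LABEL_SIZE	32	/* stand-in for CRYPTO_EXT_SIZE_LABEL */

	int
	main(void)
	{
		unsigned char label[EXAMPLE_LABEL_SIZE];
		int len;

		/* Write the text, then space-pad to the full fixed width. */
		len = snprintf((char *)label, sizeof (label), "%s/%d %s",
		    "dca", 0, "sym");
		(void) memset(label + len, ' ', sizeof (label) - len);

		/* The field is not NUL-terminated; print with a width. */
		(void) printf("[%.*s]\n", EXAMPLE_LABEL_SIZE, label);
		return (0);
	}
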
4804 4804
4805 4805 static void
4806 4806 dca_fma_init(dca_t *dca)
4807 4807 {
4808 4808 ddi_iblock_cookie_t fm_ibc;
4809 4809 int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
4810 4810 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
4811 4811 DDI_FM_ERRCB_CAPABLE;
4812 4812
4813 4813 /* Read FMA capabilities from dca.conf file (if present) */
4814 4814 dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
4815 4815 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
4816 4816 fm_capabilities);
4817 4817
4818 4818 DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);
4819 4819
4820 4820 /* Only register with IO Fault Services if we have some capability */
4821 4821 if (dca->fm_capabilities) {
4822 4822 dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
4823 4823 dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;
4824 4824
4825 4825 /* Register capabilities with IO Fault Services */
4826 4826 ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
4827 4827 DBG(dca, DWARN, "fm_capable() = 0x%x",
4828 4828 ddi_fm_capable(dca->dca_dip));
4829 4829
4830 4830 /*
4831 4831 * Initialize pci ereport capabilities if ereport capable
4832 4832 */
4833 4833 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
4834 4834 DDI_FM_ERRCB_CAP(dca->fm_capabilities))
4835 4835 pci_ereport_setup(dca->dca_dip);
4836 4836
4837 4837 /*
4838 4838 * Initialize callback mutex and register error callback if
4839 4839 * error callback capable.
4840 4840 */
4841 4841 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4842 4842 ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
4843 4843 (void *)dca);
4844 4844 }
4845 4845 } else {
4846 4846 /*
4847 4847 		 * These fields have to be cleared if there are no FMA
4848 4848 		 * capabilities at runtime.
4849 4849 */
4850 4850 dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
4851 4851 dca_dmaattr.dma_attr_flags = 0;
4852 4852 }
4853 4853 }
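
dca_fma_init's ddi_getprop call is a default-then-override pattern: the driver starts from a full capability mask and lets a dca.conf "fm-capable" property trim or disable it, with a zero value disabling FMA entirely (the else branch above resets the access and DMA attributes). The same pattern in a userland sketch, with an environment variable standing in for the .conf property (all names illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	#define	DEFAULT_CAPS	0x0f	/* stand-in for the default mask */

	int
	main(void)
	{
		/* Use the override when present, else the default. */
		const char *prop = getenv("FM_CAPABLE");
		long caps = (prop != NULL) ?
		    strtol(prop, NULL, 0) : DEFAULT_CAPS;

		if (caps != 0)
			(void) printf("registering with capabilities 0x%lx\n",
			    (unsigned long)caps);
		else
			(void) printf("fault management disabled by config\n");
		return (0);
	}
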
4854 4854
4855 4855
4856 4856 static void
4857 4857 dca_fma_fini(dca_t *dca)
4858 4858 {
4859 4859 /* Only unregister FMA capabilities if we registered some */
4860 4860 if (dca->fm_capabilities) {
4861 4861
4862 4862 /*
4863 4863 * Release any resources allocated by pci_ereport_setup()
4864 4864 */
4865 4865 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
4866 4866 DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4867 4867 pci_ereport_teardown(dca->dca_dip);
4868 4868 }
4869 4869
4870 4870 /*
4871 4871 * Free callback mutex and un-register error callback if
4872 4872 * error callback capable.
4873 4873 */
4874 4874 if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
4875 4875 ddi_fm_handler_unregister(dca->dca_dip);
4876 4876 }
4877 4877
4878 4878 /* Unregister from IO Fault Services */
4879 4879 ddi_fm_fini(dca->dca_dip);
4880 4880 DBG(dca, DWARN, "fm_capable() = 0x%x",
4881 4881 ddi_fm_capable(dca->dca_dip));
4882 4882 }
4883 4883 }
4884 4884
4885 4885
4886 4886 /*
4887 4887 * The IO fault service error handling callback function
4888 4888 */
4889 4889 /*ARGSUSED*/
4890 4890 static int
4891 4891 dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4892 4892 {
4893 4893 dca_t *dca = (dca_t *)impl_data;
4894 4894
4895 4895 pci_ereport_post(dip, err, NULL);
4896 4896 if (err->fme_status == DDI_FM_FATAL) {
4897 4897 dca_failure(dca, DDI_DATAPATH_FAULT,
4898 4898 DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
4899 4899 		    "PCI fault in FMA callback.");
4900 4900 }
4901 4901 return (err->fme_status);
4902 4902 }
4903 4903
4904 4904
4905 4905 static int
4906 4906 dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
4907 4907 dca_fma_eclass_t eclass_index)
4908 4908 {
4909 4909 ddi_fm_error_t de;
4910 4910 int version = 0;
4911 4911
4912 4912 ddi_fm_acc_err_get(handle, &de, version);
4913 4913 if (de.fme_status != DDI_FM_OK) {
4914 4914 dca_failure(dca, DDI_DATAPATH_FAULT,
4915 4915 eclass_index, fm_ena_increment(de.fme_ena),
4916 4916 CRYPTO_DEVICE_ERROR, "");
4917 4917 return (DDI_FAILURE);
4918 4918 }
4919 4919
4920 4920 return (DDI_SUCCESS);
4921 4921 }
4922 4922
4923 4923 int
4924 4924 dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
4925 4925 dca_fma_eclass_t eclass_index)
4926 4926 {
4927 4927 ddi_fm_error_t de;
4928 4928 int version = 0;
4929 4929
4930 4930 ddi_fm_dma_err_get(handle, &de, version);
4931 4931 if (de.fme_status != DDI_FM_OK) {
4932 4932 dca_failure(dca, DDI_DATAPATH_FAULT,
4933 4933 eclass_index, fm_ena_increment(de.fme_ena),
4934 4934 CRYPTO_DEVICE_ERROR, "");
4935 4935 return (DDI_FAILURE);
4936 4936 }
4937 4937 return (DDI_SUCCESS);
4938 4938 }
4939 4939
4940 4940 static uint64_t
4941 4941 dca_ena(uint64_t ena)
4942 4942 {
4943 4943 if (ena == 0)
4944 4944 ena = fm_ena_generate(0, FM_ENA_FMT1);
4945 4945 else
4946 4946 ena = fm_ena_increment(ena);
4947 4947 return (ena);
4948 4948 }
4949 4949
4950 4950 static char *
4951 4951 dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
4952 4952 {
4953 4953 if (strstr(model, "500"))
4954 4954 return (dca_fma_eclass_sca500[index]);
4955 4955 else
4956 4956 return (dca_fma_eclass_sca1000[index]);
4957 4957 }
[ 4533 lines elided ]