/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Deimos - cryptographic acceleration based upon Broadcom 582x.
 */
#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/varargs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/ioccom.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/kstat.h>
#include <sys/strsun.h>
#include <sys/note.h>
#include <sys/crypto/common.h>
#include <sys/crypto/spi.h>
#include <sys/ddifm.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/fm/io/ddi.h>
#include <sys/crypto/dca.h>

/*
 * Core Deimos driver.
 */
static void	dca_enlist2(dca_listnode_t *, dca_listnode_t *,
    kmutex_t *);
static void	dca_rmlist2(dca_listnode_t *node, kmutex_t *);
static dca_listnode_t	*dca_delist2(dca_listnode_t *q, kmutex_t *);
static void	dca_free_context_list(dca_t *dca);
static int	dca_free_context_low(crypto_ctx_t *ctx);
static int	dca_attach(dev_info_t *, ddi_attach_cmd_t);
static int	dca_detach(dev_info_t *, ddi_detach_cmd_t);
static int	dca_suspend(dca_t *);
static int	dca_resume(dca_t *);
static int	dca_init(dca_t *);
static int	dca_reset(dca_t *, int);
static int	dca_initworklist(dca_t *, dca_worklist_t *);
static void	dca_uninit(dca_t *);
static void	dca_initq(dca_listnode_t *);
static void	dca_enqueue(dca_listnode_t *, dca_listnode_t *);
static dca_listnode_t	*dca_dequeue(dca_listnode_t *);
static dca_listnode_t	*dca_unqueue(dca_listnode_t *);
static dca_request_t	*dca_newreq(dca_t *);
static dca_work_t	*dca_getwork(dca_t *, int);
static void	dca_freework(dca_work_t *);
static dca_work_t	*dca_newwork(dca_t *);
static void	dca_destroywork(dca_work_t *);
static void	dca_schedule(dca_t *, int);
static void	dca_reclaim(dca_t *, int);
static uint_t	dca_intr(char *);
static void	dca_failure(dca_t *, ddi_fault_location_t,
    dca_fma_eclass_t index, uint64_t, int, char *, ...);
static void	dca_jobtimeout(void *);
static int	dca_drain(dca_t *);
static void	dca_undrain(dca_t *);
static void	dca_rejectjobs(dca_t *);

#ifdef	SCHEDDELAY
static void	dca_schedtimeout(void *);
#endif

/*
 * We want these inlined for performance.
 */
#ifndef	DEBUG
#pragma inline(dca_freereq, dca_getreq, dca_freework, dca_getwork)
#pragma inline(dca_enqueue, dca_dequeue, dca_rmqueue, dca_done)
#pragma inline(dca_reverse, dca_length)
#endif

/*
 * Device operations.
 */
static struct dev_ops devops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	nodev,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	dca_attach,		/* devo_attach */
	dca_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	NULL,			/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

#define	IDENT		"PCI Crypto Accelerator"
#define	IDENT_SYM	"Crypto Accel Sym 2.0"
#define	IDENT_ASYM	"Crypto Accel Asym 2.0"

/* Space-padded, will be filled in dynamically during registration */
#define	IDENT3	"PCI Crypto Accelerator Mod 2.0"

#define	VENDOR	"Sun Microsystems, Inc."

#define	STALETIME	(30 * SECOND)

#define	crypto_prov_notify	crypto_provider_notification
		/* A 28 char function name doesn't leave much line space */

/*
 * Module linkage.
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops */
	IDENT,			/* drv_linkinfo */
	&devops,		/* drv_dev_ops */
};

extern struct mod_ops mod_cryptoops;

static struct modlcrypto modlcrypto = {
	&mod_cryptoops,
	IDENT3
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev */
	&modldrv,		/* ml_linkage */
	&modlcrypto,
	NULL
};

/*
 * CSPI information (entry points, provider info, etc.)
 */

/* Mechanisms for the symmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab1[] = {
	/* DES-CBC */
	{SUN_CKM_DES_CBC, DES_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES_KEY_LEN, DES_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
	/* 3DES-CBC */
	{SUN_CKM_DES3_CBC, DES3_CBC_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
	    DES3_MIN_KEY_LEN, DES3_MAX_KEY_LEN, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};

/* Mechanisms for the asymmetric cipher provider */
static crypto_mech_info_t dca_mech_info_tab2[] = {
	/* DSA */
	{SUN_CKM_DSA, DSA_MECH_INFO_TYPE,
	    CRYPTO_FG_SIGN | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_VERIFY_ATOMIC,
	    CRYPTO_BYTES2BITS(DSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(DSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},

	/* RSA */
	{SUN_CKM_RSA_X_509, RSA_X_509_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS},
	{SUN_CKM_RSA_PKCS, RSA_PKCS_MECH_INFO_TYPE,
	    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT | CRYPTO_FG_SIGN |
	    CRYPTO_FG_SIGN_RECOVER | CRYPTO_FG_VERIFY |
	    CRYPTO_FG_VERIFY_RECOVER |
	    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC |
	    CRYPTO_FG_SIGN_ATOMIC | CRYPTO_FG_SIGN_RECOVER_ATOMIC |
	    CRYPTO_FG_VERIFY_ATOMIC | CRYPTO_FG_VERIFY_RECOVER_ATOMIC,
	    CRYPTO_BYTES2BITS(RSA_MIN_KEY_LEN),
	    CRYPTO_BYTES2BITS(RSA_MAX_KEY_LEN),
	    CRYPTO_KEYSIZE_UNIT_IN_BITS}
};

static void dca_provider_status(crypto_provider_handle_t, uint_t *);

static crypto_control_ops_t dca_control_ops = {
	dca_provider_status
};

static int dca_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_encrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static int dca_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_update(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_decrypt_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_cipher_ops_t dca_cipher_ops = {
	dca_encrypt_init,
	dca_encrypt,
	dca_encrypt_update,
	dca_encrypt_final,
	dca_encrypt_atomic,
	dca_decrypt_init,
	dca_decrypt,
	dca_decrypt_update,
	dca_decrypt_final,
	dca_decrypt_atomic
};

static int dca_sign_init(crypto_ctx_t *, crypto_mechanism_t *, crypto_key_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
    crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_sign_recover(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_sign_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_sign_ops_t dca_sign_ops = {
	dca_sign_init,
	dca_sign,
	dca_sign_update,
	dca_sign_final,
	dca_sign_atomic,
	dca_sign_recover_init,
	dca_sign_recover,
	dca_sign_recover_atomic
};

static int dca_verify_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_update(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_final(crypto_ctx_t *, crypto_data_t *,
    crypto_req_handle_t);
static int dca_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
    crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover_init(crypto_ctx_t *, crypto_mechanism_t *,
    crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int dca_verify_recover(crypto_ctx_t *, crypto_data_t *,
    crypto_data_t *, crypto_req_handle_t);
static int dca_verify_recover_atomic(crypto_provider_handle_t,
    crypto_session_id_t, crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
    crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);

static crypto_verify_ops_t dca_verify_ops = {
	dca_verify_init,
	dca_verify,
	dca_verify_update,
	dca_verify_final,
	dca_verify_atomic,
	dca_verify_recover_init,
	dca_verify_recover,
	dca_verify_recover_atomic
};

static int dca_generate_random(crypto_provider_handle_t, crypto_session_id_t,
    uchar_t *, size_t, crypto_req_handle_t);

static crypto_random_number_ops_t dca_random_number_ops = {
	NULL,
	dca_generate_random
};

static int ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq);
static int ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id);

static crypto_provider_management_ops_t dca_provmanage_ops_1 = {
	ext_info_sym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};

static crypto_provider_management_ops_t dca_provmanage_ops_2 = {
	ext_info_asym,		/* ext_info */
	NULL,			/* init_token */
	NULL,			/* init_pin */
	NULL			/* set_pin */
};

int dca_free_context(crypto_ctx_t *);

static crypto_ctx_ops_t dca_ctx_ops = {
	NULL,
	dca_free_context
};

/* Operations for the symmetric cipher provider */
static crypto_ops_t dca_crypto_ops1 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	NULL,				/* sign_ops */
	NULL,				/* verify_ops */
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	NULL,				/* random_number_ops */
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_1,		/* management_ops */
	&dca_ctx_ops
};

/* Operations for the asymmetric cipher provider */
static crypto_ops_t dca_crypto_ops2 = {
	&dca_control_ops,
	NULL,				/* digest_ops */
	&dca_cipher_ops,
	NULL,				/* mac_ops */
	&dca_sign_ops,
	&dca_verify_ops,
	NULL,				/* dual_ops */
	NULL,				/* cipher_mac_ops */
	&dca_random_number_ops,
	NULL,				/* session_ops */
	NULL,				/* object_ops */
	NULL,				/* key_ops */
	&dca_provmanage_ops_2,		/* management_ops */
	&dca_ctx_ops
};

/* Provider information for the symmetric cipher provider */
static crypto_provider_info_t dca_prov_info1 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops1,
	sizeof (dca_mech_info_tab1)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab1,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};

/* Provider information for the asymmetric cipher provider */
static crypto_provider_info_t dca_prov_info2 = {
	CRYPTO_SPI_VERSION_1,
	NULL,				/* pi_provider_description */
	CRYPTO_HW_PROVIDER,
	NULL,				/* pi_provider_dev */
	NULL,				/* pi_provider_handle */
	&dca_crypto_ops2,
	sizeof (dca_mech_info_tab2)/sizeof (crypto_mech_info_t),
	dca_mech_info_tab2,
	0,				/* pi_logical_provider_count */
	NULL				/* pi_logical_providers */
};

/* Convenience macros */
#define	DCA_SOFTC_FROM_CTX(ctx)	((dca_t *)(ctx)->cc_provider)
#define	DCA_MECH_FROM_CTX(ctx) \
	(((dca_request_t *)(ctx)->cc_provider_private)->dr_ctx.ctx_cm_type)

static int dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
    caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
    dca_chain_t *head, int *n_chain);
static uint64_t dca_ena(uint64_t ena);
static caddr_t dca_bufdaddr_out(crypto_data_t *data);
static char *dca_fma_eclass_string(char *model, dca_fma_eclass_t index);
static int dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index);

static void dca_fma_init(dca_t *dca);
static void dca_fma_fini(dca_t *dca);
static int dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);

static dca_device_t dca_devices[] = {
	/* Broadcom vanilla variants */
	{ 0x14e4, 0x5820, "Broadcom 5820" },
	{ 0x14e4, 0x5821, "Broadcom 5821" },
	{ 0x14e4, 0x5822, "Broadcom 5822" },
	{ 0x14e4, 0x5825, "Broadcom 5825" },
	/* Sun specific OEMd variants */
	{ 0x108e, 0x5454, "SCA" },
	{ 0x108e, 0x5455, "SCA 1000" },
	{ 0x108e, 0x5457, "SCA 500" },
	/* subsysid should be 0x5457, but got 0x1 from HW. Assume both here. */
	{ 0x108e, 0x1, "SCA 500" },
};

/*
 * Device attributes.
 */
static struct ddi_device_acc_attr dca_regsattr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

static struct ddi_device_acc_attr dca_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

#if !defined(i386) && !defined(__i386)
static struct ddi_device_acc_attr dca_bufattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};
#endif

static struct ddi_dma_attr dca_dmaattr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0x0,			/* dma_attr_addr_lo */
	0xffffffffUL,		/* dma_attr_addr_hi */
	0x00ffffffUL,		/* dma_attr_count_max */
	0x40,			/* dma_attr_align */
	0x40,			/* dma_attr_burstsizes */
	0x1,			/* dma_attr_minxfer */
	0x00ffffffUL,		/* dma_attr_maxxfer */
	0xffffffffUL,		/* dma_attr_seg */
#if defined(i386) || defined(__i386) || defined(__amd64)
	512,			/* dma_attr_sgllen */
#else
	1,			/* dma_attr_sgllen */
#endif
	1,			/* dma_attr_granular */
	DDI_DMA_FLAGERR		/* dma_attr_flags */
};

static void	*dca_state = NULL;
int		dca_mindma = 2500;

/*
 * FMA eclass string definitions. Note that these string arrays must be
 * consistent with the dca_fma_eclass_t enum.
 */
static char *dca_fma_eclass_sca1000[] = {
	"sca1000.hw.device",
	"sca1000.hw.timeout",
	"sca1000.none"
};

static char *dca_fma_eclass_sca500[] = {
	"sca500.hw.device",
	"sca500.hw.timeout",
	"sca500.none"
};

/*
 * DDI entry points.
 */
int
_init(void)
{
	int rv;

	DBG(NULL, DMOD, "dca: in _init");

	if ((rv = ddi_soft_state_init(&dca_state, sizeof (dca_t), 1)) != 0) {
		/* this should *never* happen! */
		return (rv);
	}

	if ((rv = mod_install(&modlinkage)) != 0) {
		/* cleanup here */
		ddi_soft_state_fini(&dca_state);
		return (rv);
	}

	return (0);
}

int
_fini(void)
{
	int rv;

	DBG(NULL, DMOD, "dca: in _fini");

	if ((rv = mod_remove(&modlinkage)) == 0) {
		/* cleanup here */
		ddi_soft_state_fini(&dca_state);
	}

	return (rv);
}

int
_info(struct modinfo *modinfop)
{
	DBG(NULL, DMOD, "dca: in _info");

	return (mod_info(&modlinkage, modinfop));
}

int
dca_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	ddi_acc_handle_t	pci;
	int			instance;
	ddi_iblock_cookie_t	ibc;
	int			intr_added = 0;
	dca_t			*dca;
	ushort_t		venid;
	ushort_t		devid;
	ushort_t		revid;
	ushort_t		subsysid;
	ushort_t		subvenid;
	int			i;
	int			ret;
	char			ID[64];
	static char		*unknowndev = "Unknown device";

#if DEBUG
	/* these are only used for debugging */
	ushort_t		pcicomm;
	ushort_t		pcistat;
	uchar_t			cachelinesz;
	uchar_t			mingnt;
	uchar_t			maxlat;
	uchar_t			lattmr;
#endif

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_attach() for %d", instance);

	switch (cmd) {
	case DDI_RESUME:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			dca_diperror(dip, "no soft state in attach");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_resume(dca));
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		dca_diperror(dip, "slot does not support PCI bus-master");
		return (DDI_FAILURE);
	}

	if (ddi_intr_hilevel(dip, 0) != 0) {
		dca_diperror(dip, "hilevel interrupts not supported");
		return (DDI_FAILURE);
	}

	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		dca_diperror(dip, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/* common PCI attributes */
	venid = pci_config_get16(pci, PCI_VENID);
	devid = pci_config_get16(pci, PCI_DEVID);
	revid = pci_config_get8(pci, PCI_REVID);
	subvenid = pci_config_get16(pci, PCI_SUBVENID);
	subsysid = pci_config_get16(pci, PCI_SUBSYSID);

	/*
	 * Broadcom-specific timings.
	 * We disable these timers/counters since they can cause
	 * incorrect false failures when the bus is just a little
	 * bit slow, or busy.
	 */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

#if DEBUG
	/* read registers (for debugging) */
	pcicomm = pci_config_get16(pci, PCI_COMM);
	pcistat = pci_config_get16(pci, PCI_STATUS);
	cachelinesz = pci_config_get8(pci, PCI_CACHELINESZ);
	mingnt = pci_config_get8(pci, PCI_MINGNT);
	maxlat = pci_config_get8(pci, PCI_MAXLAT);
	lattmr = pci_config_get8(pci, PCI_LATTMR);
#endif

	pci_config_teardown(&pci);

	if (ddi_get_iblock_cookie(dip, 0, &ibc) != DDI_SUCCESS) {
		dca_diperror(dip, "unable to get iblock cookie");
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(dca_state, instance) != DDI_SUCCESS) {
		dca_diperror(dip, "unable to allocate soft state");
		return (DDI_FAILURE);
	}

	dca = ddi_get_soft_state(dca_state, instance);
	ASSERT(dca != NULL);
	dca->dca_dip = dip;
	WORKLIST(dca, MCR1)->dwl_prov = 0;
	WORKLIST(dca, MCR2)->dwl_prov = 0;
	/* figure pagesize */
	dca->dca_pagesize = ddi_ptob(dip, 1);

	/*
	 * Search for the device in our supported devices table.  This
	 * is here for two reasons.  First, we want to ensure that
	 * only Sun-qualified (and presumably Sun-labeled) devices can
	 * be used with this driver.  Second, some devices have
	 * specific differences.  E.g. the 5821 has support for a
	 * special mode of RC4, deeper queues, power management, and
	 * other changes.  Also, the export versions of some of these
	 * chips don't support RC4 or 3DES, so we catch that here.
	 *
	 * Note that we only look at the upper nibble of the device
	 * id, which is used to distinguish export vs. domestic
	 * versions of the chip.  (The lower nibble is used for
	 * stepping information.)
	 */
	for (i = 0; i < (sizeof (dca_devices) / sizeof (dca_device_t)); i++) {
		/*
		 * Try to match the subsystem information first.
		 */
		if (subvenid && (subvenid == dca_devices[i].dd_vendor_id) &&
		    subsysid && (subsysid == dca_devices[i].dd_device_id)) {
			dca->dca_model = dca_devices[i].dd_model;
			dca->dca_devid = dca_devices[i].dd_device_id;
			break;
		}
		/*
		 * Failing that, try the generic vendor and device id.
		 * Even if we find a match, we keep searching anyway,
		 * since we would prefer to find a match based on the
		 * subsystem ids.
		 */
		if ((venid == dca_devices[i].dd_vendor_id) &&
		    (devid == dca_devices[i].dd_device_id)) {
			dca->dca_model = dca_devices[i].dd_model;
			dca->dca_devid = dca_devices[i].dd_device_id;
		}
	}
	/* try and handle an unrecognized device */
	if (dca->dca_model == NULL) {
		dca->dca_model = unknowndev;
		dca_error(dca, "device not recognized, not supported");
		DBG(dca, DPCI, "i=%d venid=%x devid=%x rev=%d",
		    i, venid, devid, revid);
	}

	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "description",
	    dca->dca_model) != DDI_SUCCESS) {
		dca_error(dca, "unable to create description property");
		return (DDI_FAILURE);
	}

	DBG(dca, DPCI, "PCI command=0x%x status=%x cachelinesz=%x",
	    pcicomm, pcistat, cachelinesz);
	DBG(dca, DPCI, "mingnt=0x%x maxlat=0x%x lattmr=0x%x",
	    mingnt, maxlat, lattmr);

	/*
	 * initialize locks, etc.
	 */
	(void) mutex_init(&dca->dca_intrlock, NULL, MUTEX_DRIVER, ibc);

	/* use RNGSHA1 by default */
	if (ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "rngdirect", 0) == 0) {
		dca->dca_flags |= DCA_RNGSHA1;
	}

	/* initialize FMA */
	dca_fma_init(dca);

	/* initialize some key data structures */
	if (dca_init(dca) != DDI_SUCCESS) {
		goto failed;
	}

	/* initialize kstats */
	dca_ksinit(dca);

	/* setup access to registers */
	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&dca->dca_regs,
	    0, 0, &dca_regsattr, &dca->dca_regs_handle) != DDI_SUCCESS) {
		dca_error(dca, "unable to map registers");
		goto failed;
	}

	DBG(dca, DCHATTY, "MCR1 = %x", GETCSR(dca, CSR_MCR1));
	DBG(dca, DCHATTY, "CONTROL = %x", GETCSR(dca, CSR_DMACTL));
	DBG(dca, DCHATTY, "STATUS = %x", GETCSR(dca, CSR_DMASTAT));
	DBG(dca, DCHATTY, "DMAEA = %x", GETCSR(dca, CSR_DMAEA));
	DBG(dca, DCHATTY, "MCR2 = %x", GETCSR(dca, CSR_MCR2));

	/* reset the chip */
	if (dca_reset(dca, 0) < 0) {
		goto failed;
	}

	/* initialize the chip */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		goto failed;
	}

	/* add the interrupt */
	if (ddi_add_intr(dip, 0, &dca->dca_icookie, NULL, dca_intr,
	    (void *)dca) != DDI_SUCCESS) {
		DBG(dca, DWARN, "ddi_add_intr failed");
		goto failed;
	} else {
		intr_added = 1;
	}

	/* enable interrupts on the device */
	/*
	 * XXX: Note, 5820A1 errata indicates that this may clobber
	 * bits 24 and 23, which affect the speed of the RNG.  Since
	 * we always want to run in full-speed mode, this should be
	 * harmless.
	 */
	if (dca->dca_devid == 0x5825) {
		/* for 5825 - increase the DMA read size */
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
	} else {
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	}
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		goto failed;
	}

	/* register MCR1 with the crypto framework */
	/* Be careful not to exceed 32 chars */
	(void) sprintf(ID, "%s/%d %s",
	    ddi_driver_name(dip), ddi_get_instance(dip), IDENT_SYM);
	dca_prov_info1.pi_provider_description = ID;
	dca_prov_info1.pi_provider_dev.pd_hw = dip;
	dca_prov_info1.pi_provider_handle = dca;
	if ((ret = crypto_register_provider(&dca_prov_info1,
	    &WORKLIST(dca, MCR1)->dwl_prov)) != CRYPTO_SUCCESS) {
		cmn_err(CE_WARN,
		    "crypto_register_provider() failed (%d) for MCR1", ret);
		goto failed;
	}

	/* register MCR2 with the crypto framework */
	/* Be careful not to exceed 32 chars */
	(void) sprintf(ID, "%s/%d %s",
	    ddi_driver_name(dip), ddi_get_instance(dip), IDENT_ASYM);
	dca_prov_info2.pi_provider_description = ID;
	dca_prov_info2.pi_provider_dev.pd_hw = dip;
	dca_prov_info2.pi_provider_handle = dca;
	if ((ret = crypto_register_provider(&dca_prov_info2,
	    &WORKLIST(dca, MCR2)->dwl_prov)) != CRYPTO_SUCCESS) {
		cmn_err(CE_WARN,
		    "crypto_register_provider() failed (%d) for MCR2", ret);
		goto failed;
	}

	crypto_prov_notify(WORKLIST(dca, MCR1)->dwl_prov,
	    CRYPTO_PROVIDER_READY);
	crypto_prov_notify(WORKLIST(dca, MCR2)->dwl_prov,
	    CRYPTO_PROVIDER_READY);

	/* Initialize the local random number pool for this instance */
	if ((ret = dca_random_init(dca)) != CRYPTO_SUCCESS) {
		goto failed;
	}

	mutex_enter(&dca->dca_intrlock);
	dca->dca_jobtid = timeout(dca_jobtimeout, (void *)dca,
	    drv_usectohz(SECOND));
	mutex_exit(&dca->dca_intrlock);

	ddi_set_driver_private(dip, (caddr_t)dca);

	ddi_report_dev(dip);

	if (ddi_get_devstate(dca->dca_dip) != DDI_DEVSTATE_UP) {
		ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_RESTORED);
	}

	return (DDI_SUCCESS);

failed:
	/* unregister from the crypto framework */
	if (WORKLIST(dca, MCR1)->dwl_prov != 0) {
		(void) crypto_unregister_provider(
		    WORKLIST(dca, MCR1)->dwl_prov);
	}
	if (WORKLIST(dca, MCR2)->dwl_prov != 0) {
		(void) crypto_unregister_provider(
		    WORKLIST(dca, MCR2)->dwl_prov);
	}
	if (intr_added) {
		CLRBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
		/* unregister intr handler */
		ddi_remove_intr(dip, 0, dca->dca_icookie);
	}
	if (dca->dca_regs_handle) {
		ddi_regs_map_free(&dca->dca_regs_handle);
	}
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	mutex_destroy(&dca->dca_intrlock);
	ddi_soft_state_free(dca_state, instance);
	return (DDI_FAILURE);
}

int
dca_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	dca_t		*dca;
	timeout_id_t	tid;

	instance = ddi_get_instance(dip);

	DBG(NULL, DMOD, "dca: in dca_detach() for %d", instance);

	switch (cmd) {
	case DDI_SUSPEND:
		if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
			dca_diperror(dip, "no soft state in detach");
			return (DDI_FAILURE);
		}
		/* assumption: we won't be DDI_DETACHed until we return */
		return (dca_suspend(dca));

	case DDI_DETACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	if ((dca = (dca_t *)ddi_get_driver_private(dip)) == NULL) {
		dca_diperror(dip, "no soft state in detach");
		return (DDI_FAILURE);
	}

	/*
	 * Unregister from kCF.
	 * This needs to be done at the beginning of detach.
	 */
	if (WORKLIST(dca, MCR1)->dwl_prov != 0) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR1)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR1 from kcf");
			return (DDI_FAILURE);
		}
	}

	if (WORKLIST(dca, MCR2)->dwl_prov != 0) {
		if (crypto_unregister_provider(
		    WORKLIST(dca, MCR2)->dwl_prov) != CRYPTO_SUCCESS) {
			dca_error(dca, "unable to unregister MCR2 from kcf");
			return (DDI_FAILURE);
		}
	}

	/*
	 * Cleanup the private context list.  Once the
	 * crypto_unregister_provider returns, it is safe to do so.
	 */
	dca_free_context_list(dca);

	/* Cleanup the local random number pool */
	dca_random_fini(dca);

	/* send any jobs in the waitq back to kCF */
	dca_rejectjobs(dca);

	/* untimeout the timeouts */
	mutex_enter(&dca->dca_intrlock);
	tid = dca->dca_jobtid;
	dca->dca_jobtid = 0;
	mutex_exit(&dca->dca_intrlock);
	if (tid) {
		(void) untimeout(tid);
	}

	/* disable device interrupts */
	CLRBIT(dca, CSR_DMACTL, DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);

	/* unregister interrupt handlers */
	ddi_remove_intr(dip, 0, dca->dca_icookie);

	/* release our regs handle */
	ddi_regs_map_free(&dca->dca_regs_handle);

	/* toss out kstats */
	if (dca->dca_intrstats) {
		kstat_delete(dca->dca_intrstats);
	}
	if (dca->dca_ksp) {
		kstat_delete(dca->dca_ksp);
	}

	mutex_destroy(&dca->dca_intrlock);
	dca_uninit(dca);

	/* finalize FMA */
	dca_fma_fini(dca);

	ddi_soft_state_free(dca_state, instance);

	return (DDI_SUCCESS);
}

int
dca_resume(dca_t *dca)
{
	ddi_acc_handle_t	pci;

	if (pci_config_setup(dca->dca_dip, &pci) != DDI_SUCCESS) {
		dca_error(dca, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	/*
	 * Reprogram registers in PCI configuration space.
	 */

	/* Broadcom-specific timers -- we disable them. */
	pci_config_put8(pci, PCI_TRDYTO, 0);
	pci_config_put8(pci, PCI_RETRIES, 0);

	/* initialize PCI access settings */
	pci_config_put16(pci, PCI_COMM, PCICOMM_SEE |
	    PCICOMM_PEE | PCICOMM_BME | PCICOMM_MAE);

	/* set up our PCI latency timer */
	pci_config_put8(pci, PCI_LATTMR, 0x40);

	pci_config_teardown(&pci);

	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during resume");
		return (DDI_FAILURE);
	}

	/*
	 * Now restore the card-specific CSRs.
	 */

	/* restore endianness settings */
	PUTCSR(dca, CSR_DMACTL, DMACTL_BE32 | DMACTL_BE64);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* restore interrupt enables */
	if (dca->dca_devid == 0x5825) {
		/* for 5825 set 256 byte read size to improve performance */
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE | DMACTL_RD256);
	} else {
		SETBIT(dca, CSR_DMACTL,
		    DMACTL_MCR1IE | DMACTL_MCR2IE | DMACTL_EIE);
	}
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* resume scheduling jobs on the device */
	dca_undrain(dca);

	return (DDI_SUCCESS);
}

int
dca_suspend(dca_t *dca)
{
	if ((dca_drain(dca)) != 0) {
		return (DDI_FAILURE);
	}
	if (dca_reset(dca, 0) < 0) {
		dca_error(dca, "unable to reset device during suspend");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * Hardware access stuff.
 */
int
dca_reset(dca_t *dca, int failreset)
{
	int i;

	if (dca->dca_regs_handle == NULL) {
		return (-1);
	}

	PUTCSR(dca, CSR_DMACTL, DMACTL_RESET);
	if (!failreset) {
		if (dca_check_acc_handle(dca, dca->dca_regs_handle,
		    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
			return (-1);
	}
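
	/*
	 * Each poll below waits 100 usec, so the loop gives the chip
	 * roughly 10 ms in total to clear the reset bit before we
	 * declare failure.
	 */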
	/* now wait for a reset */
	for (i = 1; i < 100; i++) {
		uint32_t	dmactl;
		drv_usecwait(100);
		dmactl = GETCSR(dca, CSR_DMACTL);
		if (!failreset) {
			if (dca_check_acc_handle(dca, dca->dca_regs_handle,
			    DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
				return (-1);
		}
		if ((dmactl & DMACTL_RESET) == 0) {
			DBG(dca, DCHATTY, "reset in %d usec", i * 100);
			return (0);
		}
	}

	if (!failreset) {
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "timeout waiting for reset after %d usec", i * 100);
	}
	return (-1);
}

int
dca_initworklist(dca_t *dca, dca_worklist_t *wlp)
{
	int	i;
	int	reqprealloc = wlp->dwl_hiwater + (MAXWORK * MAXREQSPERMCR);

	/*
	 * Set up work queue.
	 */
	mutex_init(&wlp->dwl_lock, NULL, MUTEX_DRIVER, dca->dca_icookie);
	mutex_init(&wlp->dwl_freereqslock, NULL, MUTEX_DRIVER,
	    dca->dca_icookie);
	mutex_init(&wlp->dwl_freelock, NULL, MUTEX_DRIVER, dca->dca_icookie);
	cv_init(&wlp->dwl_cv, NULL, CV_DRIVER, NULL);

	mutex_enter(&wlp->dwl_lock);

	dca_initq(&wlp->dwl_freereqs);
	dca_initq(&wlp->dwl_waitq);
	dca_initq(&wlp->dwl_freework);
	dca_initq(&wlp->dwl_runq);

	for (i = 0; i < MAXWORK; i++) {
		dca_work_t	*workp;

		if ((workp = dca_newwork(dca)) == NULL) {
			dca_error(dca, "unable to allocate work");
			mutex_exit(&wlp->dwl_lock);
			return (DDI_FAILURE);
		}
		workp->dw_wlp = wlp;
		dca_freework(workp);
	}
	mutex_exit(&wlp->dwl_lock);

	for (i = 0; i < reqprealloc; i++) {
		dca_request_t *reqp;

		if ((reqp = dca_newreq(dca)) == NULL) {
			dca_error(dca, "unable to allocate request");
			return (DDI_FAILURE);
		}
		reqp->dr_dca = dca;
		reqp->dr_wlp = wlp;
		dca_freereq(reqp);
	}
	return (DDI_SUCCESS);
}

int
dca_init(dca_t *dca)
{
	dca_worklist_t	*wlp;

	/* Initialize the private context list and the corresponding lock. */
	mutex_init(&dca->dca_ctx_list_lock, NULL, MUTEX_DRIVER, NULL);
	dca_initq(&dca->dca_ctx_list);

	/*
	 * MCR1 algorithms.
	 */
	wlp = WORKLIST(dca, MCR1);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr1",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_lowater", MCR1LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_hiwater", MCR1HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr1_maxreqs", MCR1MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR1;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	/*
	 * MCR2 algorithms.
	 */
	wlp = WORKLIST(dca, MCR2);
	(void) sprintf(wlp->dwl_name, "dca%d:mcr2",
	    ddi_get_instance(dca->dca_dip));
	wlp->dwl_lowater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_lowater", MCR2LOWATER);
	wlp->dwl_hiwater = ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_hiwater", MCR2HIWATER);
	wlp->dwl_reqspermcr = min(ddi_getprop(DDI_DEV_T_ANY,
	    dca->dca_dip, DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS,
	    "mcr2_maxreqs", MCR2MAXREQS), MAXREQSPERMCR);
	wlp->dwl_dca = dca;
	wlp->dwl_mcr = MCR2;
	if (dca_initworklist(dca, wlp) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
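
/*
 * The watermarks and MCR sizing read above are tunable via driver
 * properties; a sketch of the corresponding dca.conf entries
 * (property names as read above, values purely illustrative):
 *
 *	mcr1_lowater=8;
 *	mcr1_hiwater=32;
 *	mcr1_maxreqs=4;
 */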

/*
 * Uninitialize worklists.  This routine should only be called when no
 * active jobs (hence DMA mappings) exist.  One way to ensure this is
 * to unregister from kCF before calling this routine.  (This is done
 * e.g. in detach(9e).)
 */
void
dca_uninit(dca_t *dca)
{
	int	mcr;

	mutex_destroy(&dca->dca_ctx_list_lock);

	for (mcr = MCR1; mcr <= MCR2; mcr++) {
		dca_worklist_t	*wlp = WORKLIST(dca, mcr);
		dca_work_t	*workp;
		dca_request_t	*reqp;

		if (dca->dca_regs_handle == NULL) {
			continue;
		}

		mutex_enter(&wlp->dwl_lock);
		while ((workp = dca_getwork(dca, mcr)) != NULL) {
			dca_destroywork(workp);
		}
		mutex_exit(&wlp->dwl_lock);
		while ((reqp = dca_getreq(dca, mcr, 0)) != NULL) {
			dca_destroyreq(reqp);
		}

		mutex_destroy(&wlp->dwl_lock);
		mutex_destroy(&wlp->dwl_freereqslock);
		mutex_destroy(&wlp->dwl_freelock);
		cv_destroy(&wlp->dwl_cv);
		wlp->dwl_prov = 0;
	}
}
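
/*
 * The queues used throughout this driver are circular doubly-linked
 * lists threaded through dca_listnode_t, with the queue head acting
 * as a sentinel: an empty queue points back at itself.  The "2"
 * variants below maintain a second, independent linkage (dl_next2/
 * dl_prev2) through the same nodes, so a node can sit on two lists
 * at once, e.g. a worklist queue and the private context list.
 */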
static void
dca_enlist2(dca_listnode_t *q, dca_listnode_t *node, kmutex_t *lock)
{
	if (!q || !node)
		return;

	mutex_enter(lock);
	node->dl_next2 = q;
	node->dl_prev2 = q->dl_prev2;
	node->dl_next2->dl_prev2 = node;
	node->dl_prev2->dl_next2 = node;
	mutex_exit(lock);
}

static void
dca_rmlist2(dca_listnode_t *node, kmutex_t *lock)
{
	if (!node)
		return;

	mutex_enter(lock);
	node->dl_next2->dl_prev2 = node->dl_prev2;
	node->dl_prev2->dl_next2 = node->dl_next2;
	node->dl_next2 = NULL;
	node->dl_prev2 = NULL;
	mutex_exit(lock);
}

static dca_listnode_t *
dca_delist2(dca_listnode_t *q, kmutex_t *lock)
{
	dca_listnode_t *node;

	mutex_enter(lock);
	if ((node = q->dl_next2) == q) {
		mutex_exit(lock);
		return (NULL);
	}

	node->dl_next2->dl_prev2 = node->dl_prev2;
	node->dl_prev2->dl_next2 = node->dl_next2;
	node->dl_next2 = NULL;
	node->dl_prev2 = NULL;
	mutex_exit(lock);

	return (node);
}

void
dca_initq(dca_listnode_t *q)
{
	q->dl_next = q;
	q->dl_prev = q;
	q->dl_next2 = q;
	q->dl_prev2 = q;
}

void
dca_enqueue(dca_listnode_t *q, dca_listnode_t *node)
{
	/*
	 * Enqueue submits at the "tail" of the list, i.e. just
	 * behind the sentinel.
	 */
	node->dl_next = q;
	node->dl_prev = q->dl_prev;
	node->dl_next->dl_prev = node;
	node->dl_prev->dl_next = node;
}

void
dca_rmqueue(dca_listnode_t *node)
{
	node->dl_next->dl_prev = node->dl_prev;
	node->dl_prev->dl_next = node->dl_next;
	node->dl_next = NULL;
	node->dl_prev = NULL;
}

dca_listnode_t *
dca_dequeue(dca_listnode_t *q)
{
	dca_listnode_t *node;
	/*
	 * Dequeue takes from the "head" of the list, i.e. just after
	 * the sentinel.
	 */
	if ((node = q->dl_next) == q) {
		/* queue is empty */
		return (NULL);
	}
	dca_rmqueue(node);
	return (node);
}

/* this is the opposite of dequeue, it takes things off in LIFO order */
dca_listnode_t *
dca_unqueue(dca_listnode_t *q)
{
	dca_listnode_t *node;
	/*
	 * unqueue takes from the "tail" of the list, i.e. just before
	 * the sentinel.
	 */
	if ((node = q->dl_prev) == q) {
		/* queue is empty */
		return (NULL);
	}
	dca_rmqueue(node);
	return (node);
}

dca_listnode_t *
dca_peekqueue(dca_listnode_t *q)
{
	dca_listnode_t *node;

	if ((node = q->dl_next) == q) {
		return (NULL);
	} else {
		return (node);
	}
}

/*
 * Interrupt service routine.
 */
uint_t
dca_intr(char *arg)
{
	dca_t		*dca = (dca_t *)arg;
	uint32_t	status;

	mutex_enter(&dca->dca_intrlock);
	status = GETCSR(dca, CSR_DMASTAT);
	PUTCSR(dca, CSR_DMASTAT, status & DMASTAT_INTERRUPTS);
	if (dca_check_acc_handle(dca, dca->dca_regs_handle,
	    DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
		mutex_exit(&dca->dca_intrlock);
		return ((uint_t)DDI_FAILURE);
	}

	DBG(dca, DINTR, "interrupted, status = 0x%x!", status);

	if ((status & DMASTAT_INTERRUPTS) == 0) {
		/* increment spurious interrupt kstat */
		if (dca->dca_intrstats) {
			KIOIP(dca)->intrs[KSTAT_INTR_SPURIOUS]++;
		}
		mutex_exit(&dca->dca_intrlock);
		return (DDI_INTR_UNCLAIMED);
	}

	if (dca->dca_intrstats) {
		KIOIP(dca)->intrs[KSTAT_INTR_HARD]++;
	}
	if (status & DMASTAT_MCR1INT) {
		DBG(dca, DINTR, "MCR1 interrupted");
		mutex_enter(&(WORKLIST(dca, MCR1)->dwl_lock));
		dca_schedule(dca, MCR1);
		dca_reclaim(dca, MCR1);
		mutex_exit(&(WORKLIST(dca, MCR1)->dwl_lock));
	}

	if (status & DMASTAT_MCR2INT) {
		DBG(dca, DINTR, "MCR2 interrupted");
		mutex_enter(&(WORKLIST(dca, MCR2)->dwl_lock));
		dca_schedule(dca, MCR2);
		dca_reclaim(dca, MCR2);
		mutex_exit(&(WORKLIST(dca, MCR2)->dwl_lock));
	}

	if (status & DMASTAT_ERRINT) {
		uint32_t	erraddr;
		erraddr = GETCSR(dca, CSR_DMAEA);
		mutex_exit(&dca->dca_intrlock);

		/*
		 * bit 1 of the error address indicates failure during
		 * read if set, during write otherwise.
		 */
		dca_failure(dca, DDI_DEVICE_FAULT,
		    DCA_FM_ECLASS_HW_DEVICE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "DMA master access error %s address 0x%x",
		    erraddr & 0x1 ? "reading" : "writing", erraddr & ~1);
		return (DDI_INTR_CLAIMED);
	}

	mutex_exit(&dca->dca_intrlock);

	return (DDI_INTR_CLAIMED);
}

/*
 * Reverse a string of bytes from s1 into s2.  The reversal happens
 * from the tail of s1.  If len1 < len2, then null bytes will be
 * padded to the end of s2.  If len2 < len1, then (presumably null)
 * bytes will be dropped from the start of s1.
 *
 * The rationale here is that when s1 (source) is shorter, then we
 * are reversing from big-endian ordering, into device ordering, and
 * want to add some extra nulls to the tail (MSB) side of the device.
 *
 * Similarly, when s2 (dest) is shorter, then we are truncating what
 * are presumably null MSB bits from the device.
 *
 * There is an expectation when reversing from the device back into
 * big-endian, that the number of bytes to reverse and the target size
 * will match, and no truncation or padding occurs.
 */
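/*
 * For example (a sketch, with values chosen purely for illustration):
 * reversing the 3-byte source { 0x01, 0x02, 0x03 } into a 5-byte
 * destination yields { 0x03, 0x02, 0x01, 0x00, 0x00 }; the two
 * trailing zeros are the MSB-side padding described above.
 */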
void
dca_reverse(void *s1, void *s2, int len1, int len2)
{
	caddr_t	src, dst;

	if (len1 == 0) {
		if (len2) {
			bzero(s2, len2);
		}
		return;
	}
	src = (caddr_t)s1 + len1 - 1;
	dst = s2;
	while ((src >= (caddr_t)s1) && (len2)) {
		*dst++ = *src--;
		len2--;
	}
	while (len2 > 0) {
		*dst++ = 0;
		len2--;
	}
}
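
/*
 * dca_padfull() and dca_padhalf() round a bit count up to the next
 * operand size the chip supports and return that size in bytes.  A
 * sketch of the arithmetic, assuming BITS2BYTES(b) is b / 8:
 * dca_padfull(1000) rounds up to 1024 bits and returns 128 bytes;
 * anything larger than 2048 bits (1024 for dca_padhalf) is
 * unsupported and yields 0.
 */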
uint16_t
dca_padfull(int num)
{
	if (num <= 512) {
		return (BITS2BYTES(512));
	}
	if (num <= 768) {
		return (BITS2BYTES(768));
	}
	if (num <= 1024) {
		return (BITS2BYTES(1024));
	}
	if (num <= 1536) {
		return (BITS2BYTES(1536));
	}
	if (num <= 2048) {
		return (BITS2BYTES(2048));
	}
	return (0);
}

uint16_t
dca_padhalf(int num)
{
	if (num <= 256) {
		return (BITS2BYTES(256));
	}
	if (num <= 384) {
		return (BITS2BYTES(384));
	}
	if (num <= 512) {
		return (BITS2BYTES(512));
	}
	if (num <= 768) {
		return (BITS2BYTES(768));
	}
	if (num <= 1024) {
		return (BITS2BYTES(1024));
	}
	return (0);
}

dca_work_t *
dca_newwork(dca_t *dca)
{
	dca_work_t		*workp;
	size_t			size;
	ddi_dma_cookie_t	c;
	unsigned		nc;
	int			rv;

	workp = kmem_zalloc(sizeof (dca_work_t), KM_SLEEP);

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &workp->dw_mcr_dmah);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA handle");
		dca_destroywork(workp);
		return (NULL);
	}

	rv = ddi_dma_mem_alloc(workp->dw_mcr_dmah,
	    ROUNDUP(MCR_SIZE, dca->dca_pagesize),
	    &dca_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &workp->dw_mcr_kaddr, &size, &workp->dw_mcr_acch);
	if (rv != 0) {
		dca_error(dca, "unable to alloc MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(workp->dw_mcr_dmah, NULL,
	    workp->dw_mcr_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR,
	    DDI_DMA_SLEEP, NULL, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "unable to map MCR DMA memory");
		dca_destroywork(workp);
		return (NULL);
	}

	workp->dw_mcr_paddr = c.dmac_address;
	return (workp);
}

void
dca_destroywork(dca_work_t *workp)
{
	if (workp->dw_mcr_paddr) {
		(void) ddi_dma_unbind_handle(workp->dw_mcr_dmah);
	}
	if (workp->dw_mcr_acch) {
		ddi_dma_mem_free(&workp->dw_mcr_acch);
	}
	if (workp->dw_mcr_dmah) {
		ddi_dma_free_handle(&workp->dw_mcr_dmah);
	}
	kmem_free(workp, sizeof (dca_work_t));
}

dca_request_t *
dca_newreq(dca_t *dca)
{
	dca_request_t		*reqp;
	size_t			size;
	ddi_dma_cookie_t	c;
	unsigned		nc;
	int			rv;
	int			n_chain = 0;

	size = (DESC_SIZE * MAXFRAGS) + CTX_MAXLENGTH;

	reqp = kmem_zalloc(sizeof (dca_request_t), KM_SLEEP);

	reqp->dr_dca = dca;

	/*
	 * Setup the DMA region for the context and descriptors.
	 */
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr, DDI_DMA_SLEEP,
	    NULL, &reqp->dr_ctx_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating request DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	/* for driver hardening, allocate in whole pages */
	rv = ddi_dma_mem_alloc(reqp->dr_ctx_dmah,
	    ROUNDUP(size, dca->dca_pagesize), &dca_devattr, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_ctx_kaddr, &size,
	    &reqp->dr_ctx_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_addr_bind_handle(reqp->dr_ctx_dmah, NULL,
	    reqp->dr_ctx_kaddr, size, DDI_DMA_CONSISTENT | DDI_DMA_WRITE,
	    DDI_DMA_SLEEP, 0, &c, &nc);
	if (rv != DDI_DMA_MAPPED) {
		dca_error(dca, "failed binding request DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_ctx_paddr = c.dmac_address;

	reqp->dr_dma_size = size;

	/*
	 * Set up the dma for our scratch/shared buffers.
	 */
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating ibuf DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}
	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating obuf DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_chain_in_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating chain_in DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_alloc_handle(dca->dca_dip, &dca_dmaattr,
	    DDI_DMA_SLEEP, NULL, &reqp->dr_chain_out_dmah);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "failure allocating chain_out DMA handle");
		dca_destroyreq(reqp);
		return (NULL);
	}

	/*
	 * for driver hardening, allocate in whole pages.
	 */
	size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#if defined(i386) || defined(__i386)
	/*
	 * Use kmem_alloc instead of ddi_dma_mem_alloc here since the latter
	 * may fail on x86 platform if a physically contiguous memory chunk
	 * cannot be found. From initial testing, we did not see performance
	 * degradation as seen on Sparc.
	 */
	if ((reqp->dr_ibuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
		dca_error(dca, "unable to alloc request ibuf memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
	if ((reqp->dr_obuf_kaddr = kmem_alloc(size, KM_SLEEP)) == NULL) {
		dca_error(dca, "unable to alloc request obuf memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
#else
	/*
	 * We could kmem_alloc for Sparc too. However, it gives worse
	 * performance when transferring more than one page data. For example,
	 * using 4 threads and 12032 byte data and 3DES on 900MHZ Sparc system,
	 * kmem_alloc uses 80% CPU and ddi_dma_mem_alloc uses 50% CPU for
	 * the same throughput.
	 */
	rv = ddi_dma_mem_alloc(reqp->dr_ibuf_dmah,
	    size, &dca_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_ibuf_kaddr,
	    &size, &reqp->dr_ibuf_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}

	rv = ddi_dma_mem_alloc(reqp->dr_obuf_dmah,
	    size, &dca_bufattr,
	    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL, &reqp->dr_obuf_kaddr,
	    &size, &reqp->dr_obuf_acch);
	if (rv != DDI_SUCCESS) {
		dca_error(dca, "unable to alloc request DMA memory");
		dca_destroyreq(reqp);
		return (NULL);
	}
#endif
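
	/*
	 * Layout of the request's context DMA region (a sketch): the
	 * context structure occupies the first CTX_MAXLENGTH bytes,
	 * followed by the pre-built descriptor chains for the input
	 * and output scratch buffers; dr_offset is advanced past each
	 * piece so that dca_bindchains() can build per-request chains
	 * in the space that remains.
	 */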
	/* Skip the used portion in the context page */
	reqp->dr_offset = CTX_MAXLENGTH;
	if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
	    reqp->dr_ibuf_kaddr, reqp->dr_ibuf_dmah,
	    DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    &reqp->dr_ibuf_head, &n_chain)) != DDI_SUCCESS) {
		(void) dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_ibuf_paddr = reqp->dr_ibuf_head.dc_buffer_paddr;
	/* Skip the space used by the input buffer */
	reqp->dr_offset += DESC_SIZE * n_chain;

	if ((rv = dca_bindchains_one(reqp, size, reqp->dr_offset,
	    reqp->dr_obuf_kaddr, reqp->dr_obuf_dmah,
	    DDI_DMA_READ | DDI_DMA_STREAMING,
	    &reqp->dr_obuf_head, &n_chain)) != DDI_SUCCESS) {
		(void) dca_destroyreq(reqp);
		return (NULL);
	}
	reqp->dr_obuf_paddr = reqp->dr_obuf_head.dc_buffer_paddr;
	/* Skip the space used by the output buffer */
	reqp->dr_offset += DESC_SIZE * n_chain;

	DBG(dca, DCHATTY, "CTX is 0x%p, phys 0x%x, len %d",
	    reqp->dr_ctx_kaddr, reqp->dr_ctx_paddr, CTX_MAXLENGTH);
	return (reqp);
}

void
dca_destroyreq(dca_request_t *reqp)
{
#if defined(i386) || defined(__i386)
	dca_t		*dca = reqp->dr_dca;
	size_t		size = ROUNDUP(MAXPACKET, dca->dca_pagesize);
#endif

	/*
	 * Clean up DMA for the context structure.
	 */
	if (reqp->dr_ctx_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_ctx_dmah);
	}

	if (reqp->dr_ctx_acch) {
		ddi_dma_mem_free(&reqp->dr_ctx_acch);
	}

	if (reqp->dr_ctx_dmah) {
		ddi_dma_free_handle(&reqp->dr_ctx_dmah);
	}

	/*
	 * Clean up DMA for the scratch buffer.
	 */
#if defined(i386) || defined(__i386)
	if (reqp->dr_ibuf_dmah) {
		(void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
		ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_dmah) {
		(void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
		ddi_dma_free_handle(&reqp->dr_obuf_dmah);
	}

	kmem_free(reqp->dr_ibuf_kaddr, size);
	kmem_free(reqp->dr_obuf_kaddr, size);
#else
	if (reqp->dr_ibuf_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_paddr) {
		(void) ddi_dma_unbind_handle(reqp->dr_obuf_dmah);
	}

	if (reqp->dr_ibuf_acch) {
		ddi_dma_mem_free(&reqp->dr_ibuf_acch);
	}
	if (reqp->dr_obuf_acch) {
		ddi_dma_mem_free(&reqp->dr_obuf_acch);
	}

	if (reqp->dr_ibuf_dmah) {
		ddi_dma_free_handle(&reqp->dr_ibuf_dmah);
	}
	if (reqp->dr_obuf_dmah) {
		ddi_dma_free_handle(&reqp->dr_obuf_dmah);
	}
#endif
	/*
	 * These two DMA handles should have been unbound in
	 * dca_unbindchains() function
	 */
	if (reqp->dr_chain_in_dmah) {
		ddi_dma_free_handle(&reqp->dr_chain_in_dmah);
	}
	if (reqp->dr_chain_out_dmah) {
		ddi_dma_free_handle(&reqp->dr_chain_out_dmah);
	}

	kmem_free(reqp, sizeof (dca_request_t));
}

dca_work_t *
dca_getwork(dca_t *dca, int mcr)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	dca_work_t	*workp;

	mutex_enter(&wlp->dwl_freelock);
	workp = (dca_work_t *)dca_dequeue(&wlp->dwl_freework);
	mutex_exit(&wlp->dwl_freelock);
	if (workp) {
		int	nreqs;
		bzero(workp->dw_mcr_kaddr, 8);

		/* clear out old requests */
		for (nreqs = 0; nreqs < MAXREQSPERMCR; nreqs++) {
			workp->dw_reqs[nreqs] = NULL;
		}
	}
	return (workp);
}

void
dca_freework(dca_work_t *workp)
{
	mutex_enter(&workp->dw_wlp->dwl_freelock);
	dca_enqueue(&workp->dw_wlp->dwl_freework, (dca_listnode_t *)workp);
	mutex_exit(&workp->dw_wlp->dwl_freelock);
}

dca_request_t *
dca_getreq(dca_t *dca, int mcr, int tryhard)
{
	dca_worklist_t	*wlp = WORKLIST(dca, mcr);
	dca_request_t	*reqp;

	mutex_enter(&wlp->dwl_freereqslock);
	reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_freereqs);
	mutex_exit(&wlp->dwl_freereqslock);
	if (reqp) {
		reqp->dr_flags = 0;
		reqp->dr_callback = NULL;
	} else if (tryhard) {
		/*
		 * failed to get a free one, try an allocation, the hard way.
		 * XXX: Kstat desired here.
		 */
		if ((reqp = dca_newreq(dca)) != NULL) {
			reqp->dr_wlp = wlp;
			reqp->dr_dca = dca;
			reqp->dr_flags = 0;
			reqp->dr_callback = NULL;
		}
	}
	return (reqp);
}

void
dca_freereq(dca_request_t *reqp)
{
	reqp->dr_kcf_req = NULL;
	if (!(reqp->dr_flags & DR_NOCACHE)) {
		mutex_enter(&reqp->dr_wlp->dwl_freereqslock);
		dca_enqueue(&reqp->dr_wlp->dwl_freereqs,
		    (dca_listnode_t *)reqp);
		mutex_exit(&reqp->dr_wlp->dwl_freereqslock);
	}
}

/*
 * Binds user buffers to DMA handles dynamically. On Sparc, a user buffer
 * is mapped to a single physical address. On x86, a user buffer is mapped
 * to multiple physical addresses. These physical addresses are chained
 * using the method specified in Broadcom BCM5820 specification.
 */
int
dca_bindchains(dca_request_t *reqp, size_t incnt, size_t outcnt)
{
	int		rv;
	caddr_t		kaddr;
	uint_t		flags;
	int		n_chain = 0;

	if (reqp->dr_flags & DR_INPLACE) {
		flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
	} else {
		flags = DDI_DMA_WRITE | DDI_DMA_STREAMING;
	}

	/* first the input */
	if (incnt) {
		if ((kaddr = dca_bufdaddr(reqp->dr_in)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			return (DDI_FAILURE);
		}
		if ((rv = dca_bindchains_one(reqp, incnt, reqp->dr_offset,
		    kaddr, reqp->dr_chain_in_dmah, flags,
		    &reqp->dr_chain_in_head, &n_chain)) != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (rv);
		}

		/*
		 * The offset and length are altered by the calling routine
		 * reqp->dr_in->cd_offset += incnt;
		 * reqp->dr_in->cd_length -= incnt;
		 */
		/* Save the first one in the chain for MCR */
		reqp->dr_in_paddr = reqp->dr_chain_in_head.dc_buffer_paddr;
		reqp->dr_in_next = reqp->dr_chain_in_head.dc_next_paddr;
		reqp->dr_in_len = reqp->dr_chain_in_head.dc_buffer_length;
	} else {
		reqp->dr_in_paddr = (uintptr_t)NULL;
		reqp->dr_in_next = 0;
		reqp->dr_in_len = 0;
	}

	if (reqp->dr_flags & DR_INPLACE) {
		reqp->dr_out_paddr = reqp->dr_in_paddr;
		reqp->dr_out_len = reqp->dr_in_len;
		reqp->dr_out_next = reqp->dr_in_next;
		return (DDI_SUCCESS);
	}

	/* then the output */
	if (outcnt) {
		flags = DDI_DMA_READ | DDI_DMA_STREAMING;
		if ((kaddr = dca_bufdaddr_out(reqp->dr_out)) == NULL) {
			DBG(NULL, DWARN, "unrecognised crypto data format");
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}
		rv = dca_bindchains_one(reqp, outcnt, reqp->dr_offset +
		    n_chain * DESC_SIZE, kaddr, reqp->dr_chain_out_dmah,
		    flags, &reqp->dr_chain_out_head, &n_chain);
		if (rv != DDI_SUCCESS) {
			(void) dca_unbindchains(reqp);
			return (DDI_FAILURE);
		}

		/* Save the first one in the chain for MCR */
		reqp->dr_out_paddr = reqp->dr_chain_out_head.dc_buffer_paddr;
		reqp->dr_out_next = reqp->dr_chain_out_head.dc_next_paddr;
		reqp->dr_out_len = reqp->dr_chain_out_head.dc_buffer_length;
	} else {
		reqp->dr_out_paddr = (uintptr_t)NULL;
		reqp->dr_out_next = 0;
		reqp->dr_out_len = 0;
	}

	return (DDI_SUCCESS);
}

/*
 * Unbind the user buffers from the DMA handles.
 */
int
dca_unbindchains(dca_request_t *reqp)
{
	int rv = DDI_SUCCESS;
	int rv1 = DDI_SUCCESS;

	/* Clear the input chain */
	if (reqp->dr_chain_in_head.dc_buffer_paddr != 0) {
		(void) ddi_dma_unbind_handle(reqp->dr_chain_in_dmah);
		reqp->dr_chain_in_head.dc_buffer_paddr = 0;
	}

	if (reqp->dr_flags & DR_INPLACE) {
		return (rv);
	}

	/* Clear the output chain */
	if (reqp->dr_chain_out_head.dc_buffer_paddr != 0) {
		(void) ddi_dma_unbind_handle(reqp->dr_chain_out_dmah);
		reqp->dr_chain_out_head.dc_buffer_paddr = 0;
	}

	return ((rv != DDI_SUCCESS)? rv : rv1);
}

/*
 * Build either an input chain or an output chain. It is a single-item
 * chain for Sparc, and possibly a multiple-item chain for x86.
 */
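/*
 * A sketch of the descriptor layout built below (offsets are the
 * DESC_* constants): each DESC_SIZE entry holds the cookie's buffer
 * address (DESC_BUFADDR), a pointer to the next descriptor
 * (DESC_NEXT), and the buffer length (DESC_LENGTH); DESC_NEXT of the
 * last entry is set to 0 to terminate the chain.
 */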
2003 static int
2004 dca_bindchains_one(dca_request_t *reqp, size_t cnt, int dr_offset,
2005 caddr_t kaddr, ddi_dma_handle_t handle, uint_t flags,
2006 dca_chain_t *head, int *n_chain)
2008 ddi_dma_cookie_t c;
2009 uint_t nc;
2010 int rv;
2011 caddr_t chain_kaddr_pre;
2012 caddr_t chain_kaddr;
2013 uint32_t chain_paddr;
2014 int i;
2016 /* Advance past the context structure to the starting address */
2017 chain_paddr = reqp->dr_ctx_paddr + dr_offset;
2018 chain_kaddr = reqp->dr_ctx_kaddr + dr_offset;
2021 * Bind the kernel address to the DMA handle. On x86, the actual
2022 * buffer is mapped into multiple physical addresses. On Sparc,
2023 * the actual buffer is mapped into a single address.
2025 rv = ddi_dma_addr_bind_handle(handle,
2026 NULL, kaddr, cnt, flags, DDI_DMA_DONTWAIT, NULL, &c, &nc);
2027 if (rv != DDI_DMA_MAPPED) {
2028 return (DDI_FAILURE);
2031 (void) ddi_dma_sync(handle, 0, cnt, DDI_DMA_SYNC_FORDEV);
2032 if ((rv = dca_check_dma_handle(reqp->dr_dca, handle,
2033 DCA_FM_ECLASS_NONE)) != DDI_SUCCESS) {
2034 reqp->destroy = TRUE;
2035 return (rv);
2038 *n_chain = nc;
2040 /* Setup the data buffer chain for DMA transfer */
2041 chain_kaddr_pre = NULL;
2042 head->dc_buffer_paddr = 0;
2043 head->dc_next_paddr = 0;
2044 head->dc_buffer_length = 0;
2045 for (i = 0; i < nc; i++) {
2046 /* PIO */
2047 PUTDESC32(reqp, chain_kaddr, DESC_BUFADDR, c.dmac_address);
2048 PUTDESC16(reqp, chain_kaddr, DESC_RSVD, 0);
2049 PUTDESC16(reqp, chain_kaddr, DESC_LENGTH, c.dmac_size);
2051 /* Remember the head of the chain */
2052 if (head->dc_buffer_paddr == 0) {
2053 head->dc_buffer_paddr = c.dmac_address;
2054 head->dc_buffer_length = c.dmac_size;
2057 /* Link to the previous one if one exists */
2058 if (chain_kaddr_pre) {
2059 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT,
2060 chain_paddr);
2061 if (head->dc_next_paddr == 0)
2062 head->dc_next_paddr = chain_paddr;
2064 chain_kaddr_pre = chain_kaddr;
2066 /* Maintain pointers */
2067 chain_paddr += DESC_SIZE;
2068 chain_kaddr += DESC_SIZE;
2070 /* Retrieve the next cookie if there is one */
2071 if (i < nc-1)
2072 ddi_dma_nextcookie(handle, &c);
2075 /* Set the next pointer in the last entry to NULL */
2076 PUTDESC32(reqp, chain_kaddr_pre, DESC_NEXT, 0);
2078 return (DDI_SUCCESS);
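/*
 * Illustration (inferred from the loop above): a buffer that
 * ddi_dma_addr_bind_handle() splits into three cookies yields three
 * DESC_SIZE descriptors in the context page:
 *
 *	desc[0]: BUFADDR=cookie0  LENGTH=len0  NEXT=paddr(desc[1])
 *	desc[1]: BUFADDR=cookie1  LENGTH=len1  NEXT=paddr(desc[2])
 *	desc[2]: BUFADDR=cookie2  LENGTH=len2  NEXT=0
 *
 * head records cookie0's address and length plus paddr(desc[1]), so
 * the caller can place the first buffer directly in the MCR entry and
 * let the chip follow the chain for the rest.
 */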
2082 * Schedule some work.
2085 dca_start(dca_t *dca, dca_request_t *reqp, int mcr, int dosched)
2087 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2089 mutex_enter(&wlp->dwl_lock);
2091 DBG(dca, DCHATTY, "req=%p, in=%p, out=%p, ctx=%p, ibuf=%p, obuf=%p",
2092 reqp, reqp->dr_in, reqp->dr_out, reqp->dr_ctx_kaddr,
2093 reqp->dr_ibuf_kaddr, reqp->dr_obuf_kaddr);
2094 DBG(dca, DCHATTY, "ctx paddr = %x, ibuf paddr = %x, obuf paddr = %x",
2095 reqp->dr_ctx_paddr, reqp->dr_ibuf_paddr, reqp->dr_obuf_paddr);
2096 /* sync out the entire context and descriptor chains */
2097 (void) ddi_dma_sync(reqp->dr_ctx_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2098 if (dca_check_dma_handle(dca, reqp->dr_ctx_dmah,
2099 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2100 reqp->destroy = TRUE;
2101 mutex_exit(&wlp->dwl_lock);
2102 return (CRYPTO_DEVICE_ERROR);
2105 dca_enqueue(&wlp->dwl_waitq, (dca_listnode_t *)reqp);
2106 wlp->dwl_count++;
2107 wlp->dwl_lastsubmit = ddi_get_lbolt();
2108 reqp->dr_wlp = wlp;
2110 if ((wlp->dwl_count == wlp->dwl_hiwater) && (wlp->dwl_busy == 0)) {
2111 /* we are fully loaded now, let kCF know */
2113 wlp->dwl_flowctl++;
2114 wlp->dwl_busy = 1;
2116 crypto_prov_notify(wlp->dwl_prov, CRYPTO_PROVIDER_BUSY);
2119 if (dosched) {
2120 #ifdef SCHEDDELAY
2121 /* possibly wait for more work to arrive */
2122 if (wlp->dwl_count >= wlp->dwl_reqspermcr) {
2123 dca_schedule(dca, mcr);
2124 } else if (!wlp->dwl_schedtid) {
2125 /* wait 1 msec for more work before doing it */
2126 wlp->dwl_schedtid = timeout(dca_schedtimeout,
2127 (void *)wlp, drv_usectohz(MSEC));
2129 #else
2130 dca_schedule(dca, mcr);
2131 #endif
2133 mutex_exit(&wlp->dwl_lock);
2135 return (CRYPTO_QUEUED);
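/*
 * Flow-control annotation: dwl_count tracks requests waiting on this
 * worklist. Crossing dwl_hiwater above raises CRYPTO_PROVIDER_BUSY so
 * the framework stops submitting new jobs; dca_schedule() and
 * dca_rejectjobs() signal CRYPTO_PROVIDER_READY again once the count
 * drains back down to dwl_lowater.
 */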
2138 void
2139 dca_schedule(dca_t *dca, int mcr)
2141 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2142 int csr;
2143 int full;
2144 uint32_t status;
2146 ASSERT(mutex_owned(&wlp->dwl_lock));
2148 * If the card is draining or has an outstanding failure,
2149 * don't schedule any more work on it right now
2151 if (wlp->dwl_drain || (dca->dca_flags & DCA_FAILED)) {
2152 return;
2155 if (mcr == MCR2) {
2156 csr = CSR_MCR2;
2157 full = DMASTAT_MCR2FULL;
2158 } else {
2159 csr = CSR_MCR1;
2160 full = DMASTAT_MCR1FULL;
2163 for (;;) {
2164 dca_work_t *workp;
2165 uint32_t offset;
2166 int nreqs;
2168 status = GETCSR(dca, CSR_DMASTAT);
2169 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
2170 DCA_FM_ECLASS_NONE) != DDI_SUCCESS)
2171 return;
2173 if ((status & full) != 0)
2174 break;
2176 #ifdef SCHEDDELAY
2177 /* if there isn't enough to do, don't bother now */
2178 if ((wlp->dwl_count < wlp->dwl_reqspermcr) &&
2179 (ddi_get_lbolt() < (wlp->dwl_lastsubmit +
2180 drv_usectohz(MSEC)))) {
2181 /* wait a bit longer... */
2182 if (wlp->dwl_schedtid == 0) {
2183 wlp->dwl_schedtid = timeout(dca_schedtimeout,
2184 (void *)wlp, drv_usectohz(MSEC));
2186 return;
2188 #endif
2190 /* grab a work structure */
2191 workp = dca_getwork(dca, mcr);
2193 if (workp == NULL) {
2195			 * There must be work ready to be reclaimed in
2196			 * this case, since the chip can hold fewer
2197			 * outstanding jobs than the total number allocated.
2199 dca_reclaim(dca, mcr);
2200 continue;
2203 nreqs = 0;
2204 offset = MCR_CTXADDR;
2206 while (nreqs < wlp->dwl_reqspermcr) {
2207 dca_request_t *reqp;
2209 reqp = (dca_request_t *)dca_dequeue(&wlp->dwl_waitq);
2210 if (reqp == NULL) {
2211 /* nothing left to process */
2212 break;
2215 * Update flow control.
2217 wlp->dwl_count--;
2218 if ((wlp->dwl_count == wlp->dwl_lowater) &&
2219 (wlp->dwl_busy)) {
2220 wlp->dwl_busy = 0;
2221 crypto_prov_notify(wlp->dwl_prov,
2222 CRYPTO_PROVIDER_READY);
2226 * Context address.
2228 PUTMCR32(workp, offset, reqp->dr_ctx_paddr);
2229 offset += 4;
2232 * Input chain.
2234 /* input buffer address */
2235 PUTMCR32(workp, offset, reqp->dr_in_paddr);
2236 offset += 4;
2237 /* next input buffer entry */
2238 PUTMCR32(workp, offset, reqp->dr_in_next);
2239 offset += 4;
2240 /* input buffer length */
2241 PUTMCR16(workp, offset, reqp->dr_in_len);
2242 offset += 2;
2243 /* zero the reserved field */
2244 PUTMCR16(workp, offset, 0);
2245 offset += 2;
2248 * Overall length.
2250 /* reserved field */
2251 PUTMCR16(workp, offset, 0);
2252 offset += 2;
2253 /* total packet length */
2254 PUTMCR16(workp, offset, reqp->dr_pkt_length);
2255 offset += 2;
2258 * Output chain.
2260 /* output buffer address */
2261 PUTMCR32(workp, offset, reqp->dr_out_paddr);
2262 offset += 4;
2263 /* next output buffer entry */
2264 PUTMCR32(workp, offset, reqp->dr_out_next);
2265 offset += 4;
2266 /* output buffer length */
2267 PUTMCR16(workp, offset, reqp->dr_out_len);
2268 offset += 2;
2269 /* zero the reserved field */
2270 PUTMCR16(workp, offset, 0);
2271 offset += 2;
2274 * Note submission.
2276 workp->dw_reqs[nreqs] = reqp;
2277 nreqs++;
2280 if (nreqs == 0) {
2281 /* nothing in the queue! */
2282 dca_freework(workp);
2283 return;
2286 wlp->dwl_submit++;
2288 PUTMCR16(workp, MCR_FLAGS, 0);
2289 PUTMCR16(workp, MCR_COUNT, nreqs);
2291 DBG(dca, DCHATTY,
2292 "posting work (phys %x, virt 0x%p) (%d reqs) to MCR%d",
2293 workp->dw_mcr_paddr, workp->dw_mcr_kaddr,
2294 nreqs, mcr);
2296 workp->dw_lbolt = ddi_get_lbolt();
2297 /* Make sure MCR is synced out to device. */
2298 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 0,
2299 DDI_DMA_SYNC_FORDEV);
2300 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
2301 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2302 dca_destroywork(workp);
2303 return;
2306 PUTCSR(dca, csr, workp->dw_mcr_paddr);
2307 if (dca_check_acc_handle(dca, dca->dca_regs_handle,
2308 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2309 dca_destroywork(workp);
2310 return;
2311 } else {
2312 dca_enqueue(&wlp->dwl_runq, (dca_listnode_t *)workp);
2315 DBG(dca, DCHATTY, "posted");
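/*
 * MCR layout sketch (offsets inferred from the packing loop above,
 * assuming MCR_CTXADDR is 4): a 4-byte header of 16-bit FLAGS and
 * 16-bit COUNT words, followed by one 32-byte record per request:
 *
 *	+0	context paddr			(4 bytes)
 *	+4	input buffer paddr		(4 bytes)
 *	+8	next input descriptor paddr	(4 bytes)
 *	+12	input length, reserved		(2+2 bytes)
 *	+16	reserved, total packet length	(2+2 bytes)
 *	+20	output buffer paddr		(4 bytes)
 *	+24	next output descriptor paddr	(4 bytes)
 *	+28	output length, reserved		(2+2 bytes)
 */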
2320 * Reclaim completed work, called in interrupt context.
2322 void
2323 dca_reclaim(dca_t *dca, int mcr)
2325 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2326 dca_work_t *workp;
2327 ushort_t flags;
2328 int nreclaimed = 0;
2329 int i;
2331 DBG(dca, DRECLAIM, "worklist = 0x%p (MCR%d)", wlp, mcr);
2332 ASSERT(mutex_owned(&wlp->dwl_lock));
2334 * For each MCR in the submitted (runq), we check to see if
2335 * it has been processed. If so, then we note each individual
2336	 * job in the MCR, and do the completion processing for
2337	 * each such job.
2339 for (;;) {
2341 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2342 if (workp == NULL) {
2343 break;
2346 /* only sync the MCR flags, since that's all we need */
2347 (void) ddi_dma_sync(workp->dw_mcr_dmah, 0, 4,
2348 DDI_DMA_SYNC_FORKERNEL);
2349 if (dca_check_dma_handle(dca, workp->dw_mcr_dmah,
2350 DCA_FM_ECLASS_NONE) != DDI_SUCCESS) {
2351 dca_rmqueue((dca_listnode_t *)workp);
2352 dca_destroywork(workp);
2353 return;
2356 flags = GETMCR16(workp, MCR_FLAGS);
2357 if ((flags & MCRFLAG_FINISHED) == 0) {
2358 /* chip is still working on it */
2359 DBG(dca, DRECLAIM,
2360 "chip still working on it (MCR%d)", mcr);
2361 break;
2364		/* it's really for us, so remove it from the queue */
2365 dca_rmqueue((dca_listnode_t *)workp);
2367 /* if we were draining, signal on the cv */
2368 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2369 cv_signal(&wlp->dwl_cv);
2372 /* update statistics, done under the lock */
2373 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2374 dca_request_t *reqp = workp->dw_reqs[i];
2375 if (reqp == NULL) {
2376 continue;
2378 if (reqp->dr_byte_stat >= 0) {
2379 dca->dca_stats[reqp->dr_byte_stat] +=
2380 reqp->dr_pkt_length;
2382 if (reqp->dr_job_stat >= 0) {
2383 dca->dca_stats[reqp->dr_job_stat]++;
2386 mutex_exit(&wlp->dwl_lock);
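		/*
		 * Annotation: the completion callbacks below may re-enter
		 * the provider or free the request, so they run with
		 * dwl_lock dropped; the statistics above were updated
		 * while the lock was still held.
		 */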
2388 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2389 dca_request_t *reqp = workp->dw_reqs[i];
2391 if (reqp == NULL) {
2392 continue;
2395 /* Do the callback. */
2396 workp->dw_reqs[i] = NULL;
2397 dca_done(reqp, CRYPTO_SUCCESS);
2399 nreclaimed++;
2402 /* now we can release the work */
2403 dca_freework(workp);
2405 mutex_enter(&wlp->dwl_lock);
2407 DBG(dca, DRECLAIM, "reclaimed %d cmds", nreclaimed);
2411 dca_length(crypto_data_t *cdata)
2413 return (cdata->cd_length);
2417 * This is the callback function called from the interrupt when a kCF job
2418 * completes. It does some driver-specific things, and then calls the
2419 * kCF-provided callback. Finally, it cleans up the state for the work
2420 * request and drops the reference count to allow for DR.
2422 void
2423 dca_done(dca_request_t *reqp, int err)
2425 uint64_t ena = 0;
2427 /* unbind any chains we were using */
2428 if (dca_unbindchains(reqp) != DDI_SUCCESS) {
2429 /* DMA failure */
2430 ena = dca_ena(ena);
2431 dca_failure(reqp->dr_dca, DDI_DATAPATH_FAULT,
2432 DCA_FM_ECLASS_NONE, ena, CRYPTO_DEVICE_ERROR,
2433 "fault on buffer DMA handle");
2434 if (err == CRYPTO_SUCCESS) {
2435 err = CRYPTO_DEVICE_ERROR;
2439 if (reqp->dr_callback != NULL) {
2440 reqp->dr_callback(reqp, err);
2441 } else {
2442 dca_freereq(reqp);
2447 * Call this when a failure is detected. It will reset the chip,
2448 * log a message, alert kCF, and mark jobs in the runq as failed.
2450 /* ARGSUSED */
2451 void
2452 dca_failure(dca_t *dca, ddi_fault_location_t loc, dca_fma_eclass_t index,
2453 uint64_t ena, int errno, char *mess, ...)
2455 va_list ap;
2456 char buf[256];
2457 int mcr;
2458 char *eclass;
2459 int have_mutex;
2461 va_start(ap, mess);
2462 (void) vsprintf(buf, mess, ap);
2463 va_end(ap);
2465 eclass = dca_fma_eclass_string(dca->dca_model, index);
2467 if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) &&
2468 index != DCA_FM_ECLASS_NONE) {
2469 ddi_fm_ereport_post(dca->dca_dip, eclass, ena,
2470 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
2471 FM_EREPORT_VERS0, NULL);
2473 /* Report the impact of the failure to the DDI. */
2474 ddi_fm_service_impact(dca->dca_dip, DDI_SERVICE_LOST);
2475 } else {
2476 /* Just log the error string to the message log */
2477 dca_error(dca, buf);
2481 * Indicate a failure (keeps schedule from running).
2483 dca->dca_flags |= DCA_FAILED;
2486	 * Reset the chip. As a side effect, this should also disable
2487	 * all interrupts from the device.
2489 (void) dca_reset(dca, 1);
2492 * Report the failure to kCF.
2494 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2495 if (WORKLIST(dca, mcr)->dwl_prov) {
2496 crypto_prov_notify(WORKLIST(dca, mcr)->dwl_prov,
2497 CRYPTO_PROVIDER_FAILED);
2502 * Return jobs not sent to hardware back to kCF.
2504 dca_rejectjobs(dca);
2507 * From this point on, no new work should be arriving, and the
2508 * chip should not be doing any active DMA.
2512 * Now find all the work submitted to the device and fail
2513 * them.
2515 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2516 dca_worklist_t *wlp;
2517 int i;
2519 wlp = WORKLIST(dca, mcr);
2521 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2522 continue;
2524 for (;;) {
2525 dca_work_t *workp;
2527 have_mutex = mutex_tryenter(&wlp->dwl_lock);
2528 workp = (dca_work_t *)dca_dequeue(&wlp->dwl_runq);
2529 if (workp == NULL) {
2530 if (have_mutex)
2531 mutex_exit(&wlp->dwl_lock);
2532 break;
2534 mutex_exit(&wlp->dwl_lock);
2537 * Free up requests
2539 for (i = 0; i < wlp->dwl_reqspermcr; i++) {
2540 dca_request_t *reqp = workp->dw_reqs[i];
2541 if (reqp) {
2542 dca_done(reqp, errno);
2543 workp->dw_reqs[i] = NULL;
2547 mutex_enter(&wlp->dwl_lock);
2549 * If waiting to drain, signal on the waiter.
2551 if (wlp->dwl_drain && QEMPTY(&wlp->dwl_runq)) {
2552 cv_signal(&wlp->dwl_cv);
2556 * Return the work and request structures to
2557 * the free pool.
2559 dca_freework(workp);
2560 if (have_mutex)
2561 mutex_exit(&wlp->dwl_lock);
2567 #ifdef SCHEDDELAY
2569 * Reschedule worklist as needed.
2571 void
2572 dca_schedtimeout(void *arg)
2574 dca_worklist_t *wlp = (dca_worklist_t *)arg;
2575 mutex_enter(&wlp->dwl_lock);
2576 wlp->dwl_schedtid = 0;
2577 dca_schedule(wlp->dwl_dca, wlp->dwl_mcr);
2578 mutex_exit(&wlp->dwl_lock);
2580 #endif
2583 * Check for stalled jobs.
2585 void
2586 dca_jobtimeout(void *arg)
2588 int mcr;
2589 dca_t *dca = (dca_t *)arg;
2590 int hung = 0;
2592 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2593 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2594 dca_work_t *workp;
2595 clock_t when;
2597 mutex_enter(&wlp->dwl_lock);
2598 when = ddi_get_lbolt();
2600 workp = (dca_work_t *)dca_peekqueue(&wlp->dwl_runq);
2601 if (workp == NULL) {
2602 /* nothing sitting in the queue */
2603 mutex_exit(&wlp->dwl_lock);
2604 continue;
2607 if ((when - workp->dw_lbolt) < drv_usectohz(STALETIME)) {
2608 /* request has been queued for less than STALETIME */
2609 mutex_exit(&wlp->dwl_lock);
2610 continue;
2613 /* job has been sitting around for over 1 second, badness */
2614 DBG(dca, DWARN, "stale job (0x%p) found in MCR%d!", workp,
2615 mcr);
2617 /* put it back in the queue, until we reset the chip */
2618 hung++;
2619 mutex_exit(&wlp->dwl_lock);
2622 if (hung) {
2623 dca_failure(dca, DDI_DEVICE_FAULT,
2624 DCA_FM_ECLASS_HW_TIMEOUT, dca_ena(0), CRYPTO_DEVICE_ERROR,
2625		    "timeout processing job");
2628 /* reschedule ourself */
2629 mutex_enter(&dca->dca_intrlock);
2630 if (dca->dca_jobtid == 0) {
2631 /* timeout has been canceled, prior to DR */
2632 mutex_exit(&dca->dca_intrlock);
2633 return;
2636 /* check again in 1 second */
2637 dca->dca_jobtid = timeout(dca_jobtimeout, arg,
2638 drv_usectohz(SECOND));
2639 mutex_exit(&dca->dca_intrlock);
2643 * This returns all jobs back to kCF. It assumes that processing
2644 * on the worklist has halted.
2646 void
2647 dca_rejectjobs(dca_t *dca)
2649 int mcr;
2650 int have_mutex;
2651 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2652 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2653 dca_request_t *reqp;
2655 if (wlp == NULL || wlp->dwl_waitq.dl_prev == NULL) {
2656 continue;
2658 have_mutex = mutex_tryenter(&wlp->dwl_lock);
2659 for (;;) {
2660 reqp = (dca_request_t *)dca_unqueue(&wlp->dwl_waitq);
2661 if (reqp == NULL) {
2662 break;
2664 /* update flow control */
2665 wlp->dwl_count--;
2666 if ((wlp->dwl_count == wlp->dwl_lowater) &&
2667 (wlp->dwl_busy)) {
2668 wlp->dwl_busy = 0;
2669 crypto_prov_notify(wlp->dwl_prov,
2670 CRYPTO_PROVIDER_READY);
2672 mutex_exit(&wlp->dwl_lock);
2674 (void) dca_unbindchains(reqp);
2675 reqp->dr_callback(reqp, EAGAIN);
2676 mutex_enter(&wlp->dwl_lock);
2678 if (have_mutex)
2679 mutex_exit(&wlp->dwl_lock);
2684 dca_drain(dca_t *dca)
2686 int mcr;
2687 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2688 #ifdef SCHEDDELAY
2689 timeout_id_t tid;
2690 #endif
2691 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2693 mutex_enter(&wlp->dwl_lock);
2694 wlp->dwl_drain = 1;
2696 /* give it up to a second to drain from the chip */
2697 if (!QEMPTY(&wlp->dwl_runq)) {
2698 (void) cv_reltimedwait(&wlp->dwl_cv, &wlp->dwl_lock,
2699 drv_usectohz(STALETIME), TR_CLOCK_TICK);
2701 if (!QEMPTY(&wlp->dwl_runq)) {
2702 dca_error(dca, "unable to drain device");
2703 mutex_exit(&wlp->dwl_lock);
2704 dca_undrain(dca);
2705 return (EBUSY);
2709 #ifdef SCHEDDELAY
2710 tid = wlp->dwl_schedtid;
2711 mutex_exit(&wlp->dwl_lock);
2714 * untimeout outside the lock -- this is safe because we
2715 * have set the drain flag, so dca_schedule() will not
2716 * reschedule another timeout
2718 if (tid) {
2719 untimeout(tid);
2721 #else
2722 mutex_exit(&wlp->dwl_lock);
2723 #endif
2725 return (0);
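/*
 * Annotation: draining is cooperative. Setting dwl_drain stops
 * dca_schedule() from posting new MCRs, and dca_reclaim() (or
 * dca_failure()) signals dwl_cv once the runq empties; the wait above
 * bounds this at roughly STALETIME before giving up with EBUSY.
 */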
2728 void
2729 dca_undrain(dca_t *dca)
2731 int mcr;
2733 for (mcr = MCR1; mcr <= MCR2; mcr++) {
2734 dca_worklist_t *wlp = WORKLIST(dca, mcr);
2735 mutex_enter(&wlp->dwl_lock);
2736 wlp->dwl_drain = 0;
2737 dca_schedule(dca, mcr);
2738 mutex_exit(&wlp->dwl_lock);
2743 * Duplicate the crypto_data_t structure, but point to the original
2744 * buffers.
2747 dca_dupcrypto(crypto_data_t *input, crypto_data_t *ninput)
2749 ninput->cd_format = input->cd_format;
2750 ninput->cd_offset = input->cd_offset;
2751 ninput->cd_length = input->cd_length;
2752 ninput->cd_miscdata = input->cd_miscdata;
2754 switch (input->cd_format) {
2755 case CRYPTO_DATA_RAW:
2756 ninput->cd_raw.iov_base = input->cd_raw.iov_base;
2757 ninput->cd_raw.iov_len = input->cd_raw.iov_len;
2758 break;
2760 case CRYPTO_DATA_UIO:
2761 ninput->cd_uio = input->cd_uio;
2762 break;
2764 case CRYPTO_DATA_MBLK:
2765 ninput->cd_mp = input->cd_mp;
2766 break;
2768 default:
2769 DBG(NULL, DWARN,
2770 "dca_dupcrypto: unrecognised crypto data format");
2771 return (CRYPTO_FAILED);
2774 return (CRYPTO_SUCCESS);
2778 * Performs validation checks on the input and output data structures.
2781 dca_verifyio(crypto_data_t *input, crypto_data_t *output)
2783 int rv = CRYPTO_SUCCESS;
2785 switch (input->cd_format) {
2786 case CRYPTO_DATA_RAW:
2787 break;
2789 case CRYPTO_DATA_UIO:
2790 /* we support only kernel buffer */
2791 if (input->cd_uio->uio_segflg != UIO_SYSSPACE) {
2792 DBG(NULL, DWARN, "non kernel input uio buffer");
2793 rv = CRYPTO_ARGUMENTS_BAD;
2795 break;
2797 case CRYPTO_DATA_MBLK:
2798 break;
2800 default:
2801 DBG(NULL, DWARN, "unrecognised input crypto data format");
2802 rv = CRYPTO_ARGUMENTS_BAD;
2805 switch (output->cd_format) {
2806 case CRYPTO_DATA_RAW:
2807 break;
2809 case CRYPTO_DATA_UIO:
2810 /* we support only kernel buffer */
2811 if (output->cd_uio->uio_segflg != UIO_SYSSPACE) {
2812 DBG(NULL, DWARN, "non kernel output uio buffer");
2813 rv = CRYPTO_ARGUMENTS_BAD;
2815 break;
2817 case CRYPTO_DATA_MBLK:
2818 break;
2820 default:
2821 DBG(NULL, DWARN, "unrecognised output crypto data format");
2822 rv = CRYPTO_ARGUMENTS_BAD;
2825 return (rv);
2829 * data: source crypto_data_t struct
2830 * off: offset into the source before commencing copy
2831 * count: the amount of data to copy
2832 * dest: destination buffer
2835 dca_getbufbytes(crypto_data_t *data, size_t off, int count, uchar_t *dest)
2837 int rv = CRYPTO_SUCCESS;
2838 uio_t *uiop;
2839 uint_t vec_idx;
2840 size_t cur_len;
2841 mblk_t *mp;
2843 if (count == 0) {
2844 /* We don't want anything so we're done. */
2845 return (rv);
2849 * Sanity check that we haven't specified a length greater than the
2850 * offset adjusted size of the buffer.
2852 if (count > (data->cd_length - off)) {
2853 return (CRYPTO_DATA_LEN_RANGE);
2856 /* Add the internal crypto_data offset to the requested offset. */
2857 off += data->cd_offset;
2859 switch (data->cd_format) {
2860 case CRYPTO_DATA_RAW:
2861 bcopy(data->cd_raw.iov_base + off, dest, count);
2862 break;
2864 case CRYPTO_DATA_UIO:
2866 * Jump to the first iovec containing data to be
2867 * processed.
2869 uiop = data->cd_uio;
2870 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
2871 off >= uiop->uio_iov[vec_idx].iov_len;
2872 off -= uiop->uio_iov[vec_idx++].iov_len)
2874 if (vec_idx == uiop->uio_iovcnt) {
2876 * The caller specified an offset that is larger than
2877 * the total size of the buffers it provided.
2879 return (CRYPTO_DATA_LEN_RANGE);
2883 * Now process the iovecs.
2885 while (vec_idx < uiop->uio_iovcnt && count > 0) {
2886 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
2887 off, count);
2888 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
2889 cur_len);
2890 count -= cur_len;
2891 dest += cur_len;
2892 vec_idx++;
2893 off = 0;
2896 if (vec_idx == uiop->uio_iovcnt && count > 0) {
2898			 * The end of the specified iovecs was reached but
2899 * the length requested could not be processed
2900 * (requested to digest more data than it provided).
2902 return (CRYPTO_DATA_LEN_RANGE);
2904 break;
2906 case CRYPTO_DATA_MBLK:
2908 * Jump to the first mblk_t containing data to be processed.
2910 for (mp = data->cd_mp; mp != NULL && off >= MBLKL(mp);
2911 off -= MBLKL(mp), mp = mp->b_cont)
2913 if (mp == NULL) {
2915 * The caller specified an offset that is larger than
2916 * the total size of the buffers it provided.
2918 return (CRYPTO_DATA_LEN_RANGE);
2922 * Now do the processing on the mblk chain.
2924 while (mp != NULL && count > 0) {
2925 cur_len = min(MBLKL(mp) - off, count);
2926 bcopy((char *)(mp->b_rptr + off), dest, cur_len);
2927 count -= cur_len;
2928 dest += cur_len;
2929 mp = mp->b_cont;
2930 off = 0;
2933 if (mp == NULL && count > 0) {
2935 * The end of the mblk was reached but the length
2936			 * requested could not be processed (requested to
2937 * digest more data than it provided).
2939 return (CRYPTO_DATA_LEN_RANGE);
2941 break;
2943 default:
2944 DBG(NULL, DWARN, "unrecognised crypto data format");
2945 rv = CRYPTO_ARGUMENTS_BAD;
2947 return (rv);
2952 * Performs the input, output or hard scatter/gather checks on the specified
2953 * crypto_data_t struct. Returns true if the data is scatter/gather in nature
2954  * i.e., it fails the test.
2957 dca_sgcheck(dca_t *dca, crypto_data_t *data, dca_sg_param_t val)
2959 uio_t *uiop;
2960 mblk_t *mp;
2961 int rv = FALSE;
2963 switch (val) {
2964 case DCA_SG_CONTIG:
2966 * Check for a contiguous data buffer.
2968 switch (data->cd_format) {
2969 case CRYPTO_DATA_RAW:
2970 /* Contiguous in nature */
2971 break;
2973 case CRYPTO_DATA_UIO:
2974 if (data->cd_uio->uio_iovcnt > 1)
2975 rv = TRUE;
2976 break;
2978 case CRYPTO_DATA_MBLK:
2979 mp = data->cd_mp;
2980 if (mp->b_cont != NULL)
2981 rv = TRUE;
2982 break;
2984 default:
2985 DBG(NULL, DWARN, "unrecognised crypto data format");
2987 break;
2989 case DCA_SG_WALIGN:
2991		 * Check for a contiguous data buffer that is 32-bit word
2992		 * aligned and whose size is a multiple of a word.
2994 switch (data->cd_format) {
2995 case CRYPTO_DATA_RAW:
2996 if ((data->cd_raw.iov_len % sizeof (uint32_t)) ||
2997 ((uintptr_t)data->cd_raw.iov_base %
2998 sizeof (uint32_t))) {
2999 rv = TRUE;
3001 break;
3003 case CRYPTO_DATA_UIO:
3004 uiop = data->cd_uio;
3005 if (uiop->uio_iovcnt > 1) {
3006 return (TRUE);
3008 /* So there is only one iovec */
3009 if ((uiop->uio_iov[0].iov_len % sizeof (uint32_t)) ||
3010 ((uintptr_t)uiop->uio_iov[0].iov_base %
3011 sizeof (uint32_t))) {
3012 rv = TRUE;
3014 break;
3016 case CRYPTO_DATA_MBLK:
3017 mp = data->cd_mp;
3018 if (mp->b_cont != NULL) {
3019 return (TRUE);
3021 /* So there is only one mblk in the chain */
3022 if ((MBLKL(mp) % sizeof (uint32_t)) ||
3023 ((uintptr_t)mp->b_rptr % sizeof (uint32_t))) {
3024 rv = TRUE;
3026 break;
3028 default:
3029 DBG(NULL, DWARN, "unrecognised crypto data format");
3031 break;
3033 case DCA_SG_PALIGN:
3035		 * Check that the data buffer is page aligned and that its
3036		 * size is a multiple of the page size.
3038 switch (data->cd_format) {
3039 case CRYPTO_DATA_RAW:
3040 if ((data->cd_length % dca->dca_pagesize) ||
3041 ((uintptr_t)data->cd_raw.iov_base %
3042 dca->dca_pagesize)) {
3043 rv = TRUE;
3045 break;
3047 case CRYPTO_DATA_UIO:
3048 uiop = data->cd_uio;
3049 if ((uiop->uio_iov[0].iov_len % dca->dca_pagesize) ||
3050 ((uintptr_t)uiop->uio_iov[0].iov_base %
3051 dca->dca_pagesize)) {
3052 rv = TRUE;
3054 break;
3056 case CRYPTO_DATA_MBLK:
3057 mp = data->cd_mp;
3058 if ((MBLKL(mp) % dca->dca_pagesize) ||
3059 ((uintptr_t)mp->b_rptr % dca->dca_pagesize)) {
3060 rv = TRUE;
3062 break;
3064 default:
3065 DBG(NULL, DWARN, "unrecognised crypto data format");
3067 break;
3069 default:
3070 DBG(NULL, DWARN, "unrecognised scatter/gather param type");
3073 return (rv);
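/*
 * Annotation: the three checks run in roughly increasing strictness.
 * DCA_SG_CONTIG only rejects multi-segment buffers, DCA_SG_WALIGN
 * additionally requires 32-bit alignment and a word-multiple length,
 * and DCA_SG_PALIGN requires page alignment and a page-multiple
 * length. A TRUE result presumably steers the request through a
 * copy/bounce path rather than direct DMA binding.
 */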
3077 * Increments the cd_offset and decrements the cd_length as the data is
3078 * gathered from the crypto_data_t struct.
3079 * The data is reverse-copied into the dest buffer if the flag is true.
3082 dca_gather(crypto_data_t *in, char *dest, int count, int reverse)
3084 int rv = CRYPTO_SUCCESS;
3085 uint_t vec_idx;
3086 uio_t *uiop;
3087 off_t off = in->cd_offset;
3088 size_t cur_len;
3089 mblk_t *mp;
3091 switch (in->cd_format) {
3092 case CRYPTO_DATA_RAW:
3093 if (count > in->cd_length) {
3095 * The caller specified a length greater than the
3096 * size of the buffer.
3098 return (CRYPTO_DATA_LEN_RANGE);
3100 if (reverse)
3101 dca_reverse(in->cd_raw.iov_base + off, dest, count,
3102 count);
3103 else
3104 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
3105 in->cd_offset += count;
3106 in->cd_length -= count;
3107 break;
3109 case CRYPTO_DATA_UIO:
3111 * Jump to the first iovec containing data to be processed.
3113 uiop = in->cd_uio;
3114 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3115 off >= uiop->uio_iov[vec_idx].iov_len;
3116 off -= uiop->uio_iov[vec_idx++].iov_len)
3118 if (vec_idx == uiop->uio_iovcnt) {
3120 * The caller specified an offset that is larger than
3121 * the total size of the buffers it provided.
3123 return (CRYPTO_DATA_LEN_RANGE);
3127 * Now process the iovecs.
3129 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3130 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3131 off, count);
3132 count -= cur_len;
3133 if (reverse) {
3134 /* Fill the dest buffer from the end */
3135 dca_reverse(uiop->uio_iov[vec_idx].iov_base +
3136 off, dest+count, cur_len, cur_len);
3137 } else {
3138 bcopy(uiop->uio_iov[vec_idx].iov_base + off,
3139 dest, cur_len);
3140 dest += cur_len;
3142 in->cd_offset += cur_len;
3143 in->cd_length -= cur_len;
3144 vec_idx++;
3145 off = 0;
3148 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3150			 * The end of the specified iovecs was reached but
3151 * the length requested could not be processed
3152 * (requested to digest more data than it provided).
3154 return (CRYPTO_DATA_LEN_RANGE);
3156 break;
3158 case CRYPTO_DATA_MBLK:
3160 * Jump to the first mblk_t containing data to be processed.
3162 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
3163 off -= MBLKL(mp), mp = mp->b_cont)
3165 if (mp == NULL) {
3167 * The caller specified an offset that is larger than
3168 * the total size of the buffers it provided.
3170 return (CRYPTO_DATA_LEN_RANGE);
3174 * Now do the processing on the mblk chain.
3176 while (mp != NULL && count > 0) {
3177 cur_len = min(MBLKL(mp) - off, count);
3178 count -= cur_len;
3179 if (reverse) {
3180 /* Fill the dest buffer from the end */
3181 dca_reverse((char *)(mp->b_rptr + off),
3182 dest+count, cur_len, cur_len);
3183 } else {
3184 bcopy((char *)(mp->b_rptr + off), dest,
3185 cur_len);
3186 dest += cur_len;
3188 in->cd_offset += cur_len;
3189 in->cd_length -= cur_len;
3190 mp = mp->b_cont;
3191 off = 0;
3194 if (mp == NULL && count > 0) {
3196 * The end of the mblk was reached but the length
3197			 * requested could not be processed (requested to
3198 * digest more data than it provided).
3200 return (CRYPTO_DATA_LEN_RANGE);
3202 break;
3204 default:
3205 DBG(NULL, DWARN, "dca_gather: unrecognised crypto data format");
3206 rv = CRYPTO_ARGUMENTS_BAD;
3208 return (rv);
3212 * Increments the cd_offset and decrements the cd_length as the data is
3213 * gathered from the crypto_data_t struct.
3216 dca_resid_gather(crypto_data_t *in, char *resid, int *residlen, char *dest,
3217 int count)
3219 int rv = CRYPTO_SUCCESS;
3220 caddr_t baddr;
3221 uint_t vec_idx;
3222 uio_t *uiop;
3223 off_t off = in->cd_offset;
3224 size_t cur_len;
3225 mblk_t *mp;
3227 /* Process the residual first */
3228 if (*residlen > 0) {
3229 uint_t num = min(count, *residlen);
3230 bcopy(resid, dest, num);
3231 *residlen -= num;
3232 if (*residlen > 0) {
3234 * Requested amount 'count' is less than what's in
3235 * the residual, so shuffle any remaining resid to
3236 * the front.
3238 baddr = resid + num;
3239 bcopy(baddr, resid, *residlen);
3241 dest += num;
3242 count -= num;
3245 /* Now process what's in the crypto_data_t structs */
3246 switch (in->cd_format) {
3247 case CRYPTO_DATA_RAW:
3248 if (count > in->cd_length) {
3250 * The caller specified a length greater than the
3251 * size of the buffer.
3253 return (CRYPTO_DATA_LEN_RANGE);
3255 bcopy(in->cd_raw.iov_base + in->cd_offset, dest, count);
3256 in->cd_offset += count;
3257 in->cd_length -= count;
3258 break;
3260 case CRYPTO_DATA_UIO:
3262 * Jump to the first iovec containing data to be processed.
3264 uiop = in->cd_uio;
3265 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3266 off >= uiop->uio_iov[vec_idx].iov_len;
3267 off -= uiop->uio_iov[vec_idx++].iov_len)
3269 if (vec_idx == uiop->uio_iovcnt) {
3271 * The caller specified an offset that is larger than
3272 * the total size of the buffers it provided.
3274 return (CRYPTO_DATA_LEN_RANGE);
3278 * Now process the iovecs.
3280 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3281 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3282 off, count);
3283 bcopy(uiop->uio_iov[vec_idx].iov_base + off, dest,
3284 cur_len);
3285 count -= cur_len;
3286 dest += cur_len;
3287 in->cd_offset += cur_len;
3288 in->cd_length -= cur_len;
3289 vec_idx++;
3290 off = 0;
3293 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3295			 * The end of the specified iovecs was reached but
3296 * the length requested could not be processed
3297 * (requested to digest more data than it provided).
3299 return (CRYPTO_DATA_LEN_RANGE);
3301 break;
3303 case CRYPTO_DATA_MBLK:
3305 * Jump to the first mblk_t containing data to be processed.
3307 for (mp = in->cd_mp; mp != NULL && off >= MBLKL(mp);
3308 off -= MBLKL(mp), mp = mp->b_cont)
3310 if (mp == NULL) {
3312 * The caller specified an offset that is larger than
3313 * the total size of the buffers it provided.
3315 return (CRYPTO_DATA_LEN_RANGE);
3319 * Now do the processing on the mblk chain.
3321 while (mp != NULL && count > 0) {
3322 cur_len = min(MBLKL(mp) - off, count);
3323 bcopy((char *)(mp->b_rptr + off), dest, cur_len);
3324 count -= cur_len;
3325 dest += cur_len;
3326 in->cd_offset += cur_len;
3327 in->cd_length -= cur_len;
3328 mp = mp->b_cont;
3329 off = 0;
3332 if (mp == NULL && count > 0) {
3334 * The end of the mblk was reached but the length
3335			 * requested could not be processed (requested to
3336 * digest more data than it provided).
3338 return (CRYPTO_DATA_LEN_RANGE);
3340 break;
3342 default:
3343 DBG(NULL, DWARN,
3344 "dca_resid_gather: unrecognised crypto data format");
3345 rv = CRYPTO_ARGUMENTS_BAD;
3347 return (rv);
3351 * Appends the data to the crypto_data_t struct increasing cd_length.
3352 * cd_offset is left unchanged.
3353 * Data is reverse-copied if the flag is TRUE.
3356 dca_scatter(const char *src, crypto_data_t *out, int count, int reverse)
3358 int rv = CRYPTO_SUCCESS;
3359 off_t offset = out->cd_offset + out->cd_length;
3360 uint_t vec_idx;
3361 uio_t *uiop;
3362 size_t cur_len;
3363 mblk_t *mp;
3365 switch (out->cd_format) {
3366 case CRYPTO_DATA_RAW:
3367 if (out->cd_raw.iov_len - offset < count) {
3368 /* Trying to write out more than space available. */
3369 return (CRYPTO_DATA_LEN_RANGE);
3371 if (reverse)
3372 dca_reverse((void*) src, out->cd_raw.iov_base + offset,
3373 count, count);
3374 else
3375 bcopy(src, out->cd_raw.iov_base + offset, count);
3376 out->cd_length += count;
3377 break;
3379 case CRYPTO_DATA_UIO:
3381 * Jump to the first iovec that can be written to.
3383 uiop = out->cd_uio;
3384 for (vec_idx = 0; vec_idx < uiop->uio_iovcnt &&
3385 offset >= uiop->uio_iov[vec_idx].iov_len;
3386 offset -= uiop->uio_iov[vec_idx++].iov_len)
3388 if (vec_idx == uiop->uio_iovcnt) {
3390 * The caller specified an offset that is larger than
3391 * the total size of the buffers it provided.
3393 return (CRYPTO_DATA_LEN_RANGE);
3397 * Now process the iovecs.
3399 while (vec_idx < uiop->uio_iovcnt && count > 0) {
3400 cur_len = min(uiop->uio_iov[vec_idx].iov_len -
3401 offset, count);
3402 count -= cur_len;
3403 if (reverse) {
3404 dca_reverse((void*) (src+count),
3405 uiop->uio_iov[vec_idx].iov_base +
3406 offset, cur_len, cur_len);
3407 } else {
3408 bcopy(src, uiop->uio_iov[vec_idx].iov_base +
3409 offset, cur_len);
3410 src += cur_len;
3412 out->cd_length += cur_len;
3413 vec_idx++;
3414 offset = 0;
3417 if (vec_idx == uiop->uio_iovcnt && count > 0) {
3419			 * The end of the specified iovecs was reached but
3420 * the length requested could not be processed
3421 * (requested to write more data than space provided).
3423 return (CRYPTO_DATA_LEN_RANGE);
3425 break;
3427 case CRYPTO_DATA_MBLK:
3429 * Jump to the first mblk_t that can be written to.
3431 for (mp = out->cd_mp; mp != NULL && offset >= MBLKL(mp);
3432 offset -= MBLKL(mp), mp = mp->b_cont)
3434 if (mp == NULL) {
3436 * The caller specified an offset that is larger than
3437 * the total size of the buffers it provided.
3439 return (CRYPTO_DATA_LEN_RANGE);
3443 * Now do the processing on the mblk chain.
3445 while (mp != NULL && count > 0) {
3446 cur_len = min(MBLKL(mp) - offset, count);
3447 count -= cur_len;
3448 if (reverse) {
3449 dca_reverse((void*) (src+count),
3450 (char *)(mp->b_rptr + offset), cur_len,
3451 cur_len);
3452 } else {
3453 bcopy(src, (char *)(mp->b_rptr + offset),
3454 cur_len);
3455 src += cur_len;
3457 out->cd_length += cur_len;
3458 mp = mp->b_cont;
3459 offset = 0;
3462 if (mp == NULL && count > 0) {
3464			 * The end of the mblk was reached but the length
3465			 * requested could not be processed (requested to
3466			 * write more data than space provided).
3468 return (CRYPTO_DATA_LEN_RANGE);
3470 break;
3472 default:
3473 DBG(NULL, DWARN, "unrecognised crypto data format");
3474 rv = CRYPTO_ARGUMENTS_BAD;
3476 return (rv);
3480 * Compare two byte arrays in reverse order.
3481 * Return 0 if they are identical, 1 otherwise.
3484 dca_bcmp_reverse(const void *s1, const void *s2, size_t n)
3486 int i;
3487 caddr_t src, dst;
3489 if (!n)
3490 return (0);
3492 src = ((caddr_t)s1) + n - 1;
3493 dst = (caddr_t)s2;
3494 for (i = 0; i < n; i++) {
3495 if (*src != *dst)
3496 return (1);
3497 src--;
3498 dst++;
3501 return (0);
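/*
 * Example: s1 = { 1, 2, 3 } and s2 = { 3, 2, 1 } compare identical
 * (return 0), since s1 is walked from its last byte backwards while
 * s2 is walked forwards.
 */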
3506 * This calculates the size of a bignum in bits, specifically not counting
3507 * leading zero bits. This size calculation must be done *before* any
3508 * endian reversal takes place (i.e. the numbers are in absolute big-endian
3509  * order).
3512 dca_bitlen(unsigned char *bignum, int bytelen)
3514 unsigned char msbyte;
3515 int i, j;
3517 for (i = 0; i < bytelen - 1; i++) {
3518 if (bignum[i] != 0) {
3519 break;
3522 msbyte = bignum[i];
3523 for (j = 8; j > 1; j--) {
3524 if (msbyte & 0x80) {
3525 break;
3527 msbyte <<= 1;
3529 return ((8 * (bytelen - i - 1)) + j);
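/*
 * Example: for the two-byte bignum { 0x00, 0x1f } the first loop skips
 * the leading zero byte (i == 1), and the second loop shifts 0x1f left
 * three times before bit 7 is set, leaving j == 5. The result is
 * 8 * (2 - 1 - 1) + 5 == 5 bits, which matches 0x1f == 31.
 */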
3533  * This compares two bignums (in big-endian order). It ignores leading
3534  * null bytes. The result semantics follow bcmp, memcmp, strcmp, etc.
3537 dca_numcmp(caddr_t n1, int n1len, caddr_t n2, int n2len)
3539 while ((n1len > 1) && (*n1 == 0)) {
3540 n1len--;
3541 n1++;
3543 while ((n2len > 1) && (*n2 == 0)) {
3544 n2len--;
3545 n2++;
3547 if (n1len != n2len) {
3548 return (n1len - n2len);
3550 while ((n1len > 1) && (*n1 == *n2)) {
3551 n1++;
3552 n2++;
3553 n1len--;
3555 return ((int)(*(uchar_t *)n1) - (int)(*(uchar_t *)n2));
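/*
 * Example: { 0x00, 0x01, 0x02 } and { 0x01, 0x02 } compare equal
 * (return 0) because the leading null byte is skipped, while { 0x02 }
 * vs { 0x01, 0x02 } is negative because the effective lengths differ.
 */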
3559 * Return array of key attributes.
3561 crypto_object_attribute_t *
3562 dca_get_key_attr(crypto_key_t *key)
3564 if ((key->ck_format != CRYPTO_KEY_ATTR_LIST) ||
3565 (key->ck_count == 0)) {
3566 return (NULL);
3569 return (key->ck_attrs);
3573  * If the attribute type exists, valp points to its 32-bit value.
3576 dca_attr_lookup_uint32(crypto_object_attribute_t *attrp, uint_t atnum,
3577 uint64_t atype, uint32_t *valp)
3579 crypto_object_attribute_t *bap;
3581 bap = dca_find_attribute(attrp, atnum, atype);
3582 if (bap == NULL) {
3583 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3586 *valp = *bap->oa_value;
3588 return (CRYPTO_SUCCESS);
3592  * If the attribute type exists, data contains the start address of the
3593  * value, and numelems contains its length.
3596 dca_attr_lookup_uint8_array(crypto_object_attribute_t *attrp, uint_t atnum,
3597 uint64_t atype, void **data, unsigned int *numelems)
3599 crypto_object_attribute_t *bap;
3601 bap = dca_find_attribute(attrp, atnum, atype);
3602 if (bap == NULL) {
3603 return (CRYPTO_ATTRIBUTE_TYPE_INVALID);
3606 *data = bap->oa_value;
3607 *numelems = bap->oa_value_len;
3609 return (CRYPTO_SUCCESS);
3613  * Finds the entry of the specified attribute type. If it is not found,
3614  * dca_find_attribute returns NULL.
3616 crypto_object_attribute_t *
3617 dca_find_attribute(crypto_object_attribute_t *attrp, uint_t atnum,
3618 uint64_t atype)
3620 while (atnum) {
3621 if (attrp->oa_type == atype)
3622 return (attrp);
3623 atnum--;
3624 attrp++;
3626 return (NULL);
3630 * Return the address of the first data buffer. If the data format is
3631 * unrecognised return NULL.
3633 caddr_t
3634 dca_bufdaddr(crypto_data_t *data)
3636 switch (data->cd_format) {
3637 case CRYPTO_DATA_RAW:
3638 return (data->cd_raw.iov_base + data->cd_offset);
3639 case CRYPTO_DATA_UIO:
3640 return (data->cd_uio->uio_iov[0].iov_base + data->cd_offset);
3641 case CRYPTO_DATA_MBLK:
3642 return ((char *)data->cd_mp->b_rptr + data->cd_offset);
3643 default:
3644 DBG(NULL, DWARN,
3645 "dca_bufdaddr: unrecognised crypto data format");
3646 return (NULL);
3650 static caddr_t
3651 dca_bufdaddr_out(crypto_data_t *data)
3653 size_t offset = data->cd_offset + data->cd_length;
3655 switch (data->cd_format) {
3656 case CRYPTO_DATA_RAW:
3657 return (data->cd_raw.iov_base + offset);
3658 case CRYPTO_DATA_UIO:
3659 return (data->cd_uio->uio_iov[0].iov_base + offset);
3660 case CRYPTO_DATA_MBLK:
3661 return ((char *)data->cd_mp->b_rptr + offset);
3662 default:
3663 DBG(NULL, DWARN,
3664 "dca_bufdaddr_out: unrecognised crypto data format");
3665 return (NULL);
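/*
 * Annotation: dca_bufdaddr() returns the current read position
 * (cd_offset into the first buffer), whereas dca_bufdaddr_out()
 * returns the current write position (cd_offset + cd_length), since
 * output is appended after any data already produced.
 */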
3670 * Control entry points.
3673 /* ARGSUSED */
3674 static void
3675 dca_provider_status(crypto_provider_handle_t provider, uint_t *status)
3677 *status = CRYPTO_PROVIDER_READY;
3681 * Cipher (encrypt/decrypt) entry points.
3684 /* ARGSUSED */
3685 static int
3686 dca_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3687 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3688 crypto_req_handle_t req)
3690 int error = CRYPTO_FAILED;
3691 dca_t *softc;
3693 softc = DCA_SOFTC_FROM_CTX(ctx);
3694 DBG(softc, DENTRY, "dca_encrypt_init: started");
3696 /* check mechanism */
3697 switch (mechanism->cm_type) {
3698 case DES_CBC_MECH_INFO_TYPE:
3699 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3700 DR_ENCRYPT);
3701 break;
3702 case DES3_CBC_MECH_INFO_TYPE:
3703 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3704 DR_ENCRYPT | DR_TRIPLE);
3705 break;
3706 case RSA_PKCS_MECH_INFO_TYPE:
3707 case RSA_X_509_MECH_INFO_TYPE:
3708 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3709 break;
3710 default:
3711 cmn_err(CE_WARN, "dca_encrypt_init: unexpected mech type "
3712 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3713 error = CRYPTO_MECHANISM_INVALID;
3716 DBG(softc, DENTRY, "dca_encrypt_init: done, err = 0x%x", error);
3718 if (error == CRYPTO_SUCCESS)
3719 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3720 &softc->dca_ctx_list_lock);
3722 return (error);
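/*
 * Annotation: each successfully initialized context is threaded onto
 * dca_ctx_list (here and in the other *_init entry points), presumably
 * so that dca_free_context_list() can reclaim outstanding contexts if
 * the provider is torn down, e.g. for DR, before kCF frees them.
 */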
3725 /* ARGSUSED */
3726 static int
3727 dca_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3728 crypto_data_t *ciphertext, crypto_req_handle_t req)
3730 int error = CRYPTO_FAILED;
3731 dca_t *softc;
3733 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3734 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3736 softc = DCA_SOFTC_FROM_CTX(ctx);
3737 DBG(softc, DENTRY, "dca_encrypt: started");
3739 /* handle inplace ops */
3740 if (!ciphertext) {
3741 dca_request_t *reqp = ctx->cc_provider_private;
3742 reqp->dr_flags |= DR_INPLACE;
3743 ciphertext = plaintext;
3746 /* check mechanism */
3747 switch (DCA_MECH_FROM_CTX(ctx)) {
3748 case DES_CBC_MECH_INFO_TYPE:
3749 error = dca_3des(ctx, plaintext, ciphertext, req, DR_ENCRYPT);
3750 break;
3751 case DES3_CBC_MECH_INFO_TYPE:
3752 error = dca_3des(ctx, plaintext, ciphertext, req,
3753 DR_ENCRYPT | DR_TRIPLE);
3754 break;
3755 case RSA_PKCS_MECH_INFO_TYPE:
3756 case RSA_X_509_MECH_INFO_TYPE:
3757 error = dca_rsastart(ctx, plaintext, ciphertext, req,
3758 DCA_RSA_ENC);
3759 break;
3760 default:
3761 /* Should never reach here */
3762 cmn_err(CE_WARN, "dca_encrypt: unexpected mech type "
3763 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3764 error = CRYPTO_MECHANISM_INVALID;
3767 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3768 (error != CRYPTO_BUFFER_TOO_SMALL)) {
3769 ciphertext->cd_length = 0;
3772 DBG(softc, DENTRY, "dca_encrypt: done, err = 0x%x", error);
3774 return (error);
3777 /* ARGSUSED */
3778 static int
3779 dca_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
3780 crypto_data_t *ciphertext, crypto_req_handle_t req)
3782 int error = CRYPTO_FAILED;
3783 dca_t *softc;
3785 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3786 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3788 softc = DCA_SOFTC_FROM_CTX(ctx);
3789 DBG(softc, DENTRY, "dca_encrypt_update: started");
3791 /* handle inplace ops */
3792 if (!ciphertext) {
3793 dca_request_t *reqp = ctx->cc_provider_private;
3794 reqp->dr_flags |= DR_INPLACE;
3795 ciphertext = plaintext;
3798 /* check mechanism */
3799 switch (DCA_MECH_FROM_CTX(ctx)) {
3800 case DES_CBC_MECH_INFO_TYPE:
3801 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3802 DR_ENCRYPT);
3803 break;
3804 case DES3_CBC_MECH_INFO_TYPE:
3805 error = dca_3desupdate(ctx, plaintext, ciphertext, req,
3806 DR_ENCRYPT | DR_TRIPLE);
3807 break;
3808 default:
3809 /* Should never reach here */
3810 cmn_err(CE_WARN, "dca_encrypt_update: unexpected mech type "
3811 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3812 error = CRYPTO_MECHANISM_INVALID;
3815 DBG(softc, DENTRY, "dca_encrypt_update: done, err = 0x%x", error);
3817 return (error);
3820 /* ARGSUSED */
3821 static int
3822 dca_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3823 crypto_req_handle_t req)
3825 int error = CRYPTO_FAILED;
3826 dca_t *softc;
3828 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3829 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3831 softc = DCA_SOFTC_FROM_CTX(ctx);
3832 DBG(softc, DENTRY, "dca_encrypt_final: started");
3834 /* check mechanism */
3835 switch (DCA_MECH_FROM_CTX(ctx)) {
3836 case DES_CBC_MECH_INFO_TYPE:
3837 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT);
3838 break;
3839 case DES3_CBC_MECH_INFO_TYPE:
3840 error = dca_3desfinal(ctx, ciphertext, DR_ENCRYPT | DR_TRIPLE);
3841 break;
3842 default:
3843 /* Should never reach here */
3844 cmn_err(CE_WARN, "dca_encrypt_final: unexpected mech type "
3845 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3846 error = CRYPTO_MECHANISM_INVALID;
3849 DBG(softc, DENTRY, "dca_encrypt_final: done, err = 0x%x", error);
3851 return (error);
3854 /* ARGSUSED */
3855 static int
3856 dca_encrypt_atomic(crypto_provider_handle_t provider,
3857 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
3858 crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
3859 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
3861 int error = CRYPTO_FAILED;
3862 dca_t *softc = (dca_t *)provider;
3864 DBG(softc, DENTRY, "dca_encrypt_atomic: started");
3866 if (ctx_template != NULL)
3867 return (CRYPTO_ARGUMENTS_BAD);
3869 /* handle inplace ops */
3870 if (!ciphertext) {
3871 ciphertext = plaintext;
3874 /* check mechanism */
3875 switch (mechanism->cm_type) {
3876 case DES_CBC_MECH_INFO_TYPE:
3877 error = dca_3desatomic(provider, session_id, mechanism, key,
3878 plaintext, ciphertext, KM_SLEEP, req,
3879 DR_ENCRYPT | DR_ATOMIC);
3880 break;
3881 case DES3_CBC_MECH_INFO_TYPE:
3882 error = dca_3desatomic(provider, session_id, mechanism, key,
3883 plaintext, ciphertext, KM_SLEEP, req,
3884 DR_ENCRYPT | DR_TRIPLE | DR_ATOMIC);
3885 break;
3886 case RSA_PKCS_MECH_INFO_TYPE:
3887 case RSA_X_509_MECH_INFO_TYPE:
3888 error = dca_rsaatomic(provider, session_id, mechanism, key,
3889 plaintext, ciphertext, KM_SLEEP, req, DCA_RSA_ENC);
3890 break;
3891 default:
3892 cmn_err(CE_WARN, "dca_encrypt_atomic: unexpected mech type "
3893 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3894 error = CRYPTO_MECHANISM_INVALID;
3897 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
3898 ciphertext->cd_length = 0;
3901 DBG(softc, DENTRY, "dca_encrypt_atomic: done, err = 0x%x", error);
3903 return (error);
3906 /* ARGSUSED */
3907 static int
3908 dca_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
3909 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
3910 crypto_req_handle_t req)
3912 int error = CRYPTO_FAILED;
3913 dca_t *softc;
3915 softc = DCA_SOFTC_FROM_CTX(ctx);
3916 DBG(softc, DENTRY, "dca_decrypt_init: started");
3918 /* check mechanism */
3919 switch (mechanism->cm_type) {
3920 case DES_CBC_MECH_INFO_TYPE:
3921 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3922 DR_DECRYPT);
3923 break;
3924 case DES3_CBC_MECH_INFO_TYPE:
3925 error = dca_3desctxinit(ctx, mechanism, key, KM_SLEEP,
3926 DR_DECRYPT | DR_TRIPLE);
3927 break;
3928 case RSA_PKCS_MECH_INFO_TYPE:
3929 case RSA_X_509_MECH_INFO_TYPE:
3930 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
3931 break;
3932 default:
3933 cmn_err(CE_WARN, "dca_decrypt_init: unexpected mech type "
3934 "0x%llx\n", (unsigned long long)mechanism->cm_type);
3935 error = CRYPTO_MECHANISM_INVALID;
3938 DBG(softc, DENTRY, "dca_decrypt_init: done, err = 0x%x", error);
3940 if (error == CRYPTO_SUCCESS)
3941 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
3942 &softc->dca_ctx_list_lock);
3944 return (error);
3947 /* ARGSUSED */
3948 static int
3949 dca_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
3950 crypto_data_t *plaintext, crypto_req_handle_t req)
3952 int error = CRYPTO_FAILED;
3953 dca_t *softc;
3955 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
3956 return (CRYPTO_OPERATION_NOT_INITIALIZED);
3958 softc = DCA_SOFTC_FROM_CTX(ctx);
3959 DBG(softc, DENTRY, "dca_decrypt: started");
3961 /* handle inplace ops */
3962 if (!plaintext) {
3963 dca_request_t *reqp = ctx->cc_provider_private;
3964 reqp->dr_flags |= DR_INPLACE;
3965 plaintext = ciphertext;
3968 /* check mechanism */
3969 switch (DCA_MECH_FROM_CTX(ctx)) {
3970 case DES_CBC_MECH_INFO_TYPE:
3971 error = dca_3des(ctx, ciphertext, plaintext, req, DR_DECRYPT);
3972 break;
3973 case DES3_CBC_MECH_INFO_TYPE:
3974 error = dca_3des(ctx, ciphertext, plaintext, req,
3975 DR_DECRYPT | DR_TRIPLE);
3976 break;
3977 case RSA_PKCS_MECH_INFO_TYPE:
3978 case RSA_X_509_MECH_INFO_TYPE:
3979 error = dca_rsastart(ctx, ciphertext, plaintext, req,
3980 DCA_RSA_DEC);
3981 break;
3982 default:
3983 /* Should never reach here */
3984 cmn_err(CE_WARN, "dca_decrypt: unexpected mech type "
3985 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
3986 error = CRYPTO_MECHANISM_INVALID;
3989 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS) &&
3990 (error != CRYPTO_BUFFER_TOO_SMALL)) {
3991 if (plaintext)
3992 plaintext->cd_length = 0;
3995 DBG(softc, DENTRY, "dca_decrypt: done, err = 0x%x", error);
3997 return (error);
4000 /* ARGSUSED */
4001 static int
4002 dca_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
4003 crypto_data_t *plaintext, crypto_req_handle_t req)
4005 int error = CRYPTO_FAILED;
4006 dca_t *softc;
4008 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4009 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4011 softc = DCA_SOFTC_FROM_CTX(ctx);
4012 DBG(softc, DENTRY, "dca_decrypt_update: started");
4014 /* handle inplace ops */
4015 if (!plaintext) {
4016 dca_request_t *reqp = ctx->cc_provider_private;
4017 reqp->dr_flags |= DR_INPLACE;
4018 plaintext = ciphertext;
4021 /* check mechanism */
4022 switch (DCA_MECH_FROM_CTX(ctx)) {
4023 case DES_CBC_MECH_INFO_TYPE:
4024 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4025 DR_DECRYPT);
4026 break;
4027 case DES3_CBC_MECH_INFO_TYPE:
4028 error = dca_3desupdate(ctx, ciphertext, plaintext, req,
4029 DR_DECRYPT | DR_TRIPLE);
4030 break;
4031 default:
4032 /* Should never reach here */
4033 cmn_err(CE_WARN, "dca_decrypt_update: unexpected mech type "
4034 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4035 error = CRYPTO_MECHANISM_INVALID;
4038 DBG(softc, DENTRY, "dca_decrypt_update: done, err = 0x%x", error);
4040 return (error);
4043 /* ARGSUSED */
4044 static int
4045 dca_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *plaintext,
4046 crypto_req_handle_t req)
4048 int error = CRYPTO_FAILED;
4049 dca_t *softc;
4051 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4052 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4054 softc = DCA_SOFTC_FROM_CTX(ctx);
4055 DBG(softc, DENTRY, "dca_decrypt_final: started");
4057 /* check mechanism */
4058 switch (DCA_MECH_FROM_CTX(ctx)) {
4059 case DES_CBC_MECH_INFO_TYPE:
4060 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT);
4061 break;
4062 case DES3_CBC_MECH_INFO_TYPE:
4063 error = dca_3desfinal(ctx, plaintext, DR_DECRYPT | DR_TRIPLE);
4064 break;
4065 default:
4066 /* Should never reach here */
4067 cmn_err(CE_WARN, "dca_decrypt_final: unexpected mech type "
4068 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4069 error = CRYPTO_MECHANISM_INVALID;
4072 DBG(softc, DENTRY, "dca_decrypt_final: done, err = 0x%x", error);
4074 return (error);
4077 /* ARGSUSED */
4078 static int
4079 dca_decrypt_atomic(crypto_provider_handle_t provider,
4080 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4081 crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
4082 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4084 int error = CRYPTO_FAILED;
4085 dca_t *softc = (dca_t *)provider;
4087 DBG(softc, DENTRY, "dca_decrypt_atomic: started");
4089 if (ctx_template != NULL)
4090 return (CRYPTO_ARGUMENTS_BAD);
4092 /* handle inplace ops */
4093 if (!plaintext) {
4094 plaintext = ciphertext;
4097 /* check mechanism */
4098 switch (mechanism->cm_type) {
4099 case DES_CBC_MECH_INFO_TYPE:
4100 error = dca_3desatomic(provider, session_id, mechanism, key,
4101 ciphertext, plaintext, KM_SLEEP, req,
4102 DR_DECRYPT | DR_ATOMIC);
4103 break;
4104 case DES3_CBC_MECH_INFO_TYPE:
4105 error = dca_3desatomic(provider, session_id, mechanism, key,
4106 ciphertext, plaintext, KM_SLEEP, req,
4107 DR_DECRYPT | DR_TRIPLE | DR_ATOMIC);
4108 break;
4109 case RSA_PKCS_MECH_INFO_TYPE:
4110 case RSA_X_509_MECH_INFO_TYPE:
4111 error = dca_rsaatomic(provider, session_id, mechanism, key,
4112 ciphertext, plaintext, KM_SLEEP, req, DCA_RSA_DEC);
4113 break;
4114 default:
4115 cmn_err(CE_WARN, "dca_decrypt_atomic: unexpected mech type "
4116 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4117 error = CRYPTO_MECHANISM_INVALID;
4120 if ((error != CRYPTO_QUEUED) && (error != CRYPTO_SUCCESS)) {
4121 plaintext->cd_length = 0;
4124 DBG(softc, DENTRY, "dca_decrypt_atomic: done, err = 0x%x", error);
4126 return (error);
4130 * Sign entry points.
4133 /* ARGSUSED */
4134 static int
4135 dca_sign_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
4136 crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
4137 crypto_req_handle_t req)
4139 int error = CRYPTO_FAILED;
4140 dca_t *softc;
4142 softc = DCA_SOFTC_FROM_CTX(ctx);
4143 DBG(softc, DENTRY, "dca_sign_init: started\n");
4145 if (ctx_template != NULL)
4146 return (CRYPTO_ARGUMENTS_BAD);
4148 /* check mechanism */
4149 switch (mechanism->cm_type) {
4150 case RSA_PKCS_MECH_INFO_TYPE:
4151 case RSA_X_509_MECH_INFO_TYPE:
4152 error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
4153 break;
4154 case DSA_MECH_INFO_TYPE:
4155 error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
4156 DCA_DSA_SIGN);
4157 break;
4158 default:
4159 cmn_err(CE_WARN, "dca_sign_init: unexpected mech type "
4160 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4161 error = CRYPTO_MECHANISM_INVALID;
4164 DBG(softc, DENTRY, "dca_sign_init: done, err = 0x%x", error);
4166 if (error == CRYPTO_SUCCESS)
4167 dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
4168 &softc->dca_ctx_list_lock);
4170 return (error);
4173 static int
4174 dca_sign(crypto_ctx_t *ctx, crypto_data_t *data,
4175 crypto_data_t *signature, crypto_req_handle_t req)
4177 int error = CRYPTO_FAILED;
4178 dca_t *softc;
4180 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4181 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4183 softc = DCA_SOFTC_FROM_CTX(ctx);
4184 DBG(softc, DENTRY, "dca_sign: started\n");
4186 /* check mechanism */
4187 switch (DCA_MECH_FROM_CTX(ctx)) {
4188 case RSA_PKCS_MECH_INFO_TYPE:
4189 case RSA_X_509_MECH_INFO_TYPE:
4190 error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGN);
4191 break;
4192 case DSA_MECH_INFO_TYPE:
4193 error = dca_dsa_sign(ctx, data, signature, req);
4194 break;
4195 default:
4196 cmn_err(CE_WARN, "dca_sign: unexpected mech type "
4197 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4198 error = CRYPTO_MECHANISM_INVALID;
4201 DBG(softc, DENTRY, "dca_sign: done, err = 0x%x", error);
4203 return (error);
4206 /* ARGSUSED */
4207 static int
4208 dca_sign_update(crypto_ctx_t *ctx, crypto_data_t *data,
4209 crypto_req_handle_t req)
4211 int error = CRYPTO_MECHANISM_INVALID;
4212 dca_t *softc;
4214 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4215 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4217 softc = DCA_SOFTC_FROM_CTX(ctx);
4218 DBG(softc, DENTRY, "dca_sign_update: started\n");
4220 cmn_err(CE_WARN, "dca_sign_update: unexpected mech type "
4221 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4223 DBG(softc, DENTRY, "dca_sign_update: done, err = 0x%x", error);
4225 return (error);
4228 /* ARGSUSED */
4229 static int
4230 dca_sign_final(crypto_ctx_t *ctx, crypto_data_t *signature,
4231 crypto_req_handle_t req)
4233 int error = CRYPTO_MECHANISM_INVALID;
4234 dca_t *softc;
4236 if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
4237 return (CRYPTO_OPERATION_NOT_INITIALIZED);
4239 softc = DCA_SOFTC_FROM_CTX(ctx);
4240 DBG(softc, DENTRY, "dca_sign_final: started\n");
4242 cmn_err(CE_WARN, "dca_sign_final: unexpected mech type "
4243 "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
4245 DBG(softc, DENTRY, "dca_sign_final: done, err = 0x%x", error);
4247 return (error);
4250 static int
4251 dca_sign_atomic(crypto_provider_handle_t provider,
4252 crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
4253 crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
4254 crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
4256 int error = CRYPTO_FAILED;
4257 dca_t *softc = (dca_t *)provider;
4259 DBG(softc, DENTRY, "dca_sign_atomic: started\n");
4261 if (ctx_template != NULL)
4262 return (CRYPTO_ARGUMENTS_BAD);
4264 /* check mechanism */
4265 switch (mechanism->cm_type) {
4266 case RSA_PKCS_MECH_INFO_TYPE:
4267 case RSA_X_509_MECH_INFO_TYPE:
4268 error = dca_rsaatomic(provider, session_id, mechanism, key,
4269 data, signature, KM_SLEEP, req, DCA_RSA_SIGN);
4270 break;
4271 case DSA_MECH_INFO_TYPE:
4272 error = dca_dsaatomic(provider, session_id, mechanism, key,
4273 data, signature, KM_SLEEP, req, DCA_DSA_SIGN);
4274 break;
4275 default:
4276 cmn_err(CE_WARN, "dca_sign_atomic: unexpected mech type "
4277 "0x%llx\n", (unsigned long long)mechanism->cm_type);
4278 error = CRYPTO_MECHANISM_INVALID;
4281 DBG(softc, DENTRY, "dca_sign_atomic: done, err = 0x%x", error);
4283 return (error);
/* ARGSUSED */
static int
dca_sign_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;

	softc = DCA_SOFTC_FROM_CTX(ctx);
	DBG(softc, DENTRY, "dca_sign_recover_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
static int
dca_sign_recover(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_data_t *signature, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	softc = DCA_SOFTC_FROM_CTX(ctx);
	DBG(softc, DENTRY, "dca_sign_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, data, signature, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover: done, err = 0x%x", error);

	return (error);
}
static int
dca_sign_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_sign_recover_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_RSA_SIGNR);
		break;
	default:
		cmn_err(CE_WARN, "dca_sign_recover_atomic: unexpected mech type"
		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_sign_recover_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Verify entry points.
 */
/* ARGSUSED */
static int
dca_verify_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;

	softc = DCA_SOFTC_FROM_CTX(ctx);
	DBG(softc, DENTRY, "dca_verify_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsainit(ctx, mechanism, key, KM_SLEEP,
		    DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_init: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
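
/*
 * Note the operand order in the RSA case below: the signature and data
 * arguments are passed to dca_rsastart() in the opposite order from
 * dca_sign(), since the public-key operation runs on the signature.
 */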
static int
dca_verify(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	softc = DCA_SOFTC_FROM_CTX(ctx);
	DBG(softc, DENTRY, "dca_verify: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFY);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsa_verify(ctx, data, signature, req);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify: done, err = 0x%x", error);

	return (error);
}
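
/*
 * As with signing, multi-part verification is not supported; the
 * update and final entry points below always fail with
 * CRYPTO_MECHANISM_INVALID.
 */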
/* ARGSUSED */
static int
dca_verify_update(crypto_ctx_t *ctx, crypto_data_t *data,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	softc = DCA_SOFTC_FROM_CTX(ctx);
	DBG(softc, DENTRY, "dca_verify_update: started\n");

	cmn_err(CE_WARN, "dca_verify_update: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_verify_update: done, err = 0x%x", error);

	return (error);
}
/* ARGSUSED */
static int
dca_verify_final(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	softc = DCA_SOFTC_FROM_CTX(ctx);
	DBG(softc, DENTRY, "dca_verify_final: started\n");

	cmn_err(CE_WARN, "dca_verify_final: unexpected mech type "
	    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));

	DBG(softc, DENTRY, "dca_verify_final: done, err = 0x%x", error);

	return (error);
}
static int
dca_verify_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFY);
		break;
	case DSA_MECH_INFO_TYPE:
		error = dca_dsaatomic(provider, session_id, mechanism, key,
		    data, signature, KM_SLEEP, req, DCA_DSA_VRFY);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_atomic: unexpected mech type "
		    "0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY, "dca_verify_atomic: done, err = 0x%x", error);

	return (error);
}
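
/*
 * Verify-recover (recovering the data from the signature) is offered
 * for the RSA mechanisms only; anything else fails the mechanism check.
 */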
/* ARGSUSED */
static int
dca_verify_recover_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;

	softc = DCA_SOFTC_FROM_CTX(ctx);
	DBG(softc, DENTRY, "dca_verify_recover_init: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsainit(ctx, mechanism, key, KM_SLEEP);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover_init: unexpected mech type"
		    " 0x%llx\n", (unsigned long long)mechanism->cm_type);
	}

	DBG(softc, DENTRY, "dca_verify_recover_init: done, err = 0x%x", error);

	if (error == CRYPTO_SUCCESS)
		dca_enlist2(&softc->dca_ctx_list, ctx->cc_provider_private,
		    &softc->dca_ctx_list_lock);

	return (error);
}
static int
dca_verify_recover(crypto_ctx_t *ctx, crypto_data_t *signature,
    crypto_data_t *data, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc;

	if (!ctx || !ctx->cc_provider || !ctx->cc_provider_private)
		return (CRYPTO_OPERATION_NOT_INITIALIZED);

	softc = DCA_SOFTC_FROM_CTX(ctx);
	DBG(softc, DENTRY, "dca_verify_recover: started\n");

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsastart(ctx, signature, data, req, DCA_RSA_VRFYR);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
	}

	DBG(softc, DENTRY, "dca_verify_recover: done, err = 0x%x", error);

	return (error);
}
static int
dca_verify_recover_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *signature,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int error = CRYPTO_MECHANISM_INVALID;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_verify_recover_atomic: started\n");

	if (ctx_template != NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	/* check mechanism */
	switch (mechanism->cm_type) {
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		error = dca_rsaatomic(provider, session_id, mechanism, key,
		    signature, data, KM_SLEEP, req, DCA_RSA_VRFYR);
		break;
	default:
		cmn_err(CE_WARN, "dca_verify_recover_atomic: unexpected mech "
		    "type 0x%llx\n", (unsigned long long)mechanism->cm_type);
		error = CRYPTO_MECHANISM_INVALID;
	}

	DBG(softc, DENTRY,
	    "dca_verify_recover_atomic: done, err = 0x%x", error);

	return (error);
}

/*
 * Random number entry points.
 */
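
/*
 * The RNG is exposed through the asymmetric provider slot; see
 * ext_info_asym() below, which sets CRYPTO_EXTF_RNG.
 */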
/* ARGSUSED */
static int
dca_generate_random(crypto_provider_handle_t provider,
    crypto_session_id_t session_id,
    uchar_t *buf, size_t len, crypto_req_handle_t req)
{
	int error = CRYPTO_FAILED;
	dca_t *softc = (dca_t *)provider;

	DBG(softc, DENTRY, "dca_generate_random: started");

	error = dca_rng(softc, buf, len, req);

	DBG(softc, DENTRY, "dca_generate_random: done, err = 0x%x", error);

	return (error);
}

/*
 * Context management entry points.
 */

int
dca_free_context(crypto_ctx_t *ctx)
{
	int error = CRYPTO_SUCCESS;
	dca_t *softc;

	softc = DCA_SOFTC_FROM_CTX(ctx);
	DBG(softc, DENTRY, "dca_free_context: entered");

	if (ctx->cc_provider_private == NULL)
		return (error);

	dca_rmlist2(ctx->cc_provider_private, &softc->dca_ctx_list_lock);

	error = dca_free_context_low(ctx);

	DBG(softc, DENTRY, "dca_free_context: done, err = 0x%x", error);

	return (error);
}
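
/*
 * Dispatch to the per-mechanism context destructor.  Called from
 * dca_free_context() above and from dca_free_context_list() at detach.
 */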
static int
dca_free_context_low(crypto_ctx_t *ctx)
{
	int error = CRYPTO_SUCCESS;

	/* check mechanism */
	switch (DCA_MECH_FROM_CTX(ctx)) {
	case DES_CBC_MECH_INFO_TYPE:
	case DES3_CBC_MECH_INFO_TYPE:
		dca_3desctxfree(ctx);
		break;
	case RSA_PKCS_MECH_INFO_TYPE:
	case RSA_X_509_MECH_INFO_TYPE:
		dca_rsactxfree(ctx);
		break;
	case DSA_MECH_INFO_TYPE:
		dca_dsactxfree(ctx);
		break;
	default:
		/* Should never reach here */
		cmn_err(CE_WARN, "dca_free_context_low: unexpected mech type "
		    "0x%llx\n", (unsigned long long)DCA_MECH_FROM_CTX(ctx));
		error = CRYPTO_MECHANISM_INVALID;
	}

	return (error);
}

/* Free any private contexts the framework never freed; called from detach. */
static void
dca_free_context_list(dca_t *dca)
{
	dca_listnode_t *node;
	crypto_ctx_t ctx;

	(void) memset(&ctx, 0, sizeof (ctx));
	ctx.cc_provider = dca;

	while ((node = dca_delist2(&dca->dca_ctx_list,
	    &dca->dca_ctx_list_lock)) != NULL) {
		ctx.cc_provider_private = node;
		(void) dca_free_context_low(&ctx);
	}
}
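
/*
 * Provider extended-info entry points.  The symmetric and asymmetric
 * slots share ext_info_base() and differ only in their identity string
 * and in the RNG flag advertised by the asymmetric slot.
 */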
static int
ext_info_sym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
{
	return (ext_info_base(prov, ext_info, cfreq, IDENT_SYM));
}
static int
ext_info_asym(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq)
{
	int rv;

	rv = ext_info_base(prov, ext_info, cfreq, IDENT_ASYM);
	/* The asymmetric cipher slot supports random */
	ext_info->ei_flags |= CRYPTO_EXTF_RNG;

	return (rv);
}
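
/*
 * The ei_* string fields are fixed-width and space-padded in the style
 * of PKCS#11 token info rather than NUL-terminated, hence the memset()
 * padding after each formatted string below.
 */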
/* ARGSUSED */
static int
ext_info_base(crypto_provider_handle_t prov,
    crypto_provider_ext_info_t *ext_info, crypto_req_handle_t cfreq, char *id)
{
	dca_t *dca = (dca_t *)prov;
	int len;

	/* Label */
	(void) sprintf((char *)ext_info->ei_label, "%s/%d %s",
	    ddi_driver_name(dca->dca_dip), ddi_get_instance(dca->dca_dip), id);
	len = strlen((char *)ext_info->ei_label);
	(void) memset(ext_info->ei_label + len, ' ',
	    CRYPTO_EXT_SIZE_LABEL - len);

	/* Manufacturer ID */
	(void) sprintf((char *)ext_info->ei_manufacturerID, "%s",
	    DCA_MANUFACTURER_ID);
	len = strlen((char *)ext_info->ei_manufacturerID);
	(void) memset(ext_info->ei_manufacturerID + len, ' ',
	    CRYPTO_EXT_SIZE_MANUF - len);

	/* Model; use an explicit "%s" so the model string is never a format */
	(void) sprintf((char *)ext_info->ei_model, "%s", dca->dca_model);

	DBG(dca, DWARN, "kCF MODEL: %s", (char *)ext_info->ei_model);

	len = strlen((char *)ext_info->ei_model);
	(void) memset(ext_info->ei_model + len, ' ',
	    CRYPTO_EXT_SIZE_MODEL - len);

	/* Serial Number. Blank for Deimos */
	(void) memset(ext_info->ei_serial_number, ' ', CRYPTO_EXT_SIZE_SERIAL);

	ext_info->ei_flags = CRYPTO_EXTF_WRITE_PROTECTED;

	ext_info->ei_max_session_count = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_max_pin_len = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_min_pin_len = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_total_public_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_free_public_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_total_private_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_free_private_memory = CRYPTO_UNAVAILABLE_INFO;
	ext_info->ei_hardware_version.cv_major = 0;
	ext_info->ei_hardware_version.cv_minor = 0;
	ext_info->ei_firmware_version.cv_major = 0;
	ext_info->ei_firmware_version.cv_minor = 0;

	/* Time. No need to be supplied for token without a clock */
	ext_info->ei_time[0] = '\000';

	return (CRYPTO_SUCCESS);
}
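
/*
 * Fault Management Architecture (FMA) support.  dca_fma_init() reads
 * the optional "fm-capable" property, registers with the I/O fault
 * services, and switches the register and DMA attributes to their
 * error-flagging variants when any FMA capability is enabled.
 */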
static void
dca_fma_init(dca_t *dca)
{
	ddi_iblock_cookie_t fm_ibc;
	int fm_capabilities = DDI_FM_EREPORT_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
	    DDI_FM_ERRCB_CAPABLE;

	/* Read FMA capabilities from dca.conf file (if present) */
	dca->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, dca->dca_dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    fm_capabilities);

	DBG(dca, DWARN, "dca->fm_capabilities = 0x%x", dca->fm_capabilities);

	/* Only register with IO Fault Services if we have some capability */
	if (dca->fm_capabilities) {
		dca_regsattr.devacc_attr_access = DDI_FLAGERR_ACC;
		dca_dmaattr.dma_attr_flags = DDI_DMA_FLAGERR;

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(dca->dca_dip, &dca->fm_capabilities, &fm_ibc);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities))
			pci_ereport_setup(dca->dca_dip);

		/*
		 * Initialize callback mutex and register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_register(dca->dca_dip, dca_fm_error_cb,
			    (void *)dca);
		}
	} else {
		/*
		 * These fields have to be cleared of FMA if there are no
		 * FMA capabilities at runtime.
		 */
		dca_regsattr.devacc_attr_access = DDI_DEFAULT_ACC;
		dca_dmaattr.dma_attr_flags = 0;
	}
}
static void
dca_fma_fini(dca_t *dca)
{
	/* Only unregister FMA capabilities if we registered some */
	if (dca->fm_capabilities) {
		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(dca->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			pci_ereport_teardown(dca->dca_dip);
		}

		/*
		 * Free callback mutex and un-register error callback if
		 * error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(dca->fm_capabilities)) {
			ddi_fm_handler_unregister(dca->dca_dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(dca->dca_dip);
		DBG(dca, DWARN, "fm_capable() = 0x%x",
		    ddi_fm_capable(dca->dca_dip));
	}
}

/*
 * The IO fault service error handling callback function.
 */
/*ARGSUSED*/
static int
dca_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	dca_t *dca = (dca_t *)impl_data;

	pci_ereport_post(dip, err, NULL);
	if (err->fme_status == DDI_FM_FATAL) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    DCA_FM_ECLASS_NONE, dca_ena(0), CRYPTO_DEVICE_ERROR,
		    "fatal PCI fault in FMA callback.");
	}
	return (err->fme_status);
}
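
/*
 * Post-access checks: pick up any fault flagged on the handle and, on
 * error, report a datapath failure with an incremented ENA so the
 * resulting ereport is chained to the underlying error.
 */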
static int
dca_check_acc_handle(dca_t *dca, ddi_acc_handle_t handle,
    dca_fma_eclass_t eclass_index)
{
	ddi_fm_error_t de;
	int version = 0;

	ddi_fm_acc_err_get(handle, &de, version);
	if (de.fme_status != DDI_FM_OK) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    eclass_index, fm_ena_increment(de.fme_ena),
		    CRYPTO_DEVICE_ERROR, "");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
int
dca_check_dma_handle(dca_t *dca, ddi_dma_handle_t handle,
    dca_fma_eclass_t eclass_index)
{
	ddi_fm_error_t de;
	int version = 0;

	ddi_fm_dma_err_get(handle, &de, version);
	if (de.fme_status != DDI_FM_OK) {
		dca_failure(dca, DDI_DATAPATH_FAULT,
		    eclass_index, fm_ena_increment(de.fme_ena),
		    CRYPTO_DEVICE_ERROR, "");
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
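
/*
 * Return a fresh ENA for a new error, or an incremented one when
 * chaining ereports to an existing error.
 */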
static uint64_t
dca_ena(uint64_t ena)
{
	if (ena == 0)
		ena = fm_ena_generate(0, FM_ENA_FMT1);
	else
		ena = fm_ena_increment(ena);
	return (ena);
}
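
/*
 * Pick the ereport class-name table by card model: model strings
 * containing "500" use the SCA-500 table, all others the SCA-1000 one.
 */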
static char *
dca_fma_eclass_string(char *model, dca_fma_eclass_t index)
{
	if (strstr(model, "500"))
		return (dca_fma_eclass_sca500[index]);
	else
		return (dca_fma_eclass_sca1000[index]);
}