/* $NetBSD: glxsb.c,v 1.8 2009/03/18 17:06:44 cegger Exp $ */
/* $OpenBSD: glxsb.c,v 1.7 2007/02/12 14:31:45 tom Exp $ */

/*
 * Copyright (c) 2006 Tom Cosgrove <tom@openbsd.org>
 * Copyright (c) 2003, 2004 Theo de Raadt
 * Copyright (c) 2003 Jason Wright
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the security block on the AMD Geode LX processors
 * http://www.amd.com/files/connectivitysolutions/geode/geode_lx/33234d_lx_ds.pdf
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: glxsb.c,v 1.8 2009/03/18 17:06:44 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <sys/types.h>
#include <sys/callout.h>

#include <machine/cpufunc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <opencrypto/cryptodev.h>
#include <crypto/rijndael/rijndael.h>

#define SB_GLD_MSR_CAP          0x58002000  /* RO - Capabilities */
#define SB_GLD_MSR_CONFIG       0x58002001  /* RW - Master Config */
#define SB_GLD_MSR_SMI          0x58002002  /* RW - SMI */
#define SB_GLD_MSR_ERROR        0x58002003  /* RW - Error */
#define SB_GLD_MSR_PM           0x58002004  /* RW - Power Mgmt */
#define SB_GLD_MSR_DIAG         0x58002005  /* RW - Diagnostic */
#define SB_GLD_MSR_CTRL         0x58002006  /* RW - Security Block Cntrl */

/* For GLD_MSR_CTRL: */

#define SB_GMC_DIV0             0x0000      /* AES update divisor values */
#define SB_GMC_DIV1             0x0001
#define SB_GMC_DIV2             0x0002
#define SB_GMC_DIV3             0x0003
#define SB_GMC_DIV_MASK         0x0003
#define SB_GMC_SBI              0x0004      /* AES swap bits */
#define SB_GMC_SBY              0x0008      /* AES swap bytes */
#define SB_GMC_TW               0x0010      /* Time write (EEPROM) */
#define SB_GMC_T_SEL0           0x0000      /* RNG post-proc: none */
#define SB_GMC_T_SEL1           0x0100      /* RNG post-proc: LFSR */
#define SB_GMC_T_SEL2           0x0200      /* RNG post-proc: whitener */
#define SB_GMC_T_SEL3           0x0300      /* RNG LFSR+whitener */
#define SB_GMC_T_SEL_MASK       0x0300
#define SB_GMC_T_NE             0x0400      /* Noise (generator) Enable */
#define SB_GMC_T_TM             0x0800      /* RNG test mode */

/* Security Block configuration/control registers (offsets from base) */

#define SB_CTL_A                0x0000      /* RW - SB Control A */
#define SB_CTL_B                0x0004      /* RW - SB Control B */
#define SB_AES_INT              0x0008      /* RW - SB AES Interrupt */
#define SB_SOURCE_A             0x0010      /* RW - Source A */
#define SB_DEST_A               0x0014      /* RW - Destination A */
#define SB_LENGTH_A             0x0018      /* RW - Length A */
#define SB_SOURCE_B             0x0020      /* RW - Source B */
#define SB_DEST_B               0x0024      /* RW - Destination B */
#define SB_LENGTH_B             0x0028      /* RW - Length B */
#define SB_WKEY                 0x0030      /* WO - Writable Key 0-3 */
#define SB_WKEY_0               0x0030      /* WO - Writable Key 0 */
#define SB_WKEY_1               0x0034      /* WO - Writable Key 1 */
#define SB_WKEY_2               0x0038      /* WO - Writable Key 2 */
#define SB_WKEY_3               0x003C      /* WO - Writable Key 3 */
#define SB_CBC_IV               0x0040      /* RW - CBC IV 0-3 */
#define SB_CBC_IV_0             0x0040      /* RW - CBC IV 0 */
#define SB_CBC_IV_1             0x0044      /* RW - CBC IV 1 */
#define SB_CBC_IV_2             0x0048      /* RW - CBC IV 2 */
#define SB_CBC_IV_3             0x004C      /* RW - CBC IV 3 */
#define SB_RANDOM_NUM           0x0050      /* RW - Random Number */
#define SB_RANDOM_NUM_STATUS    0x0054      /* RW - Random Number Status */
#define SB_EEPROM_COMM          0x0800      /* RW - EEPROM Command */
#define SB_EEPROM_ADDR          0x0804      /* RW - EEPROM Address */
#define SB_EEPROM_DATA          0x0808      /* RW - EEPROM Data */
#define SB_EEPROM_SEC_STATE     0x080C      /* RW - EEPROM Security State */

/* For SB_CTL_A and _B */

#define SB_CTL_ST               0x0001      /* Start operation (enc/dec) */
#define SB_CTL_ENC              0x0002      /* Encrypt (0 is decrypt) */
#define SB_CTL_DEC              0x0000      /* Decrypt */
#define SB_CTL_WK               0x0004      /* Use writable key (we set) */
#define SB_CTL_DC               0x0008      /* Destination coherent */
#define SB_CTL_SC               0x0010      /* Source coherent */
#define SB_CTL_CBC              0x0020      /* CBC (0 is ECB) */

/* For SB_AES_INT */

#define SB_AI_DISABLE_AES_A     0x0001      /* Disable AES A compl int */
#define SB_AI_ENABLE_AES_A      0x0000      /* Enable AES A compl int */
#define SB_AI_DISABLE_AES_B     0x0002      /* Disable AES B compl int */
#define SB_AI_ENABLE_AES_B      0x0000      /* Enable AES B compl int */
#define SB_AI_DISABLE_EEPROM    0x0004      /* Disable EEPROM op compl int */
#define SB_AI_ENABLE_EEPROM     0x0000      /* Enable EEPROM op compl int */
#define SB_AI_AES_A_COMPLETE    0x0100      /* AES A operation complete */
#define SB_AI_AES_B_COMPLETE    0x0200      /* AES B operation complete */
#define SB_AI_EEPROM_COMPLETE   0x0400      /* EEPROM operation complete */

#define SB_RNS_TRNG_VALID       0x0001      /* in SB_RANDOM_NUM_STATUS */

#define SB_MEM_SIZE             0x0810      /* Size of memory block */

#define SB_AES_ALIGN            0x0010      /* Source and dest buffers */
                                            /* must be 16-byte aligned */
#define SB_AES_BLOCK_SIZE       0x0010

/*
 * The Geode LX security block AES acceleration doesn't perform scatter-
 * gather: it just takes source and destination addresses.  Therefore the
 * plain- and ciphertexts need to be contiguous.  To this end, we allocate
 * a buffer for both, and accept the overhead of copying in and out.  If
 * the number of bytes in one operation is bigger than allowed for by the
 * buffer (the buffer is twice the maximum length, as it holds both input
 * and output), then we have to perform multiple encryptions/decryptions.
 */
#define GLXSB_MAX_AES_LEN       16384
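
/*
 * Illustrative sketch only: glxsb_example_npasses() below is not part of
 * this driver; it merely spells out the chunking arithmetic described in
 * the comment above, i.e. a request of "len" bytes needs
 * ceil(len / GLXSB_MAX_AES_LEN) passes through the shared buffer.
 */
#if 0
static int
glxsb_example_npasses(int len)
{

        /* e.g. len = 40000 -> (40000 + 16383) / 16384 = 3 passes */
        return (len + GLXSB_MAX_AES_LEN - 1) / GLXSB_MAX_AES_LEN;
}
#endif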

struct glxsb_dma_map {
        bus_dmamap_t            dma_map;
        bus_dma_segment_t       dma_seg;
};

struct glxsb_session {
        uint8_t                 ses_iv[SB_AES_BLOCK_SIZE];
};

struct glxsb_softc {
        bus_space_tag_t         sc_iot;
        bus_space_handle_t      sc_ioh;
        struct callout          sc_co;

        bus_dma_tag_t           sc_dmat;
        struct glxsb_dma_map    sc_dma;

        struct glxsb_session    *sc_sessions;

        rndsource_element_t     sc_rnd_source;
};

int     glxsb_match(device_t, cfdata_t, void *);
void    glxsb_attach(device_t, device_t, void *);
void    glxsb_rnd(void *);

CFATTACH_DECL_NEW(glxsb, sizeof(struct glxsb_softc),
    glxsb_match, glxsb_attach, NULL, NULL);

#define GLXSB_SESSION(sid)      ((sid) & 0x0fffffff)
#define GLXSB_SID(crd,ses)      (((crd) << 28) | ((ses) & 0x0fffffff))
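
/*
 * Worked example (illustrative, not from the original source): GLXSB_SID()
 * packs the crypto driver index into the top four bits and the session
 * index into the low 28 bits, so GLXSB_SID(0, 5) == 0x00000005 and
 * GLXSB_SESSION(GLXSB_SID(0, 5)) == 5.
 */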

int     glxsb_crypto_setup(struct glxsb_softc *);
int     glxsb_crypto_newsession(void *, uint32_t *, struct cryptoini *);
int     glxsb_crypto_process(void *, struct cryptop *, int);
int     glxsb_crypto_freesession(void *, uint64_t);
static __inline void glxsb_aes(struct glxsb_softc *, uint32_t, uint32_t,
            uint32_t, void *, int, void *);

int     glxsb_dma_alloc(struct glxsb_softc *, int, struct glxsb_dma_map *);
void    glxsb_dma_pre_op(struct glxsb_softc *, struct glxsb_dma_map *);
void    glxsb_dma_post_op(struct glxsb_softc *, struct glxsb_dma_map *);
void    glxsb_dma_free(struct glxsb_softc *, struct glxsb_dma_map *);

int
glxsb_match(device_t parent, cfdata_t match, void *aux)
{
        struct pci_attach_args *pa = aux;

        if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_AMD &&
            PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_AMD_GEODELX_AES)
                return 1;

        return 0;
}

void
glxsb_attach(device_t parent, device_t self, void *aux)
{
        struct glxsb_softc *sc = device_private(self);
        struct pci_attach_args *pa = aux;

        msr = rdmsr(SB_GLD_MSR_CAP);
        if ((msr & 0xFFFF00) != 0x130400) {
                printf(": unknown ID 0x%x\n", (int)((msr & 0xFFFF00) >> 8));
                return;
        }

        /* printf(": revision %d", (int)(msr & 0xFF)); */

        /* Map in the security block configuration/control registers */
        if (pci_mapreg_map(pa, PCI_MAPREG_START,
            PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
            &sc->sc_iot, &sc->sc_ioh, &membase, &memsize)) {
                printf(": can't find mem space\n");
                return;
        }

        /*
         * Configure the Security Block.
         *
         * We want to enable the noise generator (T_NE), and enable the
         * linear feedback shift register and whitener post-processing
         * (T_SEL = 3).  Also ensure that test mode (deterministic values)
         * is disabled.
         */
        msr = rdmsr(SB_GLD_MSR_CTRL);
        msr &= ~(SB_GMC_T_TM | SB_GMC_T_SEL_MASK);
        msr |= SB_GMC_T_NE | SB_GMC_T_SEL3;

        msr |= SB_GMC_SBI | SB_GMC_SBY;         /* for AES, if necessary */

        wrmsr(SB_GLD_MSR_CTRL, msr);
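
        /*
         * Worked values (illustrative comment, not from the original
         * source): after the updates above the control MSR has
         * SB_GMC_T_SEL3 | SB_GMC_T_NE (0x0300 | 0x0400 = 0x0700) set and
         * SB_GMC_T_TM (0x0800) clear, i.e. noise source on, LFSR plus
         * whitener post-processing, test mode off.
         */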

        rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
            RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE);

        /* Install a periodic collector for the "true" (AMD's word) RNG */
        callout_init(&sc->sc_co, 0);
        callout_setfunc(&sc->sc_co, glxsb_rnd, sc);

        /* We don't have an interrupt handler, so disable completion INTs */
        intr = SB_AI_DISABLE_AES_A | SB_AI_DISABLE_AES_B |
            SB_AI_DISABLE_EEPROM | SB_AI_AES_A_COMPLETE |
            SB_AI_AES_B_COMPLETE | SB_AI_EEPROM_COMPLETE;
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_AES_INT, intr);

        sc->sc_dmat = pa->pa_dmat;

        if (glxsb_crypto_setup(sc))

void
glxsb_rnd(void *v)
{
        struct glxsb_softc *sc = v;
        uint32_t status, value;

        status = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
            SB_RANDOM_NUM_STATUS);
        if (status & SB_RNS_TRNG_VALID) {
                value = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
                    SB_RANDOM_NUM);
                rnd_add_uint32(&sc->sc_rnd_source, value);
        }

        callout_schedule(&sc->sc_co, (hz > 100) ? (hz / 100) : 1);
}
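
/*
 * Polling-rate arithmetic (illustrative comment, not from the original
 * source): callout_schedule() above re-arms the callout every hz/100 ticks
 * when hz > 100, so with hz = 1000 the RNG is polled roughly every 10 ms;
 * with hz <= 100 it is polled on every tick.
 */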

int
glxsb_crypto_setup(struct glxsb_softc *sc)
{

        /* Allocate a contiguous DMA-able buffer to work in */
        if (glxsb_dma_alloc(sc, GLXSB_MAX_AES_LEN * 2, &sc->sc_dma) != 0)
                return 0;

        sc->sc_cid = crypto_get_driverid(0);

        crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
            glxsb_crypto_newsession, glxsb_crypto_freesession,
            glxsb_crypto_process, sc);

        sc->sc_nsessions = 0;

        return 1;
}

int
glxsb_crypto_newsession(void *aux, uint32_t *sidp, struct cryptoini *cri)
{
        struct glxsb_softc *sc = aux;
        struct glxsb_session *ses = NULL;

        if (sc == NULL || sidp == NULL || cri == NULL ||
            cri->cri_next != NULL || cri->cri_alg != CRYPTO_AES_CBC ||
            cri->cri_klen != 128)
                return EINVAL;

        for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
                if (sc->sc_sessions[sesn].ses_used == 0) {
                        ses = &sc->sc_sessions[sesn];
                        break;
                }
        }

        if (ses == NULL) {
                sesn = sc->sc_nsessions;
                ses = malloc((sesn + 1) * sizeof(*ses), M_DEVBUF, M_NOWAIT);
                if (ses == NULL)
                        return ENOMEM;
                if (sesn != 0) {
                        memcpy(ses, sc->sc_sessions, sesn * sizeof(*ses));
                        memset(sc->sc_sessions, 0, sesn * sizeof(*ses));
                        free(sc->sc_sessions, M_DEVBUF);
                }
                sc->sc_sessions = ses;
                ses = &sc->sc_sessions[sesn];
                sc->sc_nsessions++;
        }

        memset(ses, 0, sizeof(*ses));
        ses->ses_used = 1;

        arc4randbytes(ses->ses_iv, sizeof(ses->ses_iv));
        ses->ses_klen = cri->cri_klen;

        /* Copy the key (Geode LX wants the primary key only) */
        memcpy(ses->ses_key, cri->cri_key, sizeof(ses->ses_key));

        *sidp = GLXSB_SID(0, sesn);
        return 0;
}

int
glxsb_crypto_freesession(void *aux, uint64_t tid)
{
        struct glxsb_softc *sc = aux;
        uint32_t sid = ((uint32_t)tid) & 0xffffffff;

        sesn = GLXSB_SESSION(sid);
        if (sesn >= sc->sc_nsessions)
                return EINVAL;

        memset(&sc->sc_sessions[sesn], 0, sizeof(sc->sc_sessions[sesn]));
        return 0;
}

/*
 * Must be called at splnet() or higher
 */
static __inline void
glxsb_aes(struct glxsb_softc *sc, uint32_t control, uint32_t psrc,
    uint32_t pdst, void *key, int len, void *iv)
{

        if ((len % SB_AES_BLOCK_SIZE) != 0) {
                printf("%s: len must be a multiple of 16 (not %d)\n",
                    device_xname(sc->sc_dev), len);
                return;
        }

        /* Set the source address */
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_SOURCE_A, psrc);

        /* Set the destination address */
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_DEST_A, pdst);

        /* Set the data length */
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_LENGTH_A, len);

        /* Set the IV, if we have one (CBC mode) */
        if (iv != NULL) {
                bus_space_write_region_4(sc->sc_iot, sc->sc_ioh,
                    SB_CBC_IV, iv, 4);
                control |= SB_CTL_CBC;
        }

        /* Set the key */
        bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, SB_WKEY, key, 4);

        /* Ask the security block to do it */
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, SB_CTL_A,
            control | SB_CTL_WK | SB_CTL_DC | SB_CTL_SC | SB_CTL_ST);

        /*
         * Now wait until it is done.
         *
         * We do a busy wait.  Obviously the number of iterations of
         * the loop required to perform the AES operation depends upon
         * the number of bytes to process.
         *
         * On a 500 MHz Geode LX we see
         *
         *      length (bytes)  typical max iterations
         *
         * Since we have a maximum size of operation defined in
         * GLXSB_MAX_AES_LEN, we use this constant to decide how long
         * to wait.  Allow an order of magnitude longer than it should
         * really take, just in case.
         */
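        /*
         * Worked bound (illustrative, not from the original comment): with
         * GLXSB_MAX_AES_LEN = 16384 the loop below gives up after
         * 16384 * 10 = 163840 polls of SB_CTL_A.
         */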
        for (i = 0; i < GLXSB_MAX_AES_LEN * 10; i++) {
                status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, SB_CTL_A);

                if ((status & SB_CTL_ST) == 0)          /* Done */
                        return;
        }

        aprint_error_dev(sc->sc_dev, "operation failed to complete\n");
}

int
glxsb_crypto_process(void *aux, struct cryptop *crp, int hint)
{
        struct glxsb_softc *sc = aux;
        struct glxsb_session *ses;
        struct cryptodesc *crd;
        char *op_src, *op_dst;
        uint32_t op_psrc, op_pdst;
        uint8_t op_iv[SB_AES_BLOCK_SIZE], *piv;

        if (crp == NULL || crp->crp_callback == NULL) {
                err = EINVAL;
                goto out;
        }

        crd = crp->crp_desc;
        if (crd == NULL || crd->crd_next != NULL ||
            crd->crd_alg != CRYPTO_AES_CBC ||
            (crd->crd_len % SB_AES_BLOCK_SIZE) != 0) {
                err = EINVAL;
                goto out;
        }

        sesn = GLXSB_SESSION(crp->crp_sid);
        if (sesn >= sc->sc_nsessions) {
                err = EINVAL;
                goto out;
        }

        ses = &sc->sc_sessions[sesn];

        /* How much of our buffer will we need to use? */
        xlen = crd->crd_len > GLXSB_MAX_AES_LEN ?
            GLXSB_MAX_AES_LEN : crd->crd_len;

        /*
         * XXX Check if we can have input == output on Geode LX.
         * XXX In the meantime, use two separate (adjacent) buffers.
         */
        op_src = sc->sc_dma.dma_vaddr;
        op_dst = (char *)sc->sc_dma.dma_vaddr + xlen;

        op_psrc = sc->sc_dma.dma_paddr;
        op_pdst = sc->sc_dma.dma_paddr + xlen;
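
        /*
         * Layout sketch (illustrative comment, not from the original
         * source): the DMA buffer allocated in glxsb_crypto_setup() is
         * GLXSB_MAX_AES_LEN * 2 bytes; for this request, bytes [0, xlen)
         * hold the input handed to the security block and the output is
         * written starting at offset xlen.
         */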

        if (crd->crd_flags & CRD_F_ENCRYPT) {
                control = SB_CTL_ENC;
                if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                        memcpy(op_iv, crd->crd_iv, sizeof(op_iv));
                else
                        memcpy(op_iv, ses->ses_iv, sizeof(op_iv));

                if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
                        if (crp->crp_flags & CRYPTO_F_IMBUF)
                                m_copyback((struct mbuf *)crp->crp_buf,
                                    crd->crd_inject, sizeof(op_iv), op_iv);
                        else if (crp->crp_flags & CRYPTO_F_IOV)
                                cuio_copyback((struct uio *)crp->crp_buf,
                                    crd->crd_inject, sizeof(op_iv), op_iv);
                        else
                                bcopy(op_iv,
                                    (char *)crp->crp_buf + crd->crd_inject,
                                    sizeof(op_iv));
                }
        } else {
                control = SB_CTL_DEC;
                if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                        memcpy(op_iv, crd->crd_iv, sizeof(op_iv));
                else {
                        if (crp->crp_flags & CRYPTO_F_IMBUF)
                                m_copydata((struct mbuf *)crp->crp_buf,
                                    crd->crd_inject, sizeof(op_iv), op_iv);
                        else if (crp->crp_flags & CRYPTO_F_IOV)
                                cuio_copydata((struct uio *)crp->crp_buf,
                                    crd->crd_inject, sizeof(op_iv), op_iv);
                        else
                                bcopy((char *)crp->crp_buf + crd->crd_inject,
                                    op_iv, sizeof(op_iv));
                }
        }

        offset = 0;
        tlen = crd->crd_len;
        piv = op_iv;

        /* Process the data in GLXSB_MAX_AES_LEN chunks */
        while (tlen > 0) {
                len = (tlen > GLXSB_MAX_AES_LEN) ? GLXSB_MAX_AES_LEN : tlen;

                if (crp->crp_flags & CRYPTO_F_IMBUF)
                        m_copydata((struct mbuf *)crp->crp_buf,
                            crd->crd_skip + offset, len, op_src);
                else if (crp->crp_flags & CRYPTO_F_IOV)
                        cuio_copydata((struct uio *)crp->crp_buf,
                            crd->crd_skip + offset, len, op_src);
                else
                        bcopy((char *)crp->crp_buf + crd->crd_skip + offset,
                            op_src, len);

                glxsb_dma_pre_op(sc, &sc->sc_dma);

                glxsb_aes(sc, control, op_psrc, op_pdst, ses->ses_key,
                    len, op_iv);

                glxsb_dma_post_op(sc, &sc->sc_dma);

                if (crp->crp_flags & CRYPTO_F_IMBUF)
                        m_copyback((struct mbuf *)crp->crp_buf,
                            crd->crd_skip + offset, len, op_dst);
                else if (crp->crp_flags & CRYPTO_F_IOV)
                        cuio_copyback((struct uio *)crp->crp_buf,
                            crd->crd_skip + offset, len, op_dst);
                else
                        memcpy((char *)crp->crp_buf + crd->crd_skip + offset,
                            op_dst, len);

                offset += len;
                tlen -= len;

                if (tlen <= 0) {        /* Ideally, just == 0 */
                        /* Finished - put the IV in session IV */
                        piv = ses->ses_iv;
                }

                /*
                 * Copy out last block for use as next iteration/session IV.
                 *
                 * piv is set to op_iv[] before the loop starts, but is
                 * set to ses->ses_iv if we're going to exit the loop this
                 * time round.
                 */
                if (crd->crd_flags & CRD_F_ENCRYPT) {
                        memcpy(piv, op_dst + len - sizeof(op_iv),
                            sizeof(op_iv));
                } else {
                        /* Decryption, only need this if another iteration */
                        if (tlen > 0)
                                memcpy(piv, op_src + len - sizeof(op_iv),
                                    sizeof(op_iv));
                }
        }
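
        /*
         * Note (illustrative, not from the original source): the loop above
         * chains CBC across chunks -- for encryption the next IV is the
         * last ciphertext block just produced (taken from op_dst); for
         * decryption it is the last ciphertext block consumed (saved from
         * op_src before the next copy-in overwrites it).
         */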

        /* All AES processing has now been done. */

        memset(sc->sc_dma.dma_vaddr, 0, xlen * 2);

out:
        crp->crp_etype = err;
        crypto_done(crp);
        return err;
}

int
glxsb_dma_alloc(struct glxsb_softc *sc, int size, struct glxsb_dma_map *dma)
{

        dma->dma_nsegs = 1;
        dma->dma_size = size;

        rc = bus_dmamap_create(sc->sc_dmat, size, dma->dma_nsegs, size,
            0, BUS_DMA_NOWAIT, &dma->dma_map);
        if (rc != 0) {
                aprint_error_dev(sc->sc_dev,
                    "couldn't create DMA map for %d bytes (%d)\n", size, rc);
                goto fail0;
        }

        rc = bus_dmamem_alloc(sc->sc_dmat, size, SB_AES_ALIGN, 0,
            &dma->dma_seg, dma->dma_nsegs, &dma->dma_nsegs, BUS_DMA_NOWAIT);
        if (rc != 0) {
                aprint_error_dev(sc->sc_dev,
                    "couldn't allocate DMA memory of %d bytes (%d)\n",
                    size, rc);
                goto fail1;
        }

        rc = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, 1, size,
            &dma->dma_vaddr, BUS_DMA_NOWAIT);
        if (rc != 0) {
                aprint_error_dev(sc->sc_dev,
                    "couldn't map DMA memory for %d bytes (%d)\n", size, rc);
                goto fail2;
        }

        rc = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
            size, NULL, BUS_DMA_NOWAIT);
        if (rc != 0) {
                aprint_error_dev(sc->sc_dev,
                    "couldn't load DMA memory for %d bytes (%d)\n", size, rc);
                goto fail3;
        }

        dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;

        return 0;

fail3:
        bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail2:
        bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nsegs);
fail1:
        bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail0:
        return rc;
}

void
glxsb_dma_pre_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{

        bus_dmamap_sync(sc->sc_dmat, dma->dma_map, 0, dma->dma_size,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
glxsb_dma_post_op(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{

        bus_dmamap_sync(sc->sc_dmat, dma->dma_map, 0, dma->dma_size,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}

void
glxsb_dma_free(struct glxsb_softc *sc, struct glxsb_dma_map *dma)
{

        bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
        bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
        bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nsegs);
        bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}