1 /* $NetBSD: cgd.c,v 1.63 2009/11/10 20:24:30 christos Exp $ */
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Roland C. Dowdeswell.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.63 2009/11/10 20:24:30 christos Exp $");
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/systm.h>
39 #include <sys/errno.h>
42 #include <sys/malloc.h>
44 #include <sys/ioctl.h>
45 #include <sys/device.h>
47 #include <sys/disklabel.h>
48 #include <sys/fcntl.h>
49 #include <sys/vnode.h>
51 #include <sys/syslog.h>
53 #include <dev/dkvar.h>
54 #include <dev/cgdvar.h>
/* Entry Point Functions */

/*
 * The dev_type_*() macros (from <sys/conf.h>) expand to prototypes with
 * the standard character/block device entry-point signatures; the actual
 * definitions appear later in this file.
 */
static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);
/*
 * Block- and character-device switch tables wiring the cgd entry points
 * into the kernel's devsw framework.  D_DISK marks cgd as a disk-class
 * device.  NOTE(review): the closing "};" of each initializer is not
 * visible in this chunk -- it appears to have been elided by extraction;
 * the tokens below are reproduced as-is.
 */
const struct bdevsw cgd_bdevsw = {
	cgdopen, cgdclose, cgdstrategy, cgdioctl,
	cgddump, cgdsize, D_DISK

const struct cdevsw cgd_cdevsw = {
	cgdopen, cgdclose, cgdread, cgdwrite, cgdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
/* Internal Functions */

static int	cgdstart(struct dk_softc *, struct buf *);
static void	cgdiodone(struct buf *);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, void *, struct lwp *);
/*
 * NOTE(review): the continuation line of the cgdinit() prototype
 * (presumably "struct lwp *);") is elided in this view; the fragment
 * below is reproduced as-is.
 */
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
static void	cgd_cipher(struct cgd_softc *, void *, void *,
		    size_t, daddr_t, size_t, int);
/* Pseudo-disk Interface */

/*
 * dk_intf describes this driver to the generic dksubr(9) pseudo-disk
 * framework; "di" is the handle passed into every dk_*() call below.
 * NOTE(review): the member initializers of the_dkintf and the closing
 * "};" of both initializers are elided in this view.
 */
static struct dk_intf the_dkintf = {
static struct dk_intf *di = &the_dkintf;

static struct dkdriver cgddkdriver = {
	.d_strategy = cgdstrategy,	/* queue I/O through cgdstrategy() */
	.d_minphys = minphys,		/* default transfer-size clamp */
/* DIAGNOSTIC and DEBUG definitions */

/*
 * Debug tracing is compiled in only under CGDDEBUG; DPRINTF output is
 * gated at runtime by bits in cgddebug (CGDB_FOLLOW traces entry points,
 * CGDB_CRYPTO dumps cipher intermediates).  DIAGPANIC/DIAGCONDPANIC are
 * the DIAGNOSTIC-only assertion forms.  NOTE(review): the #else/#endif
 * lines pairing the no-op variants with their debug variants are elided
 * in this view; the duplicated DPRINTF_FOLLOW/DIAGCONDPANIC definitions
 * below belong to the non-debug branches.
 */
#if defined(CGDDEBUG) && !defined(DEBUG)
#define CGDB_FOLLOW	0x1
#define CGDB_CRYPTO	0x4
#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)
static void	hexprint(const char *, void *, int);
#define DPRINTF_FOLLOW(y)
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#define DIAGCONDPANIC(x,y)
/* Global variables */

/* Array of per-unit softc structures, allocated in cgdattach(). */
struct cgd_softc *cgd_softc;

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
/* Resolve dev -> softc or bail out of the calling entry point with ENXIO. */
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO
/*
 * getcgd_softc: map a dev_t to its per-unit softc via the disk unit
 * number.  NOTE(review): the function's braces and (presumably) a
 * unit-range check returning NULL are elided in this view -- confirm
 * against the full file; tokens below are reproduced as-is.
 */
static struct cgd_softc *
getcgd_softc(dev_t dev)
	int	unit = CGDUNIT(dev);

	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
	return &cgd_softc[unit];
/*
 * cgdsoftc_init: one-time initialization of unit "num": zero the softc,
 * build the "cgd%d" external name, set up the softc lock, and register
 * the unit with the dksubr framework and the disk(9) layer.
 * NOTE(review): the return-type line and braces are elided in this view.
 */
cgdsoftc_init(struct cgd_softc *cs, int num)
	char	sbuf[DK_XNAME_SIZE];

	memset(cs, 0x0, sizeof(*cs));
	snprintf(sbuf, DK_XNAME_SIZE, "cgd%d", num);
	simple_lock_init(&cs->sc_slock);
	dk_sc_init(&cs->sc_dksc, cs, sbuf);
	disk_init(&cs->sc_dksc.sc_dkdev, cs->sc_dksc.sc_xname, &cgddkdriver);
	/*
	 * NOTE(review): this is the interior of cgdattach(); its signature
	 * and several surrounding lines (the num<=0 guard, the malloc NULL
	 * check wrapping the warning/panic below) are elided in this view.
	 * Allocates the global softc array and initializes each unit.
	 */
	DPRINTF_FOLLOW(("cgdattach(%d)\n", num));
	DIAGPANIC(("cgdattach: count <= 0"));
	cgd_softc = malloc(num * sizeof(*cgd_softc), M_DEVBUF, M_NOWAIT);
	/* allocation-failure path: warn (and panic under DIAGNOSTIC) */
	DPRINTF_FOLLOW(("WARNING: unable to malloc(9) memory for %d "
	    "crypt disks\n", num));
	DIAGPANIC(("cgdattach: cannot malloc(9) enough memory"));
	for (i = 0; i < num; i++)
		cgdsoftc_init(&cgd_softc[i], i);
/*
 * cgdopen: device open entry point; looks up the unit (ENXIO if absent
 * via GETCGD_SOFTC) and delegates to the dksubr framework.
 * NOTE(review): return-type line and braces elided in this view.
 */
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
/*
 * cgdclose: device close entry point; mirror of cgdopen(), delegating
 * to dk_close().  NOTE(review): return-type line and braces elided in
 * this view.
 */
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_close(di, &cs->sc_dksc, dev, flags, fmt, l);
/*
 * cgdstrategy: queue a buffer for I/O through the dksubr framework,
 * which will later call cgdstart() to issue it to the backing device.
 * Note the unresolved XXX: getcgd_softc() may return NULL here and is
 * dereferenced unchecked.  NOTE(review): return-type line and braces
 * elided in this view.
 */
cgdstrategy(struct buf *bp)
	struct	cgd_softc *cs = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));
	/* XXXrcd: Should we test for (cs != NULL)? */
	dk_strategy(di, &cs->sc_dksc, bp);
	/*
	 * NOTE(review): interior of cgdsize(); the signature line and
	 * (presumably) a NULL check on cs are elided in this view.
	 * Reports the device size via dk_size().
	 */
	struct	cgd_softc *cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	return dk_size(di, &cs->sc_dksc, dev);
/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can ensure that we can always have a transaction in flight.
 * We use this buffer first so that we have one less piece of
 * malloc'ed data at any given point.
 */

/*
 * cgd_getdata: return the per-device staging buffer if it is free
 * (marking it used under sc_slock), otherwise fall back to a transient
 * M_NOWAIT allocation which may return NULL.  NOTE(review): the
 * return-type line, braces, and the lines returning cs->sc_data when
 * the flag was claimed are elided in this view.
 */
cgd_getdata(struct dk_softc *dksc, unsigned long size)
	struct	cgd_softc *cs = dksc->sc_osc;

	simple_lock(&cs->sc_slock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
	simple_unlock(&cs->sc_slock);

	return malloc(size, M_DEVBUF, M_NOWAIT);
/*
 * cgd_putdata: release a buffer obtained from cgd_getdata().  If it is
 * the per-device staging buffer, just clear the in-use flag under
 * sc_slock; otherwise free the transient allocation.  NOTE(review):
 * the "} else" line separating the two branches (and the braces) is
 * elided in this view -- the free() belongs to the non-staging path.
 */
cgd_putdata(struct dk_softc *dksc, void *data)
	struct	cgd_softc *cs = dksc->sc_osc;

	if (data == cs->sc_data) {
		simple_lock(&cs->sc_slock);
		cs->sc_data_used = 0;
		simple_unlock(&cs->sc_slock);
		free(data, M_DEVBUF);
/*
 * cgdstart: dksubr "start" callback.  Allocates a nested buf targeting
 * the backing vnode, encrypts outgoing data into the staging buffer for
 * writes, clones the relevant buf fields, and issues the nested buf via
 * VOP_STRATEGY().  The disk_unbusy() calls mark the early-failure paths
 * (getiobuf()/cgd_getdata() exhaustion) before returning to the
 * framework.  NOTE(review): many lines are elided in this view --
 * the return-type line, braces, local declarations (nbp, addr, newaddr,
 * bn, vp), the failure "return EAGAIN"-style statements, the blkno
 * computation feeding "bn", the b_blkno/b_private assignments, and the
 * v_numoutput bookkeeping between the mutex_enter/exit pair.  Tokens
 * below are reproduced as-is; consult the full file before editing.
 */
cgdstart(struct dk_softc *dksc, struct buf *bp)
	struct	cgd_softc *cs = dksc->sc_osc;

	DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(cs->sc_tvn, false);
	disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.  If we fail, then we
	 * return an error and let the dksubr framework deal with it.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);

	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_bcount = bp->b_bcount;
	BIO_COPYPRIO(nbp, bp);
	if ((nbp->b_flags & B_READ) == 0) {
		/* writes are accounted against the backing vnode */
		mutex_enter(&vp->v_interlock);
		mutex_exit(&vp->v_interlock);
	VOP_STRATEGY(cs->sc_tvn, nbp);
/* expected to be called at splbio() */
/*
 * cgdiodone: completion handler for the nested buf issued by cgdstart().
 * Propagates any error to the original buf, decrypts in place on reads
 * (keyed by the nested buf's b_blkno, which is what was used to derive
 * the IV at encrypt time), releases the staging buffer if one was used,
 * and completes the original request with disk_unbusy() accounting.
 * NOTE(review): the return-type line, braces, several trailing argument
 * lines of the DPRINTFs, the putiobuf()/biodone() calls, and the splx
 * handling are elided in this view; tokens are reproduced as-is.
 */
cgdiodone(struct buf *nbp)
	struct	buf *obp = nbp->b_private;
	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &cs->sc_dksc;

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */
	if (nbp->b_flags & B_READ)
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	/* Request is complete for whatever reason */
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;
	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
	    (obp->b_flags & B_READ));
/* XXX: we should probably put these into dksubr.c, mostly */
/*
 * cgdread: raw character-device read; refuses if the unit has not been
 * configured (DKF_INITED clear) and otherwise hands off to physio()
 * over cgdstrategy.  NOTE(review): the return-type line, braces, the
 * "dksc = &cs->sc_dksc;" assignment, and the ENXIO return on the
 * uninitialized path are elided in this view.
 */
cgdread(dev_t dev, struct uio *uio, int flags)
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	if ((dksc->sc_flags & DKF_INITED) == 0)
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
/* XXX: we should probably put these into dksubr.c, mostly */
/*
 * cgdwrite: raw character-device write; mirror of cgdread() using
 * B_WRITE.  NOTE(review): the return-type line, braces, the
 * "dksc = &cs->sc_dksc;" assignment, and the ENXIO return on the
 * uninitialized path are elided in this view.
 */
cgdwrite(dev_t dev, struct uio *uio, int flags)
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	if ((dksc->sc_flags & DKF_INITED) == 0)
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
/*
 * cgdioctl: ioctl entry point.  Handles cgd-specific configuration
 * (CGDIOCSET via cgd_ioctl_set, CGDIOCCLR via cgd_ioctl_clr, the latter
 * refused while any partition is open per DK_BUSY), passes
 * DIOCCACHESYNC-style calls to the backing vnode with VOP_IOCTL, and
 * defers everything else to dk_ioctl().  NOTE(review): this view elides
 * the switch(cmd) skeleton, case labels, the "dksc = &cs->sc_dksc;"
 * assignment, EBADF/EBUSY returns, and locking around the set/clr
 * helpers -- the statements below are disconnected fragments of those
 * paths, reproduced as-is.
 */
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));
	GETCGD_SOFTC(cs, dev);
	dk = &dksc->sc_dkdev;
	if ((flag & FWRITE) == 0)
	if (dksc->sc_flags & DKF_INITED)
	ret = cgd_ioctl_set(cs, data, l);
	if (!(dksc->sc_flags & DKF_INITED)) {
	if (DK_BUSY(&cs->sc_dksc, pmask)) {
	ret = cgd_ioctl_clr(cs, data, l);
	/*
	 * XXX Do we really need to care about having a writable
	 * file descriptor here?
	 */
	if ((flag & FWRITE) == 0)
	/*
	 * We pass this call down to the underlying disk.
	 */
	ret = VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
	ret = dk_ioctl(di, dksc, dev, cmd, data, flag, l);
/*
 * cgddump: crash-dump entry point; delegates to dk_dump() (which, per
 * the dksubr framework, may decline).  NOTE(review): return-type line
 * and braces elided in this view.
 */
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
/*
 * for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

/*
 * IV-method lookup table: maps the user-supplied method name to the
 * cipher mode constant plus a blocksize divisor used for the historic
 * bits-vs-bytes compatibility fixup in cgd_ioctl_set().  NOTE(review):
 * the struct member declarations (name/value/divisor fields), the array
 * name "encblkno[]" line, and the closing "};" are elided in this view.
 */
static const struct {
	{ "encblkno", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
/*
 * cgd_ioctl_set: configure a unit from a CGDIOCSET request.  Opens the
 * backing device path, initializes the softc via cgdinit(), copies in
 * the algorithm name / IV method / key from userspace into a scratch
 * buffer (zeroed between uses so key material never lingers), resolves
 * the cipher and IV mode, initializes the cipher private state, then
 * brings the pseudo-disk online (bufq, staging buffer, DKF_INITED,
 * disk_attach, disklabel, wedge discovery).  NOTE(review): this view
 * elides the return-type line, braces, local declarations (ret, i, cp,
 * vp, inbuf), every "goto bail"-style error branch after the checks,
 * the copyinstr NUL-termination handling, and the error-path cleanup at
 * the end (of which the vn_close() fragment below is part).  Tokens are
 * reproduced as-is; consult the full file before modifying.
 */
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
	struct	cgd_ioctl *ci = data;
	size_t	keybytes;			/* key length in bytes */

	/* open the backing store named by the user-supplied path */
	if ((ret = dk_lookup(cp, l, &vp, UIO_USERSPACE)) != 0)
	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);
	if ((ret = cgdinit(cs, cp, vp, l)) != 0)

	/* fetch and resolve the cipher algorithm name */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {

	/* fetch and resolve the IV generation method */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
	if (i == __arraycount(encblkno)) {

	/* copy in the key (ci_keylen is in bits) and bound it */
	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);

	/* initialize the cipher state; cf_init may adjust cf_blocksize */
	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * (divisor taken from the IV-method table above).
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)memset(inbuf, 0, MAX_KEYSIZE);	/* scrub key material */
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */

	bufq_alloc(&cs->sc_dksc.sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	cs->sc_dksc.sc_flags |= DKF_INITED;

	/* Attach the disk. */
	disk_attach(&cs->sc_dksc.sc_dkdev);

	/* Try and read the disklabel. */
	dk_getdisklabel(di, &cs->sc_dksc, 0 /* XXX ? */);

	/* Discover wedges on this disk. */
	dkwedge_discover(&cs->sc_dksc.sc_dkdev);

	/* error path: drop the backing vnode */
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
/*
 * cgd_ioctl_clr: tear down a configured unit (CGDIOCCLR): delete
 * wedges, drain and free the buffer queue, close the backing vnode,
 * destroy the cipher state, release the path string and staging buffer,
 * clear DKF_INITED, and detach from the disk(9) layer.  NOTE(review):
 * the return-type line, braces, the splbio()/splx() bracketing around
 * bufq_drain, and the final return are elided in this view.
 */
cgd_ioctl_clr(struct cgd_softc *cs, void *data, struct lwp *l)

	/* Delete all of our wedges. */
	dkwedge_delall(&cs->sc_dksc.sc_dkdev);

	/* Kill off any queued buffers. */
	bufq_drain(cs->sc_dksc.sc_bufq);
	bufq_free(cs->sc_dksc.sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	cs->sc_dksc.sc_flags &= ~DKF_INITED;
	disk_detach(&cs->sc_dksc.sc_dkdev);
/*
 * getsize: determine the size (in sectors) of the backing device,
 * preferring DIOCGWEDGEINFO (dk wedges) and falling back to DIOCGPART
 * (traditional partitions).  NOTE(review): the return-type line,
 * braces, the trailing argument line of the first VOP_IOCTL, and the
 * success/failure returns are elided in this view.
 */
getsize(struct lwp *l, struct vnode *vp, size_t *size)
	struct partinfo dpart;
	struct dkwedge_info dkw;

	if ((ret = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
		*size = dkw.dkw_size;
	if ((ret = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred)) == 0) {
		*size = dpart.part->p_size;
/*
 * cgdinit: bind a softc to its backing vnode: record the user-supplied
 * path (sc_tpath), capture the backing rdev, obtain the device size via
 * getsize(), and synthesize a default geometry (512-byte sectors, one
 * track, 1MB "cylinders").  NOTE(review): this view elides the
 * return-type line, the continuation of the parameter list (presumably
 * "struct lwp *l)"), braces, local declarations (ret, size, tmppath,
 * va, pdg), the error-branch gotos after each failing call, and the
 * final return.  Tokens are reproduced as-is.
 */
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	cs->sc_dksc.sc_size = 0;

	/* stash a kernel copy of the backing-store path */
	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	if ((ret = VOP_GETATTR(vp, &va, l->l_cred)) != 0)
	cs->sc_tdev = va.va_rdev;

	if ((ret = getsize(l, vp, &size)) != 0)
	cs->sc_dksc.sc_size = size;

	/*
	 * XXX here we should probe the underlying device.  If we
	 * are accessing a partition of type RAW_PART, then
	 * we should populate our initial geometry with the
	 * geometry that we discover from the device.
	 */
	pdg = &cs->sc_dksc.sc_geom;
	pdg->pdg_secsize = DEV_BSIZE;
	pdg->pdg_ntracks = 1;
	pdg->pdg_nsectors = 1024 * (1024 / pdg->pdg_secsize);
	pdg->pdg_ncylinders = cs->sc_dksc.sc_size / pdg->pdg_nsectors;

	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the blocknumber and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */

/*
 * blkno2blkno_buf: serialize blkno into the (pre-zeroed) buffer, one
 * byte per iteration, least-significant byte first -- endianness-
 * independent by construction.  NOTE(review): the return-type line,
 * braces, and the "blkno >>= 8"-style shift inside the loop are elided
 * in this view.
 */
blkno2blkno_buf(char *sbuf, daddr_t blkno)

	/* Set up the blkno in blkno_buf, here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation, because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i = 0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
/*
 * cgd_cipher: encrypt or decrypt "len" bytes sector-by-sector.  For
 * each sector it builds a two-iovec uio pair: iovec[0] carries the
 * serialized block number (source) into a throwaway sink (dest), and
 * iovec[1] carries the actual sector payload.  Calling the cipher with
 * a zero IV then yields the encrypted-blockno IV scheme described in
 * the comment above blkno2blkno_buf().  For decryption the block
 * number is first encrypted separately (iovcnt temporarily 1) so the
 * same IV is reproduced before the payload pass.  NOTE(review): this
 * view elides the return-type line, braces, local declarations
 * (dstuio, srcuio, dst, src), the dst/src/blkno advancement at the
 * bottom of the loop, and the trailing argument line of the final
 * hexprint.  Tokens are reproduced as-is; the statement order here is
 * cryptographically significant -- do not restructure from this view.
 */
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
	struct iovec	dstiov[2];
	struct iovec	srciov[2];
	size_t		blocksize = cs->sc_cdata.cf_blocksize;
	char		sink[CGD_MAXBLOCKSIZE];
	char		zero_iv[CGD_MAXBLOCKSIZE];
	char		blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, blocksize);

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	dstiov[0].iov_base = sink;
	dstiov[0].iov_len = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len = blocksize;
	dstiov[1].iov_len = secsize;
	srciov[1].iov_len = secsize;

	for (; len > 0; len -= secsize) {
		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			/* pre-encrypt the blkno to recreate the write-time IV */
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, blocksize));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
/*
 * hexprint: debug helper -- print "start: len=NNNNNN 0x" followed by
 * the buffer bytes in hex.  NOTE(review): the return-type line, braces,
 * the local "c" cursor declaration, and the loop header around the
 * per-byte printf are elided in this view.
 */
hexprint(const char *start, void *buf, int len)

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	printf("%02x", (unsigned char) *c++);
#include <sys/module.h>

/* Register cgd as a loadable driver-class kernel module. */
MODULE(MODULE_CLASS_DRIVER, cgd, NULL);
864 cgd_modcmd(modcmd_t cmd
, void *arg
)
866 int bmajor
= -1, cmajor
= -1, error
= 0;
869 case MODULE_CMD_INIT
:
872 return devsw_attach("cgd", &cgd_bdevsw
, &bmajor
,
873 &cgd_cdevsw
, &cmajor
);
876 case MODULE_CMD_FINI
:
877 return devsw_detach(&cgd_bdevsw
, &cgd_cdevsw
);
880 case MODULE_CMD_STAT
: