/*	$NetBSD: cgd.c,v 1.63 2009/11/10 20:24:30 christos Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Roland C. Dowdeswell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.63 2009/11/10 20:24:30 christos Exp $");
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <dev/dkvar.h>
#include <dev/cgdvar.h>
/* Entry Point Functions */

void	cgdattach(int);

static dev_type_open(cgdopen);
static dev_type_close(cgdclose);
static dev_type_read(cgdread);
static dev_type_write(cgdwrite);
static dev_type_ioctl(cgdioctl);
static dev_type_strategy(cgdstrategy);
static dev_type_dump(cgddump);
static dev_type_size(cgdsize);

const struct bdevsw cgd_bdevsw = {
	cgdopen, cgdclose, cgdstrategy, cgdioctl,
	cgddump, cgdsize, D_DISK
};

const struct cdevsw cgd_cdevsw = {
	cgdopen, cgdclose, cgdread, cgdwrite, cgdioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};
/* Internal Functions */

static int	cgdstart(struct dk_softc *, struct buf *);
static void	cgdiodone(struct buf *);

static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
static int	cgd_ioctl_clr(struct cgd_softc *, void *, struct lwp *);
static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
			struct lwp *);
static void	cgd_cipher(struct cgd_softc *, void *, void *,
			   size_t, daddr_t, size_t, int);

/* Pseudo-disk Interface */

static struct dk_intf the_dkintf = {
	DTYPE_CGD,
	"cgd",
	cgdopen,
	cgdclose,
	cgdstrategy,
	cgdstart,
};
static struct dk_intf *di = &the_dkintf;

static struct dkdriver cgddkdriver = {
	.d_strategy = cgdstrategy,
	.d_minphys = minphys,
};
/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;

#define CGDB_FOLLOW	0x1
#define CGDB_IO		0x2
#define CGDB_CRYPTO	0x4

#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif

#ifdef DIAGNOSTIC
#define DIAGPANIC(x)		panic x
#define DIAGCONDPANIC(x,y)	if (x) panic y
#else
#define DIAGPANIC(x)
#define DIAGCONDPANIC(x,y)
#endif
/* Global variables */

struct	cgd_softc *cgd_softc;
int	numcgd = 0;

/* Utility Functions */

#define CGDUNIT(x)		DISKUNIT(x)
#define GETCGD_SOFTC(_cs, x)	if (!((_cs) = getcgd_softc(x))) return ENXIO

static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	int	unit = CGDUNIT(dev);

	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n", dev, unit));
	if (unit >= numcgd)
		return NULL;
	return &cgd_softc[unit];
}
/* The code */

static void
cgdsoftc_init(struct cgd_softc *cs, int num)
{
	char	sbuf[DK_XNAME_SIZE];

	memset(cs, 0x0, sizeof(*cs));
	snprintf(sbuf, DK_XNAME_SIZE, "cgd%d", num);
	simple_lock_init(&cs->sc_slock);
	dk_sc_init(&cs->sc_dksc, cs, sbuf);
	disk_init(&cs->sc_dksc.sc_dkdev, cs->sc_dksc.sc_xname, &cgddkdriver);
}

void
cgdattach(int num)
{
	int	i;

	DPRINTF_FOLLOW(("cgdattach(%d)\n", num));
	if (num <= 0) {
		DIAGPANIC(("cgdattach: count <= 0"));
		return;
	}

	cgd_softc = malloc(num * sizeof(*cgd_softc), M_DEVBUF, M_NOWAIT);
	if (!cgd_softc) {
		DPRINTF_FOLLOW(("WARNING: unable to malloc(9) memory for %d "
		    "crypt disks\n", num));
		DIAGPANIC(("cgdattach: cannot malloc(9) enough memory"));
		return;
	}

	numcgd = num;
	for (i = 0; i < num; i++)
		cgdsoftc_init(&cgd_softc[i], i);
}
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(di, &cs->sc_dksc, dev, flags, fmt, l);
}

static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_close(di, &cs->sc_dksc, dev, flags, fmt, l);
}

static void
cgdstrategy(struct buf *bp)
{
	struct	cgd_softc *cs = getcgd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));
	/* XXXrcd: Should we test for (cs != NULL)? */
	dk_strategy(di, &cs->sc_dksc, bp);
	return;
}

static int
cgdsize(dev_t dev)
{
	struct cgd_softc *cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (!cs)
		return -1;
	return dk_size(di, &cs->sc_dksc, dev);
}
/*
 * cgd_{get,put}data are functions that deal with getting a buffer
 * for the new encrypted data.  We have a buffer per device so that
 * we can ensure that we can always have a transaction in flight.
 * We use this buffer first so that we have one less piece of
 * malloc'ed data at any given point.
 */

static void *
cgd_getdata(struct dk_softc *dksc, unsigned long size)
{
	struct	cgd_softc *cs = dksc->sc_osc;
	void *	data = NULL;

	simple_lock(&cs->sc_slock);
	if (cs->sc_data_used == 0) {
		cs->sc_data_used = 1;
		data = cs->sc_data;
	}
	simple_unlock(&cs->sc_slock);

	if (data)
		return data;

	return malloc(size, M_DEVBUF, M_NOWAIT);
}

static void
cgd_putdata(struct dk_softc *dksc, void *data)
{
	struct	cgd_softc *cs = dksc->sc_osc;

	if (data == cs->sc_data) {
		simple_lock(&cs->sc_slock);
		cs->sc_data_used = 0;
		simple_unlock(&cs->sc_slock);
	} else {
		free(data, M_DEVBUF);
	}
}
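
/*
 * Illustrative call pattern (a sketch, not part of the original source):
 * cgdstart() obtains a buffer for the encrypted copy of a write with
 * cgd_getdata(), and cgdiodone() hands it back with cgd_putdata() once
 * the underlying I/O completes:
 *
 *	newaddr = cgd_getdata(dksc, bp->b_bcount);	// per-device buffer,
 *							// or M_NOWAIT malloc
 *	...issue the I/O on newaddr...
 *	cgd_putdata(dksc, newaddr);			// release or free
 */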
static int
cgdstart(struct dk_softc *dksc, struct buf *bp)
{
	struct	cgd_softc *cs = dksc->sc_osc;
	struct	buf *nbp;
	void *	addr;
	void *	newaddr;
	daddr_t	bn;
	struct	vnode *vp;

	DPRINTF_FOLLOW(("cgdstart(%p, %p)\n", dksc, bp));
	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */

	nbp = getiobuf(cs->sc_tvn, false);
	if (nbp == NULL) {
		disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
		return -1;
	}

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.  If we fail, then we
	 * return an error and let the dksubr framework deal with it.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			putiobuf(nbp);
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			return -1;
		}
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
	}

	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = bn;
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(&vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(&vp->v_interlock);
	}
	VOP_STRATEGY(cs->sc_tvn, nbp);
	return 0;
}
/* expected to be called at splbio() */
static void
cgdiodone(struct buf *nbp)
{
	struct	buf *obp = nbp->b_private;
	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &cs->sc_dksc;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64 " addr %p bcnt %d\n",
	    nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
	    nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 *       we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ)
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    nbp->b_blkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;
	disk_unbusy(&dksc->sc_dkdev, obp->b_bcount - obp->b_resid,
	    (obp->b_flags & B_READ));
	biodone(obp);
	dk_iodone(di, dksc);
}
/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}

/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((dksc->sc_flags & DKF_INITED) == 0)
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;
	struct	disk *dk;
	int	ret;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	dk = &dksc->sc_dkdev;
	switch (cmd) {
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (dksc->sc_flags & DKF_INITED)
			ret = EBUSY;
		else
			ret = cgd_ioctl_set(cs, data, l);
		break;
	case CGDIOCCLR:
		if (!(dksc->sc_flags & DKF_INITED)) {
			ret = ENXIO;
			break;
		}
		if (DK_BUSY(&cs->sc_dksc, pmask)) {
			ret = EBUSY;
			break;
		}
		ret = cgd_ioctl_clr(cs, data, l);
		break;
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);

		/*
		 * We pass this call down to the underlying disk.
		 */
		ret = VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
		break;
	default:
		ret = dk_ioctl(di, dksc, dev, cmd, data, flag, l);
		break;
	}

	return ret;
}
static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(di, &cs->sc_dksc, dev, blkno, va, size);
}
/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

static const struct {
	const char *n;
	int v;
	int d;
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct	 cgd_ioctl *ci = data;
	struct	 vnode *vp;
	int	 ret;
	size_t	 i;
	size_t	 keybytes;			/* key length in bytes */
	const char *cp;
	char	 *inbuf;

	cp = ci->ci_disk;
	if ((ret = dk_lookup(cp, l, &vp, UIO_USERSPACE)) != 0)
		return ret;

	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
		log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		    cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
		cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes.  Unfortunately originally
	 * it was expressed in bits.  For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&cs->sc_dksc.sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	cs->sc_dksc.sc_flags |= DKF_INITED;

	/* Attach the disk. */
	disk_attach(&cs->sc_dksc.sc_dkdev);

	/* Try and read the disklabel. */
	dk_getdisklabel(di, &cs->sc_dksc, 0 /* XXX ? */);

	/* Discover wedges on this disk. */
	dkwedge_discover(&cs->sc_dksc.sc_dkdev);

	return 0;

bail:
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}
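
/*
 * Illustrative userland sketch of the CGDIOCSET protocol handled above
 * (not part of this file; the field names come from the cgd_ioctl
 * structure as used in cgd_ioctl_set(), while the device path, algorithm
 * name, and parameter values are assumptions for illustration only):
 *
 *	struct cgd_ioctl ci;
 *	int fd = open("/dev/rcgd0d", O_RDWR);	// raw cgd character device
 *
 *	memset(&ci, 0, sizeof(ci));
 *	ci.ci_disk = "/dev/wd0e";	// underlying disk, copied in by cgdinit()
 *	ci.ci_alg = "aes-cbc";		// resolved via cryptfuncs_find()
 *	ci.ci_ivmethod = "encblkno1";	// must match the encblkno[] table
 *	ci.ci_key = key;		// key material; ci_keylen is in bits
 *	ci.ci_keylen = 256;
 *	ci.ci_blocksize = 128;		// cipher blocksize (see compat note)
 *	if (ioctl(fd, CGDIOCSET, &ci) == -1)
 *		err(1, "CGDIOCSET");
 *
 * CGDIOCCLR takes the same path in reverse, tearing the device down via
 * cgd_ioctl_clr().
 */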
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *cs, void *data, struct lwp *l)
{
	int	s;

	/* Delete all of our wedges. */
	dkwedge_delall(&cs->sc_dksc.sc_dkdev);

	/* Kill off any queued buffers. */
	s = splbio();
	bufq_drain(cs->sc_dksc.sc_bufq);
	splx(s);
	bufq_free(cs->sc_dksc.sc_bufq);

	(void)vn_close(cs->sc_tvn, FREAD|FWRITE, l->l_cred);
	cs->sc_cfuncs->cf_destroy(cs->sc_cdata.cf_priv);
	free(cs->sc_tpath, M_DEVBUF);
	free(cs->sc_data, M_DEVBUF);
	cs->sc_data_used = 0;
	cs->sc_dksc.sc_flags &= ~DKF_INITED;
	disk_detach(&cs->sc_dksc.sc_dkdev);

	return 0;
}
static int
getsize(struct lwp *l, struct vnode *vp, size_t *size)
{
	struct partinfo dpart;
	struct dkwedge_info dkw;
	int ret;

	if ((ret = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
	    l->l_cred)) == 0) {
		*size = dkw.dkw_size;
		return 0;
	}

	if ((ret = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred)) == 0) {
		*size = dpart.part->p_size;
		return 0;
	}

	return ret;
}
static int
cgdinit(struct cgd_softc *cs, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct	dk_geom *pdg;
	struct	vattr va;
	size_t	size;
	int	ret;
	char	*tmppath;

	cs->sc_dksc.sc_size = 0;
	cs->sc_tvn = vp;
	cs->sc_tpath = NULL;

	tmppath = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &cs->sc_tpathlen);
	if (ret)
		goto bail;
	cs->sc_tpath = malloc(cs->sc_tpathlen, M_DEVBUF, M_WAITOK);
	memcpy(cs->sc_tpath, tmppath, cs->sc_tpathlen);

	if ((ret = VOP_GETATTR(vp, &va, l->l_cred)) != 0)
		goto bail;

	cs->sc_tdev = va.va_rdev;

	if ((ret = getsize(l, vp, &size)) != 0)
		goto bail;

	if (!size) {
		ret = ENODEV;
		goto bail;
	}

	cs->sc_dksc.sc_size = size;

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	pdg = &cs->sc_dksc.sc_geom;
	pdg->pdg_secsize = DEV_BSIZE;
	pdg->pdg_ntracks = 1;
	pdg->pdg_nsectors = 1024 * (1024 / pdg->pdg_secsize);
	pdg->pdg_ncylinders = cs->sc_dksc.sc_size / pdg->pdg_nsectors;

bail:
	free(tmppath, M_TEMP);
	if (ret && cs->sc_tpath)
		free(cs->sc_tpath, M_DEVBUF);
	return ret;
}
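
/*
 * Worked example of the synthesized geometry above (illustrative only;
 * assumes the usual DEV_BSIZE of 512 bytes):
 *
 *	pdg_secsize    = 512
 *	pdg_nsectors   = 1024 * (1024 / 512) = 2048 sectors per cylinder,
 *	                 i.e. each fake one-track cylinder spans 1MB
 *	pdg_ncylinders = sc_size / 2048
 *
 * so an underlying partition of 20971520 sectors (10GB) is presented as
 * 10240 cylinders.
 */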
/*
 * Our generic cipher entry point.  This takes care of the
 * IV mode and passes off the work to the specific cipher.
 * We implement here the IV method ``encrypted block
 * number''.
 *
 * For the encryption case, we accomplish this by setting
 * up a struct uio where the first iovec of the source is
 * the blocknumber and the first iovec of the dest is a
 * sink.  We then call the cipher with an IV of zero, and
 * the right thing happens.
 *
 * For the decryption case, we use the same basic mechanism
 * for symmetry, but we encrypt the block number in the
 * first iovec.
 *
 * We mainly do this to avoid requiring the definition of
 * an ECB mode.
 *
 * XXXrcd: for now we rely on our own crypto framework defined
 *         in dev/cgd_crypto.c.  This will change when we
 *         get a generic kernel crypto framework.
 */
static void
blkno2blkno_buf(char *sbuf, daddr_t blkno)
{
	int	i;

	/* Set up the blkno in blkno_buf, here we do not care much
	 * about the final layout of the information as long as we
	 * can guarantee that each sector will have a different IV
	 * and that the endianness of the machine will not affect
	 * the representation that we have chosen.
	 *
	 * We choose this representation, because it does not rely
	 * on the size of buf (which is the blocksize of the cipher),
	 * but allows daddr_t to grow without breaking existing
	 * disks.
	 *
	 * Note that blkno2blkno_buf does not take a size as input,
	 * and hence must be called on a pre-zeroed buffer of length
	 * greater than or equal to sizeof(daddr_t).
	 */
	for (i=0; i < sizeof(daddr_t); i++) {
		*sbuf++ = blkno & 0xff;
		blkno >>= 8;
	}
}
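
/*
 * Worked example (illustrative, assuming a 64-bit daddr_t): for
 * blkno = 0x0102 the loop above emits the least significant byte first,
 * so a pre-zeroed 16-byte blkno_buf ends up as
 *
 *	02 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *
 * regardless of host endianness; the trailing bytes stay zero because
 * the caller memset()s the buffer to the cipher blocksize beforehand.
 */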
static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	char		*src = srcv;
	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
	struct uio	dstuio;
	struct uio	srcuio;
	struct iovec	dstiov[2];
	struct iovec	srciov[2];
	size_t		blocksize = cs->sc_cdata.cf_blocksize;
	char		sink[CGD_MAXBLOCKSIZE];
	char		zero_iv[CGD_MAXBLOCKSIZE];
	char		blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, blocksize);

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	dstiov[0].iov_base = sink;
	dstiov[0].iov_len  = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len  = blocksize;
	dstiov[1].iov_len  = secsize;
	srciov[1].iov_len  = secsize;

	for (; len > 0; len -= secsize) {
		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, blocksize));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;
		}

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
		    sink, blocksize));

		dst += secsize;
		src += secsize;
		blkno++;
	}
}
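
/*
 * Illustrative layout of one per-sector pass above (a sketch, not part of
 * the original source).  With uio_iovcnt == 2 the cipher sees the block
 * number prepended to the sector data, so CBC chaining turns the encrypted
 * block number into the effective IV for the sector:
 *
 *	srciov[0] = { blkno_buf, blocksize }  ->  dstiov[0] = { sink, blocksize }
 *	srciov[1] = { src,       secsize   }  ->  dstiov[1] = { dst,  secsize   }
 *
 * For decryption, the first cipher() call (with uio_iovcnt == 1) encrypts
 * only blkno_buf, recovering the same IV before the sector is decrypted.
 */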
#ifdef DEBUG
static void
hexprint(const char *start, void *buf, int len)
{
	char	*c = buf;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	while (len--)
		printf("%02x", (unsigned char) *c++);
}
#endif
#ifdef _MODULE

#include <sys/module.h>

MODULE(MODULE_CLASS_DRIVER, cgd, NULL);

static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int bmajor = -1, cmajor = -1, error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
		cgdattach(4);

		return devsw_attach("cgd", &cgd_bdevsw, &bmajor,
		    &cgd_cdevsw, &cmajor);
		break;

	case MODULE_CMD_FINI:
		return devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
		break;

	case MODULE_CMD_STAT:
		return ENOTTY;

	default:
		return ENOTTY;
	}

	return error;
}

#endif