/*	$NetBSD: ld.c,v 1.65 2009/05/07 09:11:42 cegger Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk driver for use by RAID controllers.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld.c,v 1.65 2009/05/07 09:11:42 cegger Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/dkio.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/mutex.h>
#include <sys/rnd.h>

#include <dev/ldvar.h>

#include <prop/proplib.h>
static void	ldgetdefaultlabel(struct ld_softc *, struct disklabel *);
static void	ldgetdisklabel(struct ld_softc *);
static void	ldminphys(struct buf *bp);
static bool	ld_shutdown(device_t, int);
static void	ldstart(struct ld_softc *, struct buf *);
static void	ld_set_properties(struct ld_softc *);
static void	ld_config_interrupts(device_t);
static int	ldlastclose(device_t);

extern struct cfdriver ld_cd;
static dev_type_open(ldopen);
static dev_type_close(ldclose);
static dev_type_read(ldread);
static dev_type_write(ldwrite);
static dev_type_ioctl(ldioctl);
static dev_type_strategy(ldstrategy);
static dev_type_dump(lddump);
static dev_type_size(ldsize);
const struct bdevsw ld_bdevsw = {
	ldopen, ldclose, ldstrategy, ldioctl, lddump, ldsize, D_DISK
};

const struct cdevsw ld_cdevsw = {
	ldopen, ldclose, ldread, ldwrite, ldioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

static struct dkdriver lddkdriver = { ldstrategy, ldminphys };
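
/*
 * ld_bdevsw and ld_cdevsw are the block and character device switch
 * tables for ld(4): the block side wires up open/close/strategy/ioctl
 * plus the dump and size hooks used for crash dumps and swap, while the
 * character side adds read/write (via physio) and stubs out the tty,
 * poll, mmap and kqfilter entries.  lddkdriver hands the generic disk
 * layer our strategy and minphys routines.
 */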
void
ldattach(struct ld_softc *sc)
{
	char tbuf[9];

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);

	if ((sc->sc_flags & LDF_ENABLED) == 0) {
		aprint_normal_dev(sc->sc_dv, "disabled\n");
		return;
	}

	/* Initialise and attach the disk structure. */
	disk_init(&sc->sc_dk, device_xname(sc->sc_dv), &lddkdriver);
	disk_attach(&sc->sc_dk);

	if (sc->sc_maxxfer > MAXPHYS)
		sc->sc_maxxfer = MAXPHYS;

	/* Build synthetic geometry if necessary. */
	if (sc->sc_nheads == 0 || sc->sc_nsectors == 0 ||
	    sc->sc_ncylinders == 0) {
		uint64_t ncyl;

		if (sc->sc_secperunit <= 528 * 2048)		/* 528MB */
			sc->sc_nheads = 16;
		else if (sc->sc_secperunit <= 1024 * 2048)	/* 1GB */
			sc->sc_nheads = 32;
		else if (sc->sc_secperunit <= 21504 * 2048)	/* 21GB */
			sc->sc_nheads = 64;
		else if (sc->sc_secperunit <= 43008 * 2048)	/* 42GB */
			sc->sc_nheads = 128;
		else
			sc->sc_nheads = 255;

		sc->sc_nsectors = 63;
		sc->sc_ncylinders = INT_MAX;
		ncyl = sc->sc_secperunit /
		    (sc->sc_nheads * sc->sc_nsectors);
		if (ncyl < INT_MAX)
			sc->sc_ncylinders = (int)ncyl;
	}
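
	/*
	 * Worked example of the synthetic geometry above (illustrative
	 * figures only): a unit of 8388608 sectors (4GB at 512 bytes per
	 * sector) falls into the "<= 21504 * 2048" bracket, giving
	 * 64 heads x 63 sectors and ncyl = 8388608 / (64 * 63) = 2080
	 * cylinders.
	 */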
	format_bytes(tbuf, sizeof(tbuf), sc->sc_secperunit *
	    sc->sc_secsize);
	aprint_normal_dev(sc->sc_dv, "%s, %d cyl, %d head, %d sec, "
	    "%d bytes/sect x %"PRIu64" sectors\n",
	    tbuf, sc->sc_ncylinders, sc->sc_nheads,
	    sc->sc_nsectors, sc->sc_secsize, sc->sc_secperunit);

	ld_set_properties(sc);

	/* Attach the device into the rnd source list. */
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
	    RND_TYPE_DISK, 0);

	/* Register with PMF */
	if (!pmf_device_register1(sc->sc_dv, NULL, NULL, ld_shutdown))
		aprint_error_dev(sc->sc_dv,
		    "couldn't establish power handler\n");

	bufq_alloc(&sc->sc_bufq, BUFQ_DISK_DEFAULT_STRAT, BUFQ_SORT_RAWBLOCK);

	/* Discover wedges on this disk. */
	config_interrupts(sc->sc_dv, ld_config_interrupts);
}
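
/*
 * A hardware front-end (e.g. a RAID controller driver) is expected to fill
 * in the ld_softc fields consumed above before calling ldattach().  A
 * minimal sketch, with hypothetical values and callback names that are not
 * part of this file:
 *
 *	sc->sc_dv = self;
 *	sc->sc_secsize = 512;
 *	sc->sc_secperunit = <capacity in sectors>;
 *	sc->sc_maxxfer = MAXPHYS;
 *	sc->sc_maxqueuecnt = <jobs the controller can hold>;
 *	sc->sc_start = myctlr_start;	(queues one buf with the hardware)
 *	sc->sc_dump = myctlr_dump;	(optional: crash dump support)
 *	sc->sc_flush = myctlr_flush;	(optional: cache flush)
 *	sc->sc_flags = LDF_ENABLED;
 *	ldattach(sc);
 */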
void
ldadjqparam(struct ld_softc *sc, int xmax)
{

	mutex_enter(&sc->sc_mutex);
	sc->sc_maxqueuecnt = xmax;
	mutex_exit(&sc->sc_mutex);
}
int
ldbegindetach(struct ld_softc *sc, int flags)
{
	int rv;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (0);

	rv = disk_begindetach(&sc->sc_dk, ldlastclose, sc->sc_dv, flags);
	if (rv != 0)
		return (rv);

	sc->sc_maxqueuecnt = 0;
	sc->sc_flags |= LDF_DETACH;
	while (sc->sc_queuecnt > 0) {
		sc->sc_flags |= LDF_DRAIN;
		rv = tsleep(&sc->sc_queuecnt, PRIBIO, "lddrn", 0);
		if (rv)
			break;
	}

	return (rv);
}
void
ldenddetach(struct ld_softc *sc)
{
	int s, bmaj, cmaj, i, mn;

	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return;

	/* Wait for commands queued with the hardware to complete. */
	if (sc->sc_queuecnt != 0)
		if (tsleep(&sc->sc_queuecnt, PRIBIO, "lddtch", 30 * hz))
			printf("%s: not drained\n", device_xname(sc->sc_dv));

	/* Locate the major numbers. */
	bmaj = bdevsw_lookup_major(&ld_bdevsw);
	cmaj = cdevsw_lookup_major(&ld_cdevsw);

	/* Kill off any queued buffers. */
	s = splbio();
	bufq_drain(sc->sc_bufq);
	splx(s);

	bufq_free(sc->sc_bufq);

	/* Nuke the vnodes for any open instances. */
	for (i = 0; i < MAXPARTITIONS; i++) {
		mn = DISKMINOR(device_unit(sc->sc_dv), i);
		vdevgone(bmaj, mn, mn, VBLK);
		vdevgone(cmaj, mn, mn, VCHR);
	}

	/* Delete all of our wedges. */
	dkwedge_delall(&sc->sc_dk);

	/* Detach from the disk list. */
	disk_detach(&sc->sc_dk);
	disk_destroy(&sc->sc_dk);

	/* Unhook the entropy source. */
	rnd_detach_source(&sc->sc_rnd_source);

	/* Deregister with PMF */
	pmf_device_deregister(sc->sc_dv);

#if 0
	/*
	 * XXX We can't really flush the cache here, because the
	 * XXX device may already be non-existent from the controller's
	 * XXX perspective.
	 */
	/* Flush the device's cache. */
	if (sc->sc_flush != NULL)
		if ((*sc->sc_flush)(sc, 0) != 0)
			aprint_error_dev(sc->sc_dv, "unable to flush cache\n");
#endif

	mutex_destroy(&sc->sc_mutex);
}
static bool
ld_shutdown(device_t dev, int flags)
{
	struct ld_softc *sc = device_private(dev);

	if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, LDFL_POLL) != 0) {
		printf("%s: unable to flush cache\n", device_xname(dev));
		return false;
	}

	return true;
}
static int
ldopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ld_softc *sc;
	int error, unit, part;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	mutex_enter(&sc->sc_dk.dk_openlock);

	if (sc->sc_dk.dk_openmask == 0) {
		/* Load the partition info if not already loaded. */
		if ((sc->sc_flags & LDF_VLABEL) == 0)
			ldgetdisklabel(sc);
	}

	/* Check that the partition exists. */
	if (part != RAW_PART && (part >= sc->sc_dk.dk_label->d_npartitions ||
	    sc->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto fail;
	}

	/* Ensure only one open at a time. */
	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	error = 0;
 fail:
	mutex_exit(&sc->sc_dk.dk_openlock);
	return (error);
}
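
/*
 * Per-partition opens are tracked in two masks: dk_copenmask for character
 * opens and dk_bopenmask for block opens, with dk_openmask as their union.
 * The first open (dk_openmask == 0) is what triggers reading the disklabel,
 * and the last close below is what may invalidate it again.
 */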
static int
ldlastclose(device_t self)
{
	struct ld_softc *sc = device_private(self);

	if (sc->sc_flush != NULL && (*sc->sc_flush)(sc, 0) != 0)
		aprint_error_dev(self, "unable to flush cache\n");
	if ((sc->sc_flags & LDF_KLABEL) == 0)
		sc->sc_flags &= ~LDF_VLABEL;

	return (0);
}
static int
ldclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ld_softc *sc;
	int part, unit;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup_private(&ld_cd, unit);

	mutex_enter(&sc->sc_dk.dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		sc->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		sc->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dk.dk_openmask =
	    sc->sc_dk.dk_copenmask | sc->sc_dk.dk_bopenmask;

	if (sc->sc_dk.dk_openmask == 0)
		ldlastclose(sc->sc_dv);

	mutex_exit(&sc->sc_dk.dk_openlock);
	return (0);
}
static int
ldread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_READ, ldminphys, uio));
}

static int
ldwrite(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(ldstrategy, NULL, dev, B_WRITE, ldminphys, uio));
}
static int
ldioctl(dev_t dev, u_long cmd, void *addr, int32_t flag, struct lwp *l)
{
	struct ld_softc *sc;
	int part, unit, error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif
	struct disklabel *lp;

	unit = DISKUNIT(dev);
	part = DISKPART(dev);
	sc = device_lookup_private(&ld_cd, unit);

	error = disk_ioctl(&sc->sc_dk, cmd, addr, flag, l);
	if (error != EPASSTHROUGH)
		return (error);

	error = 0;
	switch (cmd) {
	case DIOCGDINFO:
		memcpy(addr, sc->sc_dk.dk_label, sizeof(struct disklabel));
		return (0);

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(sc->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return (ENOTTY);
		memcpy(addr, &newlabel, sizeof(struct olddisklabel));
		return (0);
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = sc->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &sc->sc_dk.dk_label->d_partitions[part];
		break;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:

		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		mutex_enter(&sc->sc_dk.dk_openlock);
		sc->sc_flags |= LDF_LABELLING;

		error = setdisklabel(sc->sc_dk.dk_label,
		    lp, /*sc->sc_dk.dk_openmask : */0,
		    sc->sc_dk.dk_cpulabel);
		if (error == 0 && (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
		    || cmd == ODIOCWDINFO
#endif
		    ))
			error = writedisklabel(
			    MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
			    ldstrategy, sc->sc_dk.dk_label,
			    sc->sc_dk.dk_cpulabel);

		sc->sc_flags &= ~LDF_LABELLING;
		mutex_exit(&sc->sc_dk.dk_openlock);
		break;

	case DIOCKLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_KLABEL;
		else
			sc->sc_flags &= ~LDF_KLABEL;
		break;

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return (EBADF);
		if (*(int *)addr)
			sc->sc_flags |= LDF_WLABEL;
		else
			sc->sc_flags &= ~LDF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		ldgetdefaultlabel(sc, (struct disklabel *)addr);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		ldgetdefaultlabel(sc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return (ENOTTY);
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else if (sc->sc_flush)
			error = (*sc->sc_flush)(sc, 0);
		else
			error = 0;	/* XXX Error out instead? */
		break;

	case DIOCAWEDGE: {
		struct dkwedge_info *dkw = (void *) addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		/* If the ioctl happens here, the parent is us. */
		strlcpy(dkw->dkw_parent, device_xname(sc->sc_dv),
		    sizeof(dkw->dkw_parent));
		return (dkwedge_add(dkw));
	}

	case DIOCDWEDGE: {
		struct dkwedge_info *dkw = (void *) addr;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		/* If the ioctl happens here, the parent is us. */
		strlcpy(dkw->dkw_parent, device_xname(sc->sc_dv),
		    sizeof(dkw->dkw_parent));
		return (dkwedge_del(dkw));
	}

	case DIOCLWEDGES: {
		struct dkwedge_list *dkwl = (void *) addr;

		return (dkwedge_list(&sc->sc_dk, dkwl, l));
	}

	case DIOCGSTRATEGY: {
		struct disk_strategy *dks = (void *)addr;

		mutex_enter(&sc->sc_mutex);
		strlcpy(dks->dks_name, bufq_getstrategyname(sc->sc_bufq),
		    sizeof(dks->dks_name));
		mutex_exit(&sc->sc_mutex);
		dks->dks_paramlen = 0;

		return (0);
	}

	case DIOCSSTRATEGY: {
		struct disk_strategy *dks = (void *)addr;
		struct bufq_state *new, *old;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		if (dks->dks_param != NULL)
			return (EINVAL);

		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error)
			return (error);

		mutex_enter(&sc->sc_mutex);
		old = sc->sc_bufq;
		bufq_move(new, old);
		sc->sc_bufq = new;
		mutex_exit(&sc->sc_mutex);

		bufq_free(old);

		return (0);
	}

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
static void
ldstrategy(struct buf *bp)
{
	struct ld_softc *sc;
	struct disklabel *lp;
	daddr_t blkno;
	int part;

	sc = device_lookup_private(&ld_cd, DISKUNIT(bp->b_dev));
	part = DISKPART(bp->b_dev);

	if ((sc->sc_flags & LDF_DETACH) != 0) {
		bp->b_error = EIO;
		goto done;
	}

	lp = sc->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking and adjust the transfer.  If error, process.
	 * If past the end of partition, just return.
	 */
	if (part != RAW_PART &&
	    bounds_check_with_label(&sc->sc_dk, bp,
	    (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0) {
		goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (lp->d_secsize == DEV_BSIZE)
		blkno = bp->b_blkno;
	else if (lp->d_secsize > DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[part].p_offset;

	bp->b_rawblkno = blkno;

	ldstart(sc, bp);
	return;

 done:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
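
/*
 * For illustration of the block-size conversion above: with 2048-byte
 * device sectors and DEV_BSIZE of 512, a b_blkno of 100 (counted in
 * DEV_BSIZE units) becomes absolute sector 100 / (2048 / 512) = 25,
 * and the partition's p_offset is then added unless this is RAW_PART.
 */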
static void
ldstart(struct ld_softc *sc, struct buf *bp)
{
	int error;

	mutex_enter(&sc->sc_mutex);

	if (bp != NULL)
		bufq_put(sc->sc_bufq, bp);

	while (sc->sc_queuecnt < sc->sc_maxqueuecnt) {
		/* See if there is work to do. */
		if ((bp = bufq_peek(sc->sc_bufq)) == NULL)
			break;

		disk_busy(&sc->sc_dk);
		sc->sc_queuecnt++;

		if (__predict_true((error = (*sc->sc_start)(sc, bp)) == 0)) {
			/*
			 * The back-end is running the job; remove it from
			 * the queue.
			 */
			(void) bufq_get(sc->sc_bufq);
		} else {
			disk_unbusy(&sc->sc_dk, 0, (bp->b_flags & B_READ));
			sc->sc_queuecnt--;
			if (error == EAGAIN) {
				/*
				 * Temporary resource shortage in the
				 * back-end; just defer the job until
				 * later.
				 *
				 * XXX We might consider a watchdog timer
				 * XXX to make sure we are kicked into action.
				 */
				break;
			} else {
				(void) bufq_get(sc->sc_bufq);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				mutex_exit(&sc->sc_mutex);
				biodone(bp);
				mutex_enter(&sc->sc_mutex);
			}
		}
	}

	mutex_exit(&sc->sc_mutex);
}
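
/*
 * The dispatch loop above implies the contract for the front-end's
 * sc_start callback: return 0 once the buf has been accepted (lddone()
 * will complete it later), EAGAIN to leave it queued and retry on the
 * next completion, or any other error to fail the transfer immediately.
 */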
void
lddone(struct ld_softc *sc, struct buf *bp)
{

	if (bp->b_error != 0) {
		diskerr(bp, "ld", "error", LOG_PRINTF, 0, sc->sc_dk.dk_label);
		printf("\n");
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));
	rnd_add_uint32(&sc->sc_rnd_source, bp->b_rawblkno);
	biodone(bp);

	mutex_enter(&sc->sc_mutex);
	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		if ((sc->sc_flags & LDF_DRAIN) != 0) {
			sc->sc_flags &= ~LDF_DRAIN;
			wakeup(&sc->sc_queuecnt);
		}
		mutex_exit(&sc->sc_mutex);
		ldstart(sc, NULL);
	} else
		mutex_exit(&sc->sc_mutex);
}
static int
ldsize(dev_t dev)
{
	struct ld_softc *sc;
	int part, unit, omask, size;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	part = DISKPART(dev);

	omask = sc->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && ldopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	else if (sc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = sc->sc_dk.dk_label->d_partitions[part].p_size *
		    (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && ldclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);

	return (size);
}
/*
 * Load the label information from the specified device.
 */
static void
ldgetdisklabel(struct ld_softc *sc)
{
	const char *errstring;

	ldgetdefaultlabel(sc, sc->sc_dk.dk_label);

	/* Call the generic disklabel extraction routine. */
	errstring = readdisklabel(MAKEDISKDEV(0, device_unit(sc->sc_dv),
	    RAW_PART), ldstrategy, sc->sc_dk.dk_label, sc->sc_dk.dk_cpulabel);
	if (errstring != NULL)
		printf("%s: %s\n", device_xname(sc->sc_dv), errstring);

	/* In-core label now valid. */
	sc->sc_flags |= LDF_VLABEL;
}
/*
 * Construct a fictitious label.
 */
static void
ldgetdefaultlabel(struct ld_softc *sc, struct disklabel *lp)
{

	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = sc->sc_secsize;
	lp->d_ntracks = sc->sc_nheads;
	lp->d_nsectors = sc->sc_nsectors;
	lp->d_ncylinders = sc->sc_ncylinders;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
	lp->d_type = DTYPE_LD;
	strlcpy(lp->d_typename, "unknown", sizeof(lp->d_typename));
	strlcpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));
	lp->d_secperunit = sc->sc_secperunit;
	lp->d_interleave = 1;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}
static int
lddump(dev_t dev, daddr_t blkno, void *vav, size_t size)
{
	char *va = vav;
	struct ld_softc *sc;
	struct disklabel *lp;
	int unit, part, nsects, sectoff, towrt, nblk, maxblkcnt, rv;
	static int dumping;

	unit = DISKUNIT(dev);
	if ((sc = device_lookup_private(&ld_cd, unit)) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & LDF_ENABLED) == 0)
		return (ENODEV);
	if (sc->sc_dump == NULL)
		return (ENXIO);

	/* Check if recursive dump; if so, punt. */
	if (dumping)
		return (EFAULT);
	dumping = 1;

	/* Convert to disk sectors.  Request must be a multiple of size. */
	part = DISKPART(dev);
	lp = sc->sc_dk.dk_label;
	if ((size % lp->d_secsize) != 0)
		return (EFAULT);
	towrt = size / lp->d_secsize;
	blkno = dbtob(blkno) / lp->d_secsize;	/* blkno in DEV_BSIZE units */

	nsects = lp->d_partitions[part].p_size;
	sectoff = lp->d_partitions[part].p_offset;

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + towrt) > nsects))
		return (EINVAL);

	/* Offset block number to start of partition. */
	blkno += sectoff;

	/* Start dumping and return when done. */
	maxblkcnt = sc->sc_maxxfer / sc->sc_secsize - 1;
	while (towrt > 0) {
		nblk = min(maxblkcnt, towrt);

		if ((rv = (*sc->sc_dump)(sc, va, blkno, nblk)) != 0)
			return (rv);

		towrt -= nblk;
		blkno += nblk;
		va += nblk * sc->sc_secsize;
	}

	dumping = 0;
	return (0);
}
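
/*
 * Example of the chunking above (illustrative figures): with sc_maxxfer
 * of 65536 bytes and 512-byte sectors, maxblkcnt = 65536 / 512 - 1 = 127,
 * so the dump is handed to sc_dump in chunks of at most 127 sectors.
 */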
/*
 * Adjust the size of a transfer.
 */
static void
ldminphys(struct buf *bp)
{
	struct ld_softc *sc;

	sc = device_lookup_private(&ld_cd, DISKUNIT(bp->b_dev));

	if (bp->b_bcount > sc->sc_maxxfer)
		bp->b_bcount = sc->sc_maxxfer;
	minphys(bp);
}
static void
ld_set_properties(struct ld_softc *ld)
{
	prop_dictionary_t disk_info, odisk_info, geom;

	disk_info = prop_dictionary_create();

	geom = prop_dictionary_create();

	prop_dictionary_set_uint64(geom, "sectors-per-unit",
	    ld->sc_secperunit);

	prop_dictionary_set_uint32(geom, "sector-size",
	    ld->sc_secsize);

	prop_dictionary_set_uint16(geom, "sectors-per-track",
	    ld->sc_nsectors);

	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
	    ld->sc_nheads);

	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
	    ld->sc_ncylinders);

	prop_dictionary_set(disk_info, "geometry", geom);
	prop_object_release(geom);

	prop_dictionary_set(device_properties(ld->sc_dv),
	    "disk-info", disk_info);

	/*
	 * Don't release disk_info here; we keep a reference to it.
	 * disk_detach() will release it when we go away.
	 */

	odisk_info = ld->sc_dk.dk_info;
	ld->sc_dk.dk_info = disk_info;
	if (odisk_info != NULL)
		prop_object_release(odisk_info);
}
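
/*
 * The geometry published above lives in a proplib dictionary attached to
 * the device as "disk-info", with a nested "geometry" dictionary holding
 * "sectors-per-unit", "sector-size", "sectors-per-track",
 * "tracks-per-cylinder" and "cylinders-per-unit"; consumers can read the
 * values back out of the device's property dictionary.
 */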
static void
ld_config_interrupts(device_t d)
{
	struct ld_softc *sc = device_private(d);

	dkwedge_discover(&sc->sc_dk);
}