/*	$NetBSD: ld_iop.c,v 1.32 2008/09/09 12:45:39 tron Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 *
 * This code is derived from software contributed to The NetBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.32 2008/09/09 12:45:39 tron Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/endian.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>
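
/*
 * Base command timeout, nominally 30 seconds: posted requests below use
 * LD_IOP_TIMEOUT * 2, and the DDM timeout base is programmed with
 * LD_IOP_TIMEOUT * 1000.
 */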
#define	LD_IOP_TIMEOUT		30*1000
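
/*
 * Soft state flags: LD_IOP_CLAIMED records that we successfully claimed the
 * target as its primary user; LD_IOP_NEW_EVTMASK is set once the IOP has
 * acknowledged a modified event mask (see ld_iop_intr_event()).
 */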
#define	LD_IOP_CLAIMED		0x01
#define	LD_IOP_NEW_EVTMASK	0x02

struct ld_iop_softc {
	struct ld_softc sc_ld;
	struct iop_initiator sc_ii;
	struct iop_initiator sc_eventii;
	int sc_flags;
};

static void	ld_iop_adjqparam(device_t, int);
static void	ld_iop_attach(device_t, device_t, void *);
static int	ld_iop_detach(device_t, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *, int);
static void	ld_iop_intr(device_t, struct iop_msg *, void *);
static void	ld_iop_intr_event(device_t, struct iop_msg *, void *);
static int	ld_iop_match(device_t, cfdata_t, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);
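
/*
 * Autoconfiguration glue: ld_iop_match/attach/detach are the entry points
 * used for the "ld at iop" attachment.
 */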
CFATTACH_DECL_NEW(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);
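
/*
 * Detailed status strings, indexed by the 16-bit detail code returned in an
 * RBS reply (see ld_iop_intr()).
 */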
static const char * const ld_iop_errors[] = {
	"media write protected",
	"volume changed, waiting for acknowledgement",
};
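
/* Match any I2O target of the random block storage class. */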
static int
ld_iop_match(device_t parent, cfdata_t match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(device_t parent, device_t self, void *aux)
{
	struct iop_attach_args *ia = aux;
	struct ld_iop_softc *sc = device_private(self);
	struct iop_softc *iop = device_private(parent);
	struct ld_softc *ld = &sc->sc_ld;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		union {
			struct i2o_param_rbs_cache_control cc;
			struct i2o_param_rbs_device_info bdi;
		} p;
	} __packed param;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(self, "unable to register for events\n");
		goto bad;
	}

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;
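
	/*
	 * sc_dump, sc_flush and sc_start above are the hooks through which
	 * the generic ld(4) midlayer issues crash dumps, cache flushes and
	 * transfer starts.
	 */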

	/* Say what the device is. */
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;
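
	/*
	 * Fetch the block size and capacity from the RBS DEVICE_INFO
	 * parameter group.
	 */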
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
	case I2O_RBS_TYPE_CDROM:
	case I2O_RBS_TYPE_OPTICAL:
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVABLE; */
		fixedstr = "removable";
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		aprint_error_dev(self, "device not yet supported\n");

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;

	iop = device_private(device_parent(sc->sc_ld.sc_dv));

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		mutex_spin_exit(&iop->sc_intrlock);

		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);

		mutex_spin_enter(&iop->sc_intrlock);
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			cv_timedwait(&sc->sc_eventii.ii_cv,
			    &iop->sc_intrlock, hz * 5);
		mutex_spin_exit(&iop->sc_intrlock);
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(device_t self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}

	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
	im = iop_msg_alloc(iop, IM_POLL);
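
	/*
	 * Build a BLOCK WRITE for the dump.  The message is allocated with
	 * IM_POLL and posted synchronously, since crash dumps must not sleep.
	 */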
	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);

	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld, int flags)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);

	return (rv);
}
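
/*
 * Completion interrupt for block read/write requests: report any detailed
 * error status, set b_resid and hand the buffer back to ld(4).
 */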
static void
ld_iop_intr(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
	const char *errstr;

	rb = reply;
	bp = im->im_dvcontext;
	sc = device_private(dv);
	iop = device_private(device_parent(dv));

	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		aprint_error_dev(dv, "error 0x%04x: %s\n", detail, errstr);
		err = 1;
	}

	if (err) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}
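
/*
 * Utility event interrupt: note acknowledged event-mask changes so that
 * ld_iop_unconfig() can stop waiting, and log any other event received.
 */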
static void
ld_iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = device_private(dv);

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		iop = device_private(device_parent(dv));
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		cv_broadcast(&sc->sc_eventii.ii_cv);
		mutex_spin_exit(&iop->sc_intrlock);
		return;
	}

	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}
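
/*
 * Queue depth adjustment callback from iop(4): pass the new per-device
 * command limit down to the ld(4) midlayer.
 */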
static void
ld_iop_adjqparam(device_t dv, int mpi)
{
	struct ld_iop_softc *sc = device_private(dv);
	struct iop_softc *iop = device_private(device_parent(dv));
	struct ld_softc *ld = &sc->sc_ld;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam(ld, mpi);
}