 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
26 #include <kmdb/kctl/kctl.h>
27 #include <kmdb/kctl/kctl_wr.h>
28 #include <kmdb/kmdb_kctl.h>
29 #include <kmdb/kmdb_kdi.h>
30 #include <kmdb/kmdb_auxv.h>
31 #include <mdb/mdb_errno.h>
33 #include <sys/sysmacros.h>
34 #include <sys/reboot.h>
35 #include <sys/atomic.h>
36 #include <sys/bootconf.h>
39 #include <sys/kobj_impl.h>
40 #include <sys/promimpl.h>
41 #include <sys/kdi_impl.h>
42 #include <sys/ctf_api.h>
43 #include <vm/seg_kmem.h>
47 #define KCTL_EXECNAME "/kernel/drv/kmdb"
50 #define KCTL_MEM_GOALSZ (20 * 1024 * 1024)
52 #define KCTL_MEM_GOALSZ (10 * 1024 * 1024)
56 * kmdb will call its own copies of the promif routines during
57 * initialization. As these routines are intended to be used when the
58 * world is stopped, they don't attempt to grab the PROM lock. Very
59 * Bad Things could happen if kmdb called a prom routine while someone
60 * else was calling the kernel's copy of another prom routine, so we
61 * grab the PROM lock ourselves before we start initialization.
63 #define KCTL_PROM_LOCK
64 #define KCTL_PROM_UNLOCK
69 if (kobj_kdi
.kdi_version
!= KDI_VERSION
) {
70 kctl_warn("kmdb/kernel version mismatch (expected %d, "
71 "found %d)", KDI_VERSION
, kobj_kdi
.kdi_version
);
75 sema_init(&kctl
.kctl_wr_avail_sem
, 0, NULL
, SEMA_DRIVER
, NULL
);
76 mutex_init(&kctl
.kctl_wr_lock
, NULL
, MUTEX_DRIVER
, NULL
);
77 cv_init(&kctl
.kctl_wr_cv
, NULL
, CV_DRIVER
, NULL
);
78 mutex_init(&kctl
.kctl_lock
, NULL
, MUTEX_DRIVER
, NULL
);
80 kctl
.kctl_execname
= KCTL_EXECNAME
; /* XXX get from modctl? */
82 kctl
.kctl_state
= KCTL_ST_INACTIVE
;
84 kctl
.kctl_dseg
= kctl
.kctl_mrbase
= NULL
;
85 kctl
.kctl_dseg_size
= kctl
.kctl_mrsize
= 0;
97 mutex_destroy(&kctl
.kctl_lock
);
98 cv_destroy(&kctl
.kctl_wr_cv
);
99 mutex_destroy(&kctl
.kctl_wr_lock
);
100 sema_destroy(&kctl
.kctl_wr_avail_sem
);
104 kctl_set_state(uint_t state
)
106 uint_t ostate
= kctl
.kctl_state
;
108 /* forward progess only, please */
109 if (state
> ostate
) {
110 kctl_dprintf("new kctl state: %d", state
);
111 kctl
.kctl_state
= state
;
118 kctl_boot_dseg_alloc(caddr_t dsegaddr
, size_t dsegsz
)
121 * The Intel boot memory allocator will cleverly map us onto a 4M
122 * page if we request the whole 4M Intel segment at once. This
123 * will break physical memory r/w, so we break the request into
124 * chunks. The allocator isn't smart enough to combine requests,
125 * so it'll give us a bunch of 4k pages.
127 while (dsegsz
>= 1024*1024) {
128 size_t sz
= MIN(dsegsz
, 1024*1024);
130 if (BOP_ALLOC(kctl
.kctl_boot_ops
, dsegaddr
, sz
, BO_NO_ALIGN
) !=
142 kctl_dseg_alloc(caddr_t addr
, size_t sz
)
144 ASSERT(((uintptr_t)addr
& PAGEOFFSET
) == 0);
146 /* make sure there isn't something there already (like kadb) */
147 if (hat_getpfnum(kas
.a_hat
, addr
) != PFN_INVALID
)
150 if (segkmem_xalloc(NULL
, addr
, sz
, VM_NOSLEEP
, 0, segkmem_page_create
,
158 kctl_dseg_free(caddr_t addr
, size_t sz
)
160 ASSERT(((uintptr_t)addr
& PAGEOFFSET
) == 0);
162 segkmem_free(NULL
, addr
, sz
);
172 * We're now free to allocate the non-fixed portion of the debugger's
176 needed
= P2ROUNDUP(kctl
.kctl_memgoalsz
<= kctl
.kctl_dseg_size
? 0 :
177 kctl
.kctl_memgoalsz
- kctl
.kctl_dseg_size
, PAGESIZE
);
182 if ((base
= kmem_zalloc(needed
, KM_NOSLEEP
)) == NULL
) {
184 * If we're going to wedge the machine during debugger startup,
185 * at least let them know why it's going to wedge.
187 cmn_err(CE_WARN
, "retrying of kmdb allocation of 0x%lx bytes",
190 base
= kmem_zalloc(needed
, KM_SLEEP
);
193 kdi_dvec
->dv_memavail(base
, needed
);
194 kctl
.kctl_mrbase
= base
;
195 kctl
.kctl_mrsize
= needed
;
201 uint_t state
= kctl_set_state(KCTL_ST_DEACTIVATING
);
203 kctl_dprintf("cleaning up from state %d", state
);
205 ASSERT(kctl
.kctl_boot_loaded
== 0);
209 boothowto
&= ~RB_DEBUG
;
210 /* XXX there's a race here */
214 case KCTL_ST_DBG_ACTIVATED
:
220 case KCTL_ST_THREAD_STARTED
:
221 if (curthread
!= kctl
.kctl_wr_thr
) {
227 case KCTL_ST_MOD_NOTIFIERS
:
228 kctl_mod_notify_unreg();
231 case KCTL_ST_KCTL_PREACTIVATED
:
232 kctl_depreactivate_isadep();
235 case KCTL_ST_INITIALIZED
:
236 /* There's no kmdb_fini */
237 case KCTL_ST_DSEG_ALLOCED
:
238 kctl_dseg_free(kctl
.kctl_dseg
, kctl
.kctl_dseg_size
);
240 if (kctl
.kctl_mrbase
!= NULL
)
241 kmem_free(kctl
.kctl_mrbase
, kctl
.kctl_mrsize
);
245 kctl
.kctl_state
= KCTL_ST_INACTIVE
;
249 kctl_startup_modules(void)
254 * Normal module load and unload is now available. Prior to this point,
255 * we could only load modules, and that only when the debugger was being
258 * We'll need to prepare the modules we've already loaded (if any) for
259 * the brave new world in which boot is unmapped.
264 * Process any outstanding loads or unloads and prepare for automatic
265 * module loading and unloading.
267 (void) kctl_wr_process();
269 kctl_mod_notify_reg();
271 (void) kctl_set_state(KCTL_ST_MOD_NOTIFIERS
);
275 kctl_mod_loaded(modp
);
276 } while ((modp
= modp
->mod_next
) != &modules
);
280 kctl_startup_thread(void)
283 * Create the worker thread, which will handle future requests from the
288 (void) kctl_set_state(KCTL_ST_THREAD_STARTED
);
292 kctl_startup_boot(void)
294 struct modctl_list
*lp
, **lpp
;
297 if (kctl_wr_process() < 0) {
298 kctl_warn("kmdb: failed to load modules");
302 mutex_enter(&mod_lock
);
304 for (lpp
= kobj_linkmaps
; *lpp
!= NULL
; lpp
++) {
305 for (lp
= *lpp
; lp
!= NULL
; lp
= lp
->modl_next
) {
306 if ((rc
= kctl_mod_decompress(lp
->modl_modp
)) != 0) {
307 kctl_warn("kmdb: failed to decompress CTF data "
308 "for %s: %s", lp
->modl_modp
->mod_modname
,
314 mutex_exit(&mod_lock
);
320 kctl_startup_preactivate(void *romp
, const char *cfg
, const char **argv
)
325 kctl_auxv_init(&kav
, cfg
, argv
, romp
);
327 rc
= kmdb_init(kctl
.kctl_execname
, &kav
);
329 kctl_auxv_fini(&kav
);
332 return (EMDB_KNOLOAD
);
334 (void) kctl_set_state(KCTL_ST_INITIALIZED
);
336 if (kctl_preactivate_isadep() != 0)
339 (void) kctl_set_state(KCTL_ST_KCTL_PREACTIVATED
);
345 kctl_startup_activate(uint_t flags
)
347 kdi_debugvec_t
*dvec
;
350 kmdb_activate(&dvec
, flags
);
353 (void) kctl_set_state(KCTL_ST_DBG_ACTIVATED
);
356 * fill in a few remaining debugvec entries.
358 dvec
->dv_kctl_modavail
= kctl_startup_modules
;
359 dvec
->dv_kctl_thravail
= kctl_startup_thread
;
360 dvec
->dv_kctl_memavail
= kctl_memavail
;
362 kctl_activate_isadep(dvec
);
367 boothowto
|= RB_DEBUG
;
369 (void) kctl_set_state(KCTL_ST_ACTIVE
);
375 kctl_state_check(uint_t state
, uint_t ok_state
)
377 if (state
== ok_state
)
380 if (state
== KCTL_ST_INACTIVE
)
381 return (EMDB_KINACTIVE
);
382 else if (kctl
.kctl_state
> KCTL_ST_INACTIVE
&&
383 kctl
.kctl_state
< KCTL_ST_ACTIVE
)
384 return (EMDB_KACTIVATING
);
385 else if (kctl
.kctl_state
== KCTL_ST_ACTIVE
)
386 return (EMDB_KACTIVE
);
387 else if (kctl
.kctl_state
== KCTL_ST_DEACTIVATING
)
388 return (EMDB_KDEACTIVATING
);
394 kctl_deactivate(void)
398 mutex_enter(&kctl
.kctl_lock
);
400 if (kctl
.kctl_boot_loaded
) {
402 goto deactivate_done
;
405 if ((rc
= kctl_state_check(kctl
.kctl_state
, KCTL_ST_ACTIVE
)) != 0)
406 goto deactivate_done
;
408 kmdb_kdi_set_unload_request();
409 kmdb_kdi_kmdb_enter();
412 * The debugger will pass the request to the work thread, which will
418 mutex_exit(&kctl
.kctl_lock
);
424 * Called from krtld, this indicates that the user loaded kmdb at boot. We
425 * track activation states, but we don't attempt to clean up if activation
426 * fails, because boot debugger load failures are fatal.
428 * Further complicating matters, various kernel routines, such as bcopy and
429 * mutex_enter, assume the presence of some basic state. On SPARC, it's the
430 * presence of a valid curthread pointer. On AMD64, it's a valid curcpu
431 * pointer in GSBASE. We set up temporary versions of these before beginning
432 * activation, and tear them down when we're done.
435 kctl_boot_activate(struct bootops
*ops
, void *romp
, size_t memsz
,
440 old
= kctl_boot_tmpinit(); /* Set up temporary state */
443 kctl
.kctl_boot_ops
= ops
; /* must be set before kctl_init */
448 kctl
.kctl_boot_loaded
= 1;
450 kctl_dprintf("beginning kmdb initialization");
453 memsz
= KCTL_MEM_GOALSZ
;
455 kctl
.kctl_dseg
= kdi_segdebugbase
;
456 kctl
.kctl_dseg_size
=
457 memsz
> kdi_segdebugsize
? kdi_segdebugsize
: memsz
;
458 kctl
.kctl_memgoalsz
= memsz
;
460 if (kctl_boot_dseg_alloc(kctl
.kctl_dseg
, kctl
.kctl_dseg_size
) < 0) {
461 kctl_warn("kmdb: failed to allocate %lu-byte debugger area at "
462 "%p", kctl
.kctl_dseg_size
, (void *)kctl
.kctl_dseg
);
466 (void) kctl_set_state(KCTL_ST_DSEG_ALLOCED
);
468 if (kctl_startup_preactivate(romp
, NULL
, argv
) != 0 ||
469 kctl_startup_activate(KMDB_ACT_F_BOOT
)) {
470 kctl_warn("kmdb: failed to activate");
474 if (kctl_startup_boot() < 0)
477 kctl_dprintf("finished with kmdb initialization");
479 kctl_boot_tmpfini(old
);
481 kctl
.kctl_boot_ops
= NULL
;
487 kctl_modload_activate(size_t memsz
, const char *cfg
, uint_t flags
)
491 mutex_enter(&kctl
.kctl_lock
);
493 if ((rc
= kctl_state_check(kctl
.kctl_state
, KCTL_ST_INACTIVE
)) != 0) {
494 if ((flags
& KMDB_F_AUTO_ENTRY
) && rc
== EMDB_KACTIVE
) {
495 kmdb_kdi_kmdb_enter();
499 mutex_exit(&kctl
.kctl_lock
);
503 kctl
.kctl_flags
= flags
;
506 memsz
= KCTL_MEM_GOALSZ
;
508 kctl
.kctl_dseg
= kdi_segdebugbase
;
509 kctl
.kctl_dseg_size
=
510 memsz
> kdi_segdebugsize
? kdi_segdebugsize
: memsz
;
511 kctl
.kctl_memgoalsz
= memsz
;
513 if ((rc
= kctl_dseg_alloc(kctl
.kctl_dseg
, kctl
.kctl_dseg_size
)) != 0)
516 (void) kctl_set_state(KCTL_ST_DSEG_ALLOCED
);
518 if ((rc
= kctl_startup_preactivate(NULL
, cfg
, NULL
)) != 0)
521 kctl_startup_modules();
522 kctl_startup_thread();
524 if ((rc
= kctl_startup_activate(0)) != 0)
527 kctl_memavail(); /* Must be after kdi_dvec is set */
529 if (kctl
.kctl_flags
& KMDB_F_AUTO_ENTRY
)
530 kmdb_kdi_kmdb_enter();
532 mutex_exit(&kctl
.kctl_lock
);
537 mutex_exit(&kctl
.kctl_lock
);
542 * This interface will be called when drv/kmdb loads. When we get the call, one
543 * of two things will have happened:
545 * 1. The debugger was loaded at boot. We've progressed far enough into boot
546 * as to allow drv/kmdb to be loaded as a non-primary. Invocation of this
547 * interface is the signal to the debugger that it can start allowing things
548 * like dmod loading and automatic CTF decompression - things which require
549 * the system services that have now been started.
551 * 2. The debugger was loaded after boot. mdb opened /dev/kmdb, causing
552 * drv/kmdb to load, followed by misc/kmdb. Nothing has been set up yet,
553 * so we need to initialize. Activation will occur separately, so we don't
554 * have to worry about that.
557 kctl_attach(dev_info_t
*dip
)
559 kctl
.kctl_drv_dip
= dip
;
567 return (kctl
.kctl_state
== KCTL_ST_INACTIVE
? 0 : EBUSY
);
570 static struct modlmisc modlmisc
= {
575 static struct modlinkage modlinkage
= {
582 * Invoked only when debugger is loaded via modload - not invoked when debugger
583 * is loaded at boot. kctl_boot_activate needs to call anything (aside from
584 * mod_install) this function does.
592 return (mod_install(&modlinkage
));
596 _info(struct modinfo
*modinfop
)
598 return (mod_info(&modlinkage
, modinfop
));
606 return (mod_remove(&modlinkage
));