dmake: do not set MAKEFLAGS=k
[unleashed/tickless.git] / usr / src / cmd / mdb / common / kmdb / kctl / kctl_dmod.c
blob9f2b90587fa9d6c0755dc679bfd2c6531a578ce4
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Driver-side functions for loading and unloading dmods.
 */
30 #include <sys/types.h>
31 #include <sys/kobj.h>
32 #include <sys/kobj_impl.h>
33 #include <sys/modctl.h>
34 #include <sys/systm.h>
35 #include <sys/ctf_api.h>
36 #include <sys/kmdb.h>
38 #include <kmdb/kctl/kctl.h>
39 #include <kmdb/kctl/kctl_wr.h>
40 #include <kmdb/kmdb_wr_impl.h>
41 #include <kmdb/kmdb_kdi.h>
42 #include <mdb/mdb_errno.h>
44 struct modctl *kdi_dmods;
47 * When a load is attempted, a check is first made of the modules on the
48 * kctl_dmods list. If a module is found, the load will not proceed.
49 * kctl_dmods_lock must be held while traversing kctl_dmods, and while adding
50 * to and subtracting from it.
52 static struct modctl kctl_dmods;
53 static kmutex_t kctl_dmods_lock;
55 static kmdb_wr_path_t *kctl_dmod_path;
58 * Used to track outstanding driver-initiated load notifications. These
59 * notifications have been allocated by driver, and thus must be freed by the
60 * driver in the event of an emergency unload. If we don't free them free
61 * them ourselves, they'll leak. Granted, the world is probably melting down
62 * at that point, but there's no reason why we shouldn't tidy up the deck
63 * chairs before we go.
65 static kmdb_wr_load_t *kctl_dmod_loads;
66 static kmutex_t kctl_dmod_loads_lock;
68 static int
69 kctl_find_module(char *modname, char *fullname, size_t fullnamelen)
71 intptr_t fd;
72 int i;
74 /* If they gave us an absolute path, we don't need to search */
75 if (modname[0] == '/') {
76 if (strlen(modname) + 1 > fullnamelen) {
77 cmn_err(CE_WARN, "Can't load dmod %s - name too long",
78 modname);
79 return (0);
82 if ((fd = kobj_open(modname)) == -1)
83 return (0);
84 kobj_close(fd);
86 (void) strcpy(fullname, modname);
88 return (1);
91 for (i = 0; kctl_dmod_path->dpth_path[i] != NULL; i++) {
92 const char *path = kctl_dmod_path->dpth_path[i];
94 if (strlen(path) + 1 + strlen(modname) + 1 > fullnamelen) {
95 kctl_dprintf("Can't load dmod from %s/%s - "
96 "name too long", path, modname);
97 continue;
100 (void) snprintf(fullname, fullnamelen, "%s/%s", path, modname);
102 if ((fd = kobj_open(fullname)) == -1)
103 continue;
105 kobj_close(fd);
107 kctl_dprintf("kobj_open %s found", fullname);
109 /* Found it */
110 return (1);
113 /* No luck */
114 return (0);
117 static void
118 kctl_dlr_free(kmdb_wr_load_t *dlr)
120 if (dlr->dlr_node.wn_flags & WNFLAGS_NOFREE)
121 return;
123 kctl_strfree(dlr->dlr_fname);
124 kmem_free(dlr, sizeof (kmdb_wr_load_t));
128 kctl_dmod_load(kmdb_wr_load_t *dlr)
130 struct modctl *modp;
131 char modpath[MAXPATHLEN];
132 const char *modname = kctl_basename(dlr->dlr_fname);
133 int rc;
135 mutex_enter(&kctl_dmods_lock);
137 /* Have we already loaded this dmod? */
138 for (modp = kctl_dmods.mod_next; modp != &kctl_dmods;
139 modp = modp->mod_next) {
140 if (strcmp(modname, modp->mod_modname) == 0) {
141 mutex_exit(&kctl_dmods_lock);
142 dlr->dlr_errno = EEXIST;
143 return (-1);
148 * If we find something that looks like a dmod, create a modctl for it,
149 * and add said modctl to our dmods list. This will allow us to drop
150 * the dmods lock, while still preventing duplicate loads. If we aren't
151 * able to actually load the dmod, we can always remove the modctl
152 * later.
154 if (!kctl_find_module(dlr->dlr_fname, modpath, sizeof (modpath))) {
155 mutex_exit(&kctl_dmods_lock);
156 dlr->dlr_errno = ENOENT;
157 return (-1);
160 modp = kobj_zalloc(sizeof (struct modctl), KM_SLEEP);
162 modp->mod_filename = kctl_strdup(modpath);
163 modp->mod_modname = kctl_basename(modp->mod_filename);
164 modp->mod_busy = 1;
165 modp->mod_loadflags |= MOD_NOAUTOUNLOAD | MOD_NONOTIFY;
166 modp->mod_next = &kctl_dmods;
167 modp->mod_prev = kctl_dmods.mod_prev;
168 modp->mod_prev->mod_next = modp;
169 kctl_dmods.mod_prev = modp;
171 mutex_exit(&kctl_dmods_lock);
173 if (kctl.kctl_boot_ops == NULL)
174 rc = kobj_load_module(modp, 0);
175 else
176 rc = kobj_load_primary_module(modp);
178 if (rc != 0) {
179 kctl_warn("failed to load dmod %s", modp->mod_modname);
181 if (kctl.kctl_boot_ops == NULL)
182 mod_release_requisites(modp);
184 mutex_enter(&kctl_dmods_lock);
185 modp->mod_next->mod_prev = modp->mod_prev;
186 modp->mod_prev->mod_next = modp->mod_next;
187 mutex_exit(&kctl_dmods_lock);
189 kctl_strfree(modp->mod_filename);
190 kobj_free(modp, sizeof (struct modctl));
192 dlr->dlr_errno = EMDB_NOMOD;
193 return (-1);
197 * It worked! If the module has any CTF data, decompress it, and make a
198 * note of the load.
200 mutex_enter(&mod_lock);
201 if ((rc = kctl_mod_decompress(modp)) != 0) {
202 kctl_warn("failed to decompress CTF data for dmod %s: %s",
203 modpath, ctf_errmsg(rc));
205 mutex_exit(&mod_lock);
207 kctl_dprintf("loaded dmod %s at %p", modpath, modp);
209 modp->mod_ref = 1;
210 modp->mod_loaded = 1;
212 dlr->dlr_modctl = modp;
214 return (0);
218 * Driver-initiated loads. Load the module and announce it to the debugger.
220 void
221 kctl_dmod_autoload(const char *fname)
223 kmdb_wr_load_t *dlr;
225 dlr = kobj_zalloc(sizeof (kmdb_wr_load_t), KM_SLEEP);
226 dlr->dlr_node.wn_task = WNTASK_DMOD_LOAD;
227 dlr->dlr_fname = kctl_strdup(fname);
230 * If we're loading at boot, the kmdb_wr_load_t will have been
231 * "allocated" by krtld, and will thus not be under the control of
232 * kmem. We need to ensure that we don't attempt to free it when
233 * we get it back from the debugger.
235 if (kctl.kctl_boot_ops != NULL)
236 dlr->dlr_node.wn_flags |= WNFLAGS_NOFREE;
238 if (kctl_dmod_load(dlr) < 0) {
239 kctl_dlr_free(dlr);
240 return;
244 * Add to the list of open driver-initiated loads. We need to track
245 * these so we can free them (and thus avoid leaks) in the event that
246 * the debugger needs to be blown away before it can return them.
248 mutex_enter(&kctl_dmod_loads_lock);
249 dlr->dlr_next = kctl_dmod_loads;
250 if (kctl_dmod_loads != NULL)
251 kctl_dmod_loads->dlr_prev = dlr;
252 kctl_dmod_loads = dlr;
253 mutex_exit(&kctl_dmod_loads_lock);
255 kmdb_wr_debugger_notify(dlr);
258 void
259 kctl_dmod_load_all(void)
262 * The standard list of modules isn't populated until the tail end of
263 * kobj_init(). Prior to that point, the only available list is that of
264 * primaries. We'll use that if the normal list isn't ready yet.
266 if (modules.mod_mp == NULL) {
267 /* modules hasn't been initialized yet -- use primaries */
268 struct modctl_list *ml;
270 for (ml = kobj_linkmaps[KOBJ_LM_PRIMARY]; ml != NULL;
271 ml = ml->modl_next)
272 kctl_dmod_autoload(ml->modl_modp->mod_modname);
274 } else {
275 struct modctl *modp = &modules;
277 do {
278 if (modp->mod_mp != NULL)
279 kctl_dmod_autoload(modp->mod_modname);
280 } while ((modp = modp->mod_next) != &modules);
284 void
285 kctl_dmod_load_ack(kmdb_wr_load_t *dlr)
287 /* Remove from the list of open driver-initiated requests */
288 mutex_enter(&kctl_dmod_loads_lock);
289 if (dlr->dlr_prev == NULL)
290 kctl_dmod_loads = dlr->dlr_next;
291 else
292 dlr->dlr_prev->dlr_next = dlr->dlr_next;
294 if (dlr->dlr_next != NULL)
295 dlr->dlr_next->dlr_prev = dlr->dlr_prev;
296 mutex_exit(&kctl_dmod_loads_lock);
298 kctl_dlr_free(dlr);
301 static int
302 kctl_dmod_unload_common(struct modctl *modp)
304 struct modctl *m;
306 kctl_dprintf("unloading dmod %s", modp->mod_modname);
308 mutex_enter(&kctl_dmods_lock);
309 for (m = kctl_dmods.mod_next; m != &kctl_dmods; m = m->mod_next) {
310 if (m == modp)
311 break;
313 mutex_exit(&kctl_dmods_lock);
315 if (m != modp)
316 return (ENOENT);
318 /* Found it */
319 modp->mod_ref = 0;
320 modp->mod_loaded = 0;
322 kobj_unload_module(modp);
324 mod_release_requisites(modp);
326 /* Remove it from our dmods list */
327 mutex_enter(&kctl_dmods_lock);
328 modp->mod_next->mod_prev = modp->mod_prev;
329 modp->mod_prev->mod_next = modp->mod_next;
330 mutex_exit(&kctl_dmods_lock);
332 kctl_strfree(modp->mod_filename);
333 kmem_free(modp, sizeof (struct modctl));
335 return (0);
338 void
339 kctl_dmod_unload(kmdb_wr_unload_t *dur)
341 int rc;
343 if ((rc = kctl_dmod_unload_common(dur->dur_modctl)) != 0) {
344 cmn_err(CE_WARN, "unexpected dmod unload failure: %d", rc);
345 dur->dur_errno = rc;
350 * This will be called during shutdown. The debugger has been stopped, we're
351 * off the module notification list, and we've already processed everything in
352 * the driver's work queue. We should have received (and processed) unload
353 * requests for each of the dmods we've loaded. To be safe, however, we'll
354 * double-check.
356 * If we're doing an emergency shutdown, there may be outstanding
357 * driver-initiated messages that haven't been returned to us. The debugger is
358 * dead, so it's not going to be returning them. We'll leak them unless we
359 * find and free them ourselves.
361 void
362 kctl_dmod_unload_all(void)
364 kmdb_wr_load_t *dlr;
365 struct modctl *modp;
367 while ((modp = kctl_dmods.mod_next) != &kctl_dmods)
368 (void) kctl_dmod_unload_common(modp);
370 while ((dlr = kctl_dmod_loads) != NULL) {
371 kctl_dmod_loads = dlr->dlr_next;
373 kctl_dprintf("freed orphan load notification for %s",
374 dlr->dlr_fname);
375 kctl_dlr_free(dlr);
379 kmdb_wr_path_t *
380 kctl_dmod_path_set(kmdb_wr_path_t *pth)
382 kmdb_wr_path_t *opth;
384 if (kctl.kctl_flags & KMDB_F_DRV_DEBUG) {
385 if (pth != NULL) {
386 int i;
387 kctl_dprintf("changing dmod path to: %p", pth);
388 for (i = 0; pth->dpth_path[i] != NULL; i++)
389 kctl_dprintf(" %s", pth->dpth_path[i]);
390 } else {
391 kctl_dprintf("changing dmod path to NULL");
395 opth = kctl_dmod_path;
396 kctl_dmod_path = pth;
398 return (opth);
401 void
402 kctl_dmod_path_reset(void)
404 kmdb_wr_path_t *pth;
406 if ((pth = kctl_dmod_path_set(NULL)) != NULL) {
407 WR_ACK(pth);
408 kmdb_wr_debugger_notify(pth);
412 void
413 kctl_dmod_sync(void)
415 struct modctl *modp;
418 * kobj_sync() has no visibility into our dmods, so we need to
419 * explicitly tell krtld to export the portions of our dmods that were
420 * allocated using boot scratch memory.
422 for (modp = kctl_dmods.mod_next; modp != &kctl_dmods;
423 modp = modp->mod_next)
424 kobj_export_module(modp->mod_mp);
427 void
428 kctl_dmod_init(void)
430 mutex_init(&kctl_dmod_loads_lock, NULL, MUTEX_DRIVER, NULL);
431 mutex_init(&kctl_dmods_lock, NULL, MUTEX_DRIVER, NULL);
433 bzero(&kctl_dmods, sizeof (struct modctl));
434 kctl_dmods.mod_next = kctl_dmods.mod_prev = &kctl_dmods;
435 kdi_dmods = &kctl_dmods;
438 void
439 kctl_dmod_fini(void)
441 mutex_destroy(&kctl_dmods_lock);
442 mutex_destroy(&kctl_dmod_loads_lock);
443 kdi_dmods = NULL;