/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * DR memory support routines.
 */

#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/promif.h>
#include <sys/mem_config.h>
#include <vm/seg_kmem.h>

#include <sys/dr.h>
#include <sys/dr_util.h>
#include <sys/drmach.h>

extern struct memlist	*phys_install;

/* TODO: push this reference below drmach line */

/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "dr_mem_acpi.c %d";

static void	dr_init_mem_unit_data(dr_mem_unit_t *mp);

/*
 * dr_mem_unit_t.sbm_flags
 */
#define	DR_MFLAG_RESERVED	0x01	/* mem unit reserved for delete */
#define	DR_MFLAG_SOURCE		0x02	/* source brd of copy/rename op */
#define	DR_MFLAG_TARGET		0x04	/* target brd of copy/rename op */
#define	DR_MFLAG_RELOWNER	0x20	/* memory release (delete) owner */
#define	DR_MFLAG_RELDONE	0x40	/* memory release (delete) done */
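
/*
 * Conversion helpers between page frame numbers and 64-bit
 * physical byte addresses.
 */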
#define	_ptob64(p) ((uint64_t)(p) << PAGESHIFT)
#define	_b64top(b) ((pgcnt_t)((b) >> PAGESHIFT))

static struct memlist *
dr_get_memlist(dr_mem_unit_t *mp)
{
	struct memlist	*mlist = NULL;
	sbd_error_t	*err;
	static fn_t	f = "dr_get_memlist";
91 PR_MEM("%s for %s...\n", f
, mp
->sbm_cm
.sbdev_path
);

	/*
	 * Return cached memlist, if present.
	 * This memlist will be present following an
	 * unconfigure (a.k.a. detach) of this memunit.
	 * It should only be used in the case where a configure
	 * is bringing this memunit back in without going
	 * through the disconnect and connect states.
	 */
	if (mp->sbm_mlist) {
		PR_MEM("%s: found cached memlist\n", f);

		mlist = memlist_dup(mp->sbm_mlist);
	} else {
		uint64_t basepa = _ptob64(mp->sbm_basepfn);

		/* attempt to construct a memlist using phys_install */

		/* round down to slice base address */
		basepa &= ~mp->sbm_alignment_mask;

		/* get a copy of phys_install to edit */
		memlist_read_lock();
		mlist = memlist_dup(phys_install);
		memlist_read_unlock();

		/* trim lower irrelevant span */
		if (mlist)
			mlist = memlist_del_span(mlist, 0ull, basepa);

		/* trim upper irrelevant span */
		if (mlist) {
			uint64_t endpa, toppa;

			toppa = mp->sbm_slice_top;
			endpa = _ptob64(physmax + 1);
			if (endpa > toppa)
				mlist = memlist_del_span(mlist,
				    toppa, endpa - toppa);
		}

		if (mlist) {
			/* successfully built a memlist */
			PR_MEM("%s: derived memlist from phys_install\n", f);
		}

		/* if no mlist yet, try platform layer */
		if (!mlist) {
			err = drmach_mem_get_memlist(
			    mp->sbm_cm.sbdev_id, &mlist);
			if (err) {
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
				mlist = NULL;	/* paranoia */
			}
		}
	}
150 PR_MEM("%s: memlist for %s\n", f
, mp
->sbm_cm
.sbdev_path
);
151 PR_MEMLIST_DUMP(mlist
);

/*ARGSUSED*/
void
dr_release_mem(dr_common_unit_t *cp)
{
}

void
dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
{
	dr_mem_unit_t	*mp = (dr_mem_unit_t *)cp;
	struct memlist	*ml, *mc;
	sbd_error_t	*err;
	uint64_t	dr_physmax;
	static fn_t	f = "dr_attach_mem";
171 PR_MEM("%s...\n", f
);

	dr_lock_status(hp->h_bd);
	err = drmach_configure(cp->sbdev_id, 0);
	dr_unlock_status(hp->h_bd);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	ml = dr_get_memlist(mp);

	/* Skip memory with address above plat_dr_physmax or kpm_size */
	dr_physmax = plat_dr_physmax ? ptob(plat_dr_physmax) : UINT64_MAX;
	if (kpm_size < dr_physmax)
		dr_physmax = kpm_size;
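	/*
	 * The kernel's kpm segment keeps a direct mapping of physical
	 * memory, so spans beyond kpm_size (or the platform limit) are
	 * trimmed here rather than handed to the VM subsystem.
	 */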
	ml = memlist_del_span(ml, dr_physmax, UINT64_MAX - dr_physmax);

	for (mc = ml; mc; mc = mc->ml_next) {
		int rv;
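
		/*
		 * Hand the span to the VM subsystem; this allocates page
		 * structures and makes the new pages available to the
		 * allocator.
		 */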
		rv = kphysm_add_memory_dynamic(
		    (pfn_t)btop(mc->ml_address),
		    (pgcnt_t)btop(mc->ml_size));
		if (rv != KPHYSM_OK) {
			/*
			 * translate kphysm error and
			 * store in devlist error
			 */
			switch (rv) {
			case KPHYSM_ERESOURCE:
				rv = ESBD_NOMEM;
				break;

			case KPHYSM_EFAULT:
				rv = ESBD_FAULT;
				break;

			default:
				rv = ESBD_INTERNAL;
				break;
			}

			if (rv == ESBD_INTERNAL) {
				DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
			} else
				dr_dev_err(CE_WARN, &mp->sbm_cm, rv);
			break;
		}

		err = drmach_mem_add_span(
		    mp->sbm_cm.sbdev_id, mc->ml_address, mc->ml_size);
		if (err)
			DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
	}

	memlist_delete(ml);

	dr_init_mem_unit_data(mp);

	/* back out if configure failed */
	if (mp->sbm_cm.sbdev_error != NULL) {
		dr_lock_status(hp->h_bd);
		err = drmach_unconfigure(cp->sbdev_id, 0);
		if (err)
			sbd_err_clear(&err);
		dr_unlock_status(hp->h_bd);
	}
}

/*ARGSUSED*/
void
dr_detach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
{
}

/*
 * This routine acts as a wrapper for kphysm_del_span_query in order to
 * support potential memory holes in a board's physical address space.
 * It calls kphysm_del_span_query for each node in a memlist and accumulates
 * the results in *mp.
 */
static int
dr_del_mlist_query(struct memlist *mlist, memquery_t *mp)
{
	int rv = 0;

	if (mlist == NULL)
		cmn_err(CE_WARN, "dr_del_mlist_query: mlist=NULL\n");
	mp->phys_pages = 0;
	mp->managed = 0;
	mp->nonrelocatable = 0;
	mp->first_nonrelocatable = 0;
	mp->last_nonrelocatable = 0;

	return (rv);
}

/*
 * NOTE: This routine is only partially smart about multiple
 *	 mem-units.  Need to make the mem-status structure smart
 *	 about them also.
 */
int
dr_mem_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		m, mix;
	memquery_t	mq;
	dr_board_t	*bp;
	dr_mem_unit_t	*mp;
	sbd_mem_stat_t	*msp;
	static fn_t	f = "dr_mem_status";

	bp = hp->h_bd;
	devset &= DR_DEVS_PRESENT(bp);

	for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) {
		int		rv;
		sbd_error_t	*err;
		drmach_status_t	pstat;
		dr_mem_unit_t	*p_mp;

		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0)
			continue;

		mp = dr_get_mem_unit(bp, m);

		if (mp->sbm_cm.sbdev_state == DR_STATE_EMPTY) {
			/* present, but not fully initialized */
			continue;
		}

		if (mp->sbm_cm.sbdev_id == (drmachid_t)0)
			continue;

		/* fetch platform status */
		err = drmach_status(mp->sbm_cm.sbdev_id, &pstat);
		if (err) {
			DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
			continue;
		}

		msp = &dsp->d_mem;
		bzero((caddr_t)msp, sizeof (*msp));

		(void) strlcpy(msp->ms_cm.c_id.c_name, pstat.type,
		    sizeof (msp->ms_cm.c_id.c_name));
		msp->ms_cm.c_id.c_type = mp->sbm_cm.sbdev_type;
		msp->ms_cm.c_id.c_unit = mp->sbm_cm.sbdev_unum;
		msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
		msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy | pstat.busy;
		msp->ms_cm.c_time = mp->sbm_cm.sbdev_time;
		msp->ms_cm.c_ostate = mp->sbm_cm.sbdev_ostate;

		msp->ms_totpages = mp->sbm_npages;
		msp->ms_basepfn = mp->sbm_basepfn;
		msp->ms_pageslost = mp->sbm_pageslost;
		msp->ms_cage_enabled = kcage_on;
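
		/*
		 * If this unit is reserved for a copy/rename operation,
		 * report its peer unit so user-level tools can correlate
		 * the two attachment points.
		 */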
		if (mp->sbm_flags & DR_MFLAG_RESERVED)
			p_mp = mp->sbm_peer;
		else
			p_mp = NULL;

		if (p_mp == NULL) {
			msp->ms_peer_is_target = 0;
			msp->ms_peer_ap_id[0] = '\0';
		} else if (p_mp->sbm_flags & DR_MFLAG_RESERVED) {
			char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			char *minor;

			/*
			 * b_dip doesn't have to be held for ddi_pathname()
			 * because the board struct (dr_board_t) will be
			 * destroyed before b_dip detaches.
			 */
			(void) ddi_pathname(bp->b_dip, path);
			minor = strchr(p_mp->sbm_cm.sbdev_path, ':');
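
			/*
			 * The peer attachment point id is the board's
			 * device path with the peer's minor name appended.
			 */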
			(void) snprintf(msp->ms_peer_ap_id,
			    sizeof (msp->ms_peer_ap_id), "%s%s",
			    path, (minor == NULL) ? "" : minor);

			kmem_free(path, MAXPATHLEN);

			if (p_mp->sbm_flags & DR_MFLAG_TARGET)
				msp->ms_peer_is_target = 1;
		}

		/*
		 * kphysm_del_span_query can report non-reloc pages = total
		 * pages for memory that is not yet configured
		 */
		if (mp->sbm_cm.sbdev_state != DR_STATE_UNCONFIGURED) {
			struct memlist *ml;

			ml = dr_get_memlist(mp);
			rv = ml ? dr_del_mlist_query(ml, &mq) : -1;

			if (rv == KPHYSM_OK) {
				msp->ms_managed_pages = mq.managed;
				msp->ms_noreloc_pages = mq.nonrelocatable;
				msp->ms_noreloc_first =
				    mq.first_nonrelocatable;
				msp->ms_noreloc_last =
				    mq.last_nonrelocatable;
				msp->ms_cm.c_sflags = 0;
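
				/*
				 * Non-relocatable pages force a copy-rename
				 * on unconfigure; note in the status flags
				 * when that would require a system suspend.
				 */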
				if (mq.nonrelocatable &&
				    drmach_copy_rename_need_suspend(
				    mp->sbm_cm.sbdev_id)) {
					SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE,
					    msp->ms_cm.c_sflags);
				}
			} else {
				PR_MEM("%s: kphysm_del_span_query() = %d\n",
				    f, rv);
			}

			if (ml != NULL)
				memlist_delete(ml);
		}

		/*
		 * Check source unit state during copy-rename
		 */
		if ((mp->sbm_flags & DR_MFLAG_SOURCE) &&
		    (mp->sbm_cm.sbdev_state == DR_STATE_UNREFERENCED ||
		    mp->sbm_cm.sbdev_state == DR_STATE_RELEASE))
			msp->ms_cm.c_ostate = SBD_STAT_CONFIGURED;

		mix++;
		dsp++;
	}

	return (mix);
}

int
dr_pre_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		err_flag = 0;
	int		d;
	sbd_error_t	*err;
	static fn_t	f = "dr_pre_attach_mem";

	PR_MEM("%s...\n", f);

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];
		dr_state_t	state;

		cmn_err(CE_CONT, "OS configure %s", mp->sbm_cm.sbdev_path);

		state = mp->sbm_cm.sbdev_state;
		switch (state) {
		case DR_STATE_UNCONFIGURED:
			PR_MEM("%s: recovering from UNCONFIG for %s\n",
			    f, mp->sbm_cm.sbdev_path);

			/* use memlist cached by dr_post_detach_mem_unit */
			ASSERT(mp->sbm_mlist != NULL);
			PR_MEM("%s: re-configuring cached memlist for %s:\n",
			    f, mp->sbm_cm.sbdev_path);
			PR_MEMLIST_DUMP(mp->sbm_mlist);

			/* kphysm del handle should have been freed */
			ASSERT((mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);

			/*FALLTHROUGH*/

		case DR_STATE_CONNECTED:
			PR_MEM("%s: reprogramming mem hardware on %s\n",
			    f, mp->sbm_cm.sbdev_bp->b_path);

			PR_MEM("%s: enabling %s\n",
			    f, mp->sbm_cm.sbdev_path);

			err = drmach_mem_enable(mp->sbm_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
				err_flag = 1;
			}
			break;

		default:
			dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_STATE);
			err_flag = 1;
			break;
		}

		/* exit for loop if error encountered */
		if (err_flag)
			break;
	}

	return (err_flag ? -1 : 0);
}

int
dr_post_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		d;
	static fn_t	f = "dr_post_attach_mem";

	PR_MEM("%s...\n", f);

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];
		struct memlist	*mlist, *ml;

		mlist = dr_get_memlist(mp);

		/*
		 * Verify the memory really did successfully attach
		 * by checking for its existence in phys_install.
		 */
		memlist_read_lock();
		if (memlist_intersect(phys_install, mlist) == 0) {
			memlist_read_unlock();

			DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);

			PR_MEM("%s: %s memlist not in phys_install",
			    f, mp->sbm_cm.sbdev_path);

			memlist_delete(mlist);
			continue;
		}
		memlist_read_unlock();

		for (ml = mlist; ml != NULL; ml = ml->ml_next) {
			sbd_error_t *err;

			err = drmach_mem_add_span(
			    mp->sbm_cm.sbdev_id,
			    ml->ml_address,
			    ml->ml_size);
			if (err)
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
		}

		memlist_delete(mlist);

		/*
		 * Destroy cached memlist, if any.
		 * There will be a cached memlist in sbm_mlist if
		 * this board is being configured directly after
		 * an unconfigure.
		 * To support this transition, dr_post_detach_mem
		 * left a copy of the last known memlist in sbm_mlist.
		 * This memlist could differ from any derived from
		 * hardware if while this memunit was last configured
		 * the system detected and deleted bad pages from
		 * phys_install.  The location of those bad pages
		 * will be reflected in the cached memlist.
		 */
		if (mp->sbm_mlist) {
			memlist_delete(mp->sbm_mlist);
			mp->sbm_mlist = NULL;
		}
	}

	return (0);
}

/*ARGSUSED*/
int
dr_pre_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	return (-1);
}

/*ARGSUSED*/
int
dr_post_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	return (-1);
}

/*
 * Successful return from this function will have the memory
 * handle in bp->b_dev[..mem-unit...].sbm_memhandle allocated
 * and waiting.  This routine's job is to select the memory that
 * actually has to be released (detached), which may not necessarily
 * be the same memory node that came in devlist[],
 * i.e. a copy-rename is needed.
 */
/*ARGSUSED*/
int
dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	return (-1);
}

/*ARGSUSED*/
void
dr_release_mem_done(dr_common_unit_t *cp)
{
}

/*ARGSUSED*/
int
dr_disconnect_mem(dr_mem_unit_t *mp)
{
	return (-1);
}

/*ARGSUSED*/
int
dr_cancel_mem(dr_mem_unit_t *s_mp)
{
	return (-1);
}

void
dr_init_mem_unit(dr_mem_unit_t *mp)
{
	dr_state_t	new_state;
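
	/*
	 * Derive the unit's DR state from how far attachment has
	 * progressed: attached -> CONFIGURED, present -> CONNECTED,
	 * known to the platform -> OCCUPIED, otherwise EMPTY.
	 */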
	if (DR_DEV_IS_ATTACHED(&mp->sbm_cm)) {
		new_state = DR_STATE_CONFIGURED;
		mp->sbm_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&mp->sbm_cm)) {
		new_state = DR_STATE_CONNECTED;
		mp->sbm_cm.sbdev_cond = SBD_COND_OK;
	} else if (mp->sbm_cm.sbdev_id != (drmachid_t)0) {
		new_state = DR_STATE_OCCUPIED;
	} else {
		new_state = DR_STATE_EMPTY;
	}

	if (DR_DEV_IS_PRESENT(&mp->sbm_cm))
		dr_init_mem_unit_data(mp);

	/* delay transition until fully initialized */
	dr_device_transition(&mp->sbm_cm, new_state);
}

static void
dr_init_mem_unit_data(dr_mem_unit_t *mp)
{
	drmachid_t		id = mp->sbm_cm.sbdev_id;
	drmach_mem_info_t	minfo;
	sbd_error_t		*err;
	static fn_t		f = "dr_init_mem_unit_data";

	PR_MEM("%s...\n", f);

	/* a little sanity checking */
	ASSERT(mp->sbm_peer == NULL);
	ASSERT(mp->sbm_flags == 0);

	if (err = drmach_mem_get_info(id, &minfo)) {
		DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
		return;
	}
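
	/* cache the platform-reported memory geometry on the mem unit */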
	mp->sbm_basepfn = _b64top(minfo.mi_basepa);
	mp->sbm_npages = _b64top(minfo.mi_size);
	mp->sbm_alignment_mask = minfo.mi_alignment_mask;
	mp->sbm_slice_base = minfo.mi_slice_base;
	mp->sbm_slice_top = minfo.mi_slice_top;
	mp->sbm_slice_size = minfo.mi_slice_size;

	PR_MEM("%s: %s (basepfn = 0x%lx, npgs = %ld)\n",
	    f, mp->sbm_cm.sbdev_path, mp->sbm_basepfn, mp->sbm_npages);
}