/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Ereport-handling routines for CPU errors
 */

#include <cmd_cpu.h>
#include <cmd.h>

#include <strings.h>
#include <fm/fmd_api.h>
#include <sys/fm/protocol.h>
#include <sys/async.h>
#ifdef sun4u
#include <sys/fm/cpu/UltraSPARC-III.h>
#include <cmd_Lxcache.h>
#include <cmd_opl.h>
#endif

/*
 * We follow the same algorithm for handling all L1$, TLB, and L2/L3 cache
 * tag events so we can have one common routine into which each handler
 * calls.  The two tests of (strcmp(serdnm, "") != 0) are used to eliminate
 * the need for a separate macro for UEs, which override SERD-engine
 * counting of CEs leading to the same fault.
 */
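/*
 * For example (using the handlers instantiated below): a correctable
 * icache ereport arrives with serdnm "icache", is recorded in that SERD
 * engine, and the case is solved only once the engine fires; an
 * uncorrectable fpu ereport arrives with serdnm "", so any existing SERD
 * state is destroyed and the case is solved on the first event.
 */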
static cmd_evdisp_t
cmd_cpuerr_common(fmd_hdl_t *hdl, fmd_event_t *ep, cmd_cpu_t *cpu,
    cmd_case_t *cc, cmd_ptrsubtype_t pstype, const char *serdnm,
    const char *serdn, const char *serdt, const char *fltnm,
    cmd_errcl_t clcode)
{
        const char *uuid;

        if (cc->cc_cp != NULL && fmd_case_solved(hdl, cc->cc_cp))
                return (CMD_EVD_REDUND);

        if (cc->cc_cp == NULL) {
                cc->cc_cp = cmd_case_create(hdl, &cpu->cpu_header, pstype,
                    &uuid);
                if (strcmp(serdnm, "") != 0) {
                        cc->cc_serdnm = cmd_cpu_serdnm_create(hdl, cpu,
                            serdnm);
                        fmd_serd_create(hdl, cc->cc_serdnm,
                            fmd_prop_get_int32(hdl, serdn),
                            fmd_prop_get_int64(hdl, serdt));
                }
        }

        if (strcmp(serdnm, "") != 0) {
                fmd_hdl_debug(hdl, "adding event to %s\n", cc->cc_serdnm);
                if (fmd_serd_record(hdl, cc->cc_serdnm, ep) == FMD_B_FALSE)
                        return (CMD_EVD_OK); /* serd engine hasn't fired yet */

                fmd_case_add_serd(hdl, cc->cc_cp, cc->cc_serdnm);
        } else {
                if (cc->cc_serdnm != NULL) {
                        fmd_hdl_debug(hdl,
                            "destroying existing %s state for class %x\n",
                            cc->cc_serdnm, clcode);
                        fmd_serd_destroy(hdl, cc->cc_serdnm);
                        fmd_hdl_strfree(hdl, cc->cc_serdnm);
                        cc->cc_serdnm = NULL;
                }
                fmd_case_reset(hdl, cc->cc_cp);
                fmd_case_add_ereport(hdl, cc->cc_cp, ep);
        }

        cmd_cpu_create_faultlist(hdl, cc->cc_cp, cpu, fltnm, NULL, 100);

        fmd_case_solve(hdl, cc->cc_cp);

        return (CMD_EVD_OK);
}
#define CMD_CPU_TAGHANDLER(name, casenm, ptr, ntname, fltname) \
cmd_evdisp_t \
cmd_##name(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, \
    const char *class, cmd_errcl_t clcode) \
{ \
        uint8_t level = clcode & CMD_ERRCL_LEVEL_EXTRACT; \
        cmd_cpu_t *cpu; \
 \
        clcode &= CMD_ERRCL_LEVEL_MASK; \
        if ((cpu = cmd_cpu_lookup_from_detector(hdl, nvl, class, \
            level)) == NULL || cpu->cpu_faulting) \
                return (CMD_EVD_UNUSED); \
 \
        if ((strstr(class, "ultraSPARC-IVplus.l3-thce") != 0) || \
            (strstr(class, "ultraSPARC-IVplus.thce") != 0)) { \
                return (cmd_us4plus_tag_err(hdl, ep, nvl, cpu, \
                    ptr, ntname "_n", ntname "_t", fltname, clcode)); \
        } \
        return (cmd_cpuerr_common(hdl, ep, cpu, &cpu->cpu_##casenm, \
            ptr, ntname, ntname "_n", ntname "_t", fltname, clcode)); \
}

#define CMD_CPU_SIMPLEHANDLER(name, casenm, ptr, ntname, fltname) \
cmd_evdisp_t \
cmd_##name(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, \
    const char *class, cmd_errcl_t clcode) \
{ \
        uint8_t level = clcode & CMD_ERRCL_LEVEL_EXTRACT; \
        cmd_cpu_t *cpu; \
 \
        clcode &= CMD_ERRCL_LEVEL_MASK; \
        if ((cpu = cmd_cpu_lookup_from_detector(hdl, nvl, class, \
            level)) == NULL || cpu->cpu_faulting) \
                return (CMD_EVD_UNUSED); \
 \
        return (cmd_cpuerr_common(hdl, ep, cpu, &cpu->cpu_##casenm, \
            ptr, ntname, ntname "_n", ntname "_t", fltname, clcode)); \
}
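/*
 * Each invocation below expands to a complete dispatch function.  For
 * example, CMD_CPU_SIMPLEHANDLER(icache, icache, CMD_PTR_CPU_ICACHE,
 * "icache", "icache") defines cmd_icache(), which looks up the detecting
 * CPU and hands the ereport to cmd_cpuerr_common() with the "icache" SERD
 * engine (tunables "icache_n" and "icache_t") and the "icache" fault name.
 */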
#ifdef sun4u
CMD_CPU_TAGHANDLER(txce, l2tag, CMD_PTR_CPU_L2TAG, "l2tag", "l2cachetag")
CMD_CPU_TAGHANDLER(l3_thce, l3tag, CMD_PTR_CPU_L3TAG, "l3tag", "l3cachetag")
#else /* sun4u */
CMD_CPU_SIMPLEHANDLER(txce, l2tag, CMD_PTR_CPU_L2TAG, "l2tag", "l2cachetag")
CMD_CPU_SIMPLEHANDLER(l3_thce, l3tag, CMD_PTR_CPU_L3TAG, "l3tag", "l3cachetag")
#endif /* sun4u */
CMD_CPU_SIMPLEHANDLER(icache, icache, CMD_PTR_CPU_ICACHE, "icache", "icache")
CMD_CPU_SIMPLEHANDLER(dcache, dcache, CMD_PTR_CPU_DCACHE, "dcache", "dcache")
CMD_CPU_SIMPLEHANDLER(pcache, pcache, CMD_PTR_CPU_PCACHE, "pcache", "pcache")
CMD_CPU_SIMPLEHANDLER(itlb, itlb, CMD_PTR_CPU_ITLB, "itlb", "itlb")
CMD_CPU_SIMPLEHANDLER(dtlb, dtlb, CMD_PTR_CPU_DTLB, "dtlb", "dtlb")
CMD_CPU_SIMPLEHANDLER(irc, ireg, CMD_PTR_CPU_IREG, "ireg", "ireg")
CMD_CPU_SIMPLEHANDLER(frc, freg, CMD_PTR_CPU_FREG, "freg", "freg")
CMD_CPU_SIMPLEHANDLER(mau, mau, CMD_PTR_CPU_MAU, "mau", "mau")
CMD_CPU_SIMPLEHANDLER(miscregs_ce, misc_regs, CMD_PTR_CPU_MISC_REGS,
    "misc_regs", "misc_reg")
CMD_CPU_SIMPLEHANDLER(l2c, l2data, CMD_PTR_CPU_L2DATA, "l2data", "l2data-c")

CMD_CPU_SIMPLEHANDLER(fpu, fpu, CMD_PTR_CPU_FPU, "", "fpu")
CMD_CPU_SIMPLEHANDLER(l2ctl, l2ctl, CMD_PTR_CPU_L2CTL, "", "l2cachectl")
CMD_CPU_SIMPLEHANDLER(iru, ireg, CMD_PTR_CPU_IREG, "", "ireg")
CMD_CPU_SIMPLEHANDLER(fru, freg, CMD_PTR_CPU_FREG, "", "freg")
CMD_CPU_SIMPLEHANDLER(miscregs_ue, misc_regs, CMD_PTR_CPU_MISC_REGS,
    "", "misc_reg")
CMD_CPU_SIMPLEHANDLER(l2u, l2data, CMD_PTR_CPU_L2DATA, "", "l2data-u")
CMD_CPU_SIMPLEHANDLER(lfu_ue, lfu, CMD_PTR_CPU_LFU, "", "lfu-u")
CMD_CPU_SIMPLEHANDLER(lfu_ce, lfu, CMD_PTR_CPU_LFU, "", "lfu-f")
CMD_CPU_SIMPLEHANDLER(lfu_pe, lfu, CMD_PTR_CPU_LFU, "", "lfu-p")
#ifdef sun4u
/*
 * The following macro handles UEs or CPU errors.
 * It handles error cases both with and without a "resource"
 * in the payload.
 *
 * If the "fltname" "core" is to be generated, the sibling CPUs
 * within the core will be added to the suspect list.
 * If the "fltname" "chip" is to be generated, the sibling CPUs
 * within the chip will be added to the suspect list.
 * If the "fltname" "strand" is to be generated, the strand
 * itself will be in the suspect list.
 */
#define CMD_OPL_UEHANDLER(name, casenm, ptr, fltname, has_rsrc) \
cmd_evdisp_t \
cmd_##name(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, \
    const char *class, cmd_errcl_t clcode) \
{ \
        cmd_cpu_t *cpu; \
        cmd_case_t *cc; \
        cmd_evdisp_t rc; \
        nvlist_t *rsrc = NULL; \
        uint8_t cpumask, version = 1; \
        uint8_t lookup_rsrc = has_rsrc; \
 \
        fmd_hdl_debug(hdl, \
            "Enter cmd_opl_ue_cpu for class %x\n", clcode); \
 \
        if (lookup_rsrc) { \
                if (nvlist_lookup_nvlist(nvl, \
                    FM_EREPORT_PAYLOAD_NAME_RESOURCE, &rsrc) != 0) \
                        return (CMD_EVD_BAD); \
 \
                if ((cpu = cmd_cpu_lookup(hdl, rsrc, class, \
                    CMD_CPU_LEVEL_THREAD)) == NULL || \
                    cpu->cpu_faulting) \
                        return (CMD_EVD_UNUSED); \
        } else { \
                if ((cpu = cmd_cpu_lookup_from_detector(hdl, nvl, class, \
                    CMD_CPU_LEVEL_THREAD)) == NULL || cpu->cpu_faulting) \
                        return (CMD_EVD_UNUSED); \
 \
                (void) nvlist_lookup_nvlist(nvl, \
                    FM_EREPORT_DETECTOR, &rsrc); \
        } \
 \
        if (nvlist_lookup_uint8(rsrc, FM_VERSION, &version) != 0 || \
            version > FM_CPU_SCHEME_VERSION || \
            nvlist_lookup_uint8(rsrc, FM_FMRI_CPU_MASK, &cpumask) != 0) \
                return (CMD_EVD_BAD); \
 \
        cc = &cpu->cpu_##casenm; \
        rc = cmd_opl_ue_cpu(hdl, ep, class, fltname, \
            ptr, cpu, cc, cpumask); \
        return (rc); \
}
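/*
 * For example, CMD_OPL_UEHANDLER(oplsdc, opl_sdc, CMD_PTR_CPU_UGESR_SDC,
 * "chip", 0) below defines cmd_oplsdc(): has_rsrc is 0, so the CPU is
 * found via the detector, and the "chip" fault name tells cmd_opl_ue_cpu()
 * to add the sibling CPUs within the chip to the suspect list.
 */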
/*
 * CPU errors without resource
 */
CMD_OPL_UEHANDLER(oplinv_urg, opl_inv_urg, CMD_PTR_CPU_UGESR_INV_URG, "core", 0)
CMD_OPL_UEHANDLER(oplcre, opl_cre, CMD_PTR_CPU_UGESR_CRE, "core", 0)
CMD_OPL_UEHANDLER(opltsb_ctx, opl_tsb_ctx, CMD_PTR_CPU_UGESR_TSB_CTX, "core", 0)
CMD_OPL_UEHANDLER(opltsbp, opl_tsbp, CMD_PTR_CPU_UGESR_TSBP, "core", 0)
CMD_OPL_UEHANDLER(oplpstate, opl_pstate, CMD_PTR_CPU_UGESR_PSTATE, "core", 0)
CMD_OPL_UEHANDLER(opltstate, opl_tstate, CMD_PTR_CPU_UGESR_TSTATE, "core", 0)
CMD_OPL_UEHANDLER(opliug_f, opl_iug_f, CMD_PTR_CPU_UGESR_IUG_F, "core", 0)
CMD_OPL_UEHANDLER(opliug_r, opl_iug_r, CMD_PTR_CPU_UGESR_IUG_R, "core", 0)
CMD_OPL_UEHANDLER(oplsdc, opl_sdc, CMD_PTR_CPU_UGESR_SDC, "chip", 0)
CMD_OPL_UEHANDLER(oplwdt, opl_wdt, CMD_PTR_CPU_UGESR_WDT, "core", 0)
CMD_OPL_UEHANDLER(opldtlb, opl_dtlb, CMD_PTR_CPU_UGESR_DTLB, "core", 0)
CMD_OPL_UEHANDLER(oplitlb, opl_itlb, CMD_PTR_CPU_UGESR_ITLB, "core", 0)
CMD_OPL_UEHANDLER(oplcore_err, opl_core_err, CMD_PTR_CPU_UGESR_CORE_ERR,
    "core", 0)
CMD_OPL_UEHANDLER(opldae, opl_dae, CMD_PTR_CPU_UGESR_DAE, "core", 0)
CMD_OPL_UEHANDLER(opliae, opl_iae, CMD_PTR_CPU_UGESR_IAE, "core", 0)
CMD_OPL_UEHANDLER(opluge, opl_uge, CMD_PTR_CPU_UGESR_UGE, "core", 0)

/*
 * UEs with resource
 */
CMD_OPL_UEHANDLER(oplinv_sfsr, opl_invsfsr, CMD_PTR_CPU_INV_SFSR, "strand", 1)
CMD_OPL_UEHANDLER(opluecpu_detcpu, oplue_detcpu, CMD_PTR_CPU_UE_DET_CPU,
    "core", 1)
CMD_OPL_UEHANDLER(opluecpu_detio, oplue_detio, CMD_PTR_CPU_UE_DET_IO, "core", 1)
CMD_OPL_UEHANDLER(oplmtlb, opl_mtlb, CMD_PTR_CPU_MTLB, "core", 1)
CMD_OPL_UEHANDLER(opltlbp, opl_tlbp, CMD_PTR_CPU_TLBP, "core", 1)
#endif /* sun4u */
static void
cmd_nop_hdlr(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
        fmd_hdl_debug(hdl, "nop train resolved for clcode %llx\n",
            xr->xr_clcode);
}
static void
cmd_xxu_hdlr(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
        const errdata_t *ed;
        cmd_cpu_t *cpu = xr->xr_cpu;
        cmd_case_t *cc;
        const char *uuid;
        nvlist_t *rsrc = NULL;

        cmd_fill_errdata(xr->xr_clcode, cpu, &cc, &ed);

        if (cpu->cpu_faulting) {
                CMD_STAT_BUMP(xxu_retr_flt);
                return;
        }

        if (cmd_afar_status_check(xr->xr_afar_status, xr->xr_clcode) < 0) {
                fmd_hdl_debug(hdl, "xxU dropped, afar not VALID\n");
                return;
        }

        if (cmd_cpu_synd_check(xr->xr_synd, xr->xr_clcode) < 0) {
                fmd_hdl_debug(hdl, "xxU/LDxU dropped due to syndrome\n");
                return;
        }

#ifdef sun4u
        /*
         * The UE cache is needed for sun4u only, because sun4u doesn't
         * poison uncorrectable data loaded into the L2/L3 cache.
         */
        if (cmd_cpu_uec_match(xr->xr_cpu, xr->xr_afar)) {
                fmd_hdl_debug(hdl, "ue matched in UE cache\n");
                CMD_STAT_BUMP(xxu_ue_match);
                return;
        }
#endif /* sun4u */

        /*
         * We didn't match in the UE cache.  We don't need to sleep for UE
         * arrival, as we've already slept once for the train match.
         */

        if (cc->cc_cp == NULL) {
                cc->cc_cp = cmd_case_create(hdl, &cpu->cpu_header, ed->ed_pst,
                    &uuid);
        } else if (cc->cc_serdnm != NULL) {
                fmd_hdl_debug(hdl, "destroying existing %s state\n",
                    cc->cc_serdnm);

                fmd_serd_destroy(hdl, cc->cc_serdnm);
                fmd_hdl_strfree(hdl, cc->cc_serdnm);
                cc->cc_serdnm = NULL;

                fmd_case_reset(hdl, cc->cc_cp);
        }

        if (xr->xr_rsrc_nvl != NULL && nvlist_dup(xr->xr_rsrc_nvl,
            &rsrc, 0) != 0) {
                fmd_hdl_abort(hdl, "failed to duplicate resource FMRI for "
                    "%s fault", ed->ed_fltnm);
        }

        fmd_case_add_ereport(hdl, cc->cc_cp, ep);

        cmd_cpu_create_faultlist(hdl, cc->cc_cp, cpu, ed->ed_fltnm, rsrc, 100);

        nvlist_free(rsrc);

        fmd_case_solve(hdl, cc->cc_cp);
}
static void
cmd_xxc_hdlr(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
        const errdata_t *ed;
        cmd_cpu_t *cpu = xr->xr_cpu;
        cmd_case_t *cc;
        const char *uuid;
        nvlist_t *rsrc = NULL;

#ifdef sun4u
        if (cmd_cache_ce_panther(hdl, ep, xr) == 0) {
                return;
        }
#endif

        cmd_fill_errdata(xr->xr_clcode, cpu, &cc, &ed);

        if (cpu->cpu_faulting || (cc->cc_cp != NULL &&
            fmd_case_solved(hdl, cc->cc_cp)))
                return;

        if (cc->cc_cp == NULL) {
                cc->cc_cp = cmd_case_create(hdl, &cpu->cpu_header, ed->ed_pst,
                    &uuid);
                cc->cc_serdnm = cmd_cpu_serdnm_create(hdl, cpu,
                    ed->ed_serd->cs_name);

                fmd_serd_create(hdl, cc->cc_serdnm, ed->ed_serd->cs_n,
                    ed->ed_serd->cs_t);
        }

        fmd_hdl_debug(hdl, "adding event to %s\n", cc->cc_serdnm);

        if (fmd_serd_record(hdl, cc->cc_serdnm, ep) == FMD_B_FALSE)
                return; /* serd engine hasn't fired yet */

        if (xr->xr_rsrc_nvl != NULL && nvlist_dup(xr->xr_rsrc_nvl,
            &rsrc, 0) != 0) {
                fmd_hdl_abort(hdl, "failed to duplicate resource FMRI for "
                    "%s fault", ed->ed_fltnm);
        }

        fmd_case_add_serd(hdl, cc->cc_cp, cc->cc_serdnm);
        cmd_cpu_create_faultlist(hdl, cc->cc_cp, cpu, ed->ed_fltnm, rsrc, 100);
        nvlist_free(rsrc);
        fmd_case_solve(hdl, cc->cc_cp);
}
/*
 * We're back from the timeout.  Check to see if this event was part of a
 * train.  If it was, make sure to only process the cause of the train.
 * If not, process the event directly.
 */
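/*
 * A "train" here is a set of related ereports that share an ENA/AFAR and
 * are tracked as a bit mask in a trw waiter.  Only the event whose class
 * code cmd_train_match() identifies as the cause of the train is handed
 * to hdlr(); on sun4u the remaining members of the train are discarded.
 */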
static void
cmd_xxcu_resolve(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep,
    cmd_xr_hdlr_f *hdlr)
{
        cmd_xxcu_trw_t *trw;
        cmd_errcl_t cause;
        uint64_t afar = 0;

        if (xr->xr_afar_status == AFLT_STAT_VALID)
                afar = xr->xr_afar;

        if ((trw = cmd_trw_lookup(xr->xr_ena,
            xr->xr_afar_status, afar)) == NULL) {
                fmd_hdl_debug(hdl, "cmd_trw_lookup: Not found\n");
                return;
        }

        fmd_hdl_debug(hdl, "found waiter with mask 0x%08llx\n", trw->trw_mask);

        trw->trw_flags |= CMD_TRW_F_DELETING;

        /*
         * In sun4v, the matching train rule is changed.  It matches only
         * a portion of the train mask, so we can't discard the rest of
         * the errors in the train mask.
         */
#ifdef sun4u
        if (trw->trw_flags & CMD_TRW_F_CAUSESEEN) {
                fmd_hdl_debug(hdl, "cause already seen -- discarding\n");
                goto done;
        }
#endif

        if ((cause = cmd_train_match(trw->trw_mask, xr->xr_clcode)) == 0) {
                /*
                 * We didn't match in a train, so we're going to process each
                 * event individually.
                 */
                fmd_hdl_debug(hdl, "didn't match in a train\n");
                hdlr(hdl, xr, ep);
                goto done;
        }

        fmd_hdl_debug(hdl, "found a match for train.  cause is %llx, "
            "this is %llx\n", cause, xr->xr_clcode);

        /*
         * We've got a train match.  If this event is the cause of the
         * train, process it.
         */
        if (cause == xr->xr_clcode) {
                trw->trw_flags |= CMD_TRW_F_CAUSESEEN;
                hdlr(hdl, xr, ep);
        }

done:
        cmd_trw_deref(hdl, trw);
}
void
cmd_xxc_resolve(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
        cmd_xxcu_resolve(hdl, xr, ep, cmd_xxc_hdlr);
}

void
cmd_xxu_resolve(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
        cmd_xxcu_resolve(hdl, xr, ep, cmd_xxu_hdlr);
}

void
cmd_nop_resolve(fmd_hdl_t *hdl, cmd_xr_t *xr, fmd_event_t *ep)
{
        cmd_xxcu_resolve(hdl, xr, ep, cmd_nop_hdlr);
}
cmd_evdisp_t
cmd_xxcu_initial(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class, cmd_errcl_t clcode, uint_t hdlrid)
{
        cmd_xxcu_trw_t *trw;
        cmd_case_t *cc;
        cmd_cpu_t *cpu;
        cmd_xr_t *xr;
        uint64_t ena;
        uint64_t afar;
        uint8_t level = clcode & CMD_ERRCL_LEVEL_EXTRACT;
        uint8_t afar_status;
        const errdata_t *ed = NULL;
        int ref_incremented = 0;

        clcode &= CMD_ERRCL_LEVEL_MASK; /* keep level bits out of train masks */

        if ((cpu = cmd_cpu_lookup_from_detector(hdl, nvl, class,
            level)) == NULL || cpu->cpu_faulting)
                return (CMD_EVD_UNUSED);

        cmd_fill_errdata(clcode, cpu, &cc, &ed);

        if (cc->cc_cp != NULL && fmd_case_solved(hdl, cc->cc_cp))
                return (CMD_EVD_REDUND);

        (void) nvlist_lookup_uint64(nvl, FM_EREPORT_ENA, &ena);

        if (cmd_afar_valid(hdl, nvl, clcode, &afar) != 0) {
                afar_status = AFLT_STAT_INVALID;
                afar = 0;
        } else {
                afar_status = AFLT_STAT_VALID;
        }

        fmd_hdl_debug(hdl, "scheduling %s (%llx) for redelivery\n",
            class, clcode);

        fmd_hdl_debug(hdl, "looking up ena %llx, afar %llx with\n", ena, afar);
        fmd_hdl_debug(hdl, "afar status of %02x\n", afar_status);

        if ((trw = cmd_trw_lookup(ena, afar_status, afar)) == NULL) {
                if ((trw = cmd_trw_alloc(ena, afar)) == NULL) {
                        fmd_hdl_debug(hdl, "failed to get new trw\n");
                        goto redeliver;
                }
        }

        if (trw->trw_flags & CMD_TRW_F_DELETING)
                goto redeliver;

        if (trw->trw_mask & clcode) {
                fmd_hdl_debug(hdl, "clcode %llx is already in trw "
                    "(mask %llx)\n", clcode, trw->trw_mask);
                return (CMD_EVD_UNUSED);
        }

        cmd_trw_ref(hdl, trw, clcode);
        ref_incremented++;

        fmd_hdl_debug(hdl, "trw rescheduled for train delivery\n");

redeliver:
        if ((xr = cmd_xr_create(hdl, ep, nvl, cpu, clcode)) == NULL) {
                fmd_hdl_debug(hdl, "cmd_xr_create failed");
                if (ref_incremented)
                        cmd_trw_deref(hdl, trw);
                return (CMD_EVD_BAD);
        }

        return (cmd_xr_reschedule(hdl, xr, hdlrid));
}
cmd_evdisp_t
cmd_xxu(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class,
    cmd_errcl_t clcode)
{
        return (cmd_xxcu_initial(hdl, ep, nvl, class, clcode, CMD_XR_HDLR_XXU));
}

cmd_evdisp_t
cmd_xxc(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class,
    cmd_errcl_t clcode)
{
        return (cmd_xxcu_initial(hdl, ep, nvl, class, clcode, CMD_XR_HDLR_XXC));
}

cmd_evdisp_t
cmd_nop_train(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class, cmd_errcl_t clcode)
{
        return (cmd_xxcu_initial(hdl, ep, nvl, class, clcode, CMD_XR_HDLR_NOP));
}

cmd_evdisp_t
cmd_miscregs_train(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
    const char *class, cmd_errcl_t clcode)
{
        return (cmd_xxcu_initial(hdl, ep, nvl, class, clcode,
            CMD_XR_HDLR_XXC));
}

void
cmd_cpuerr_close(fmd_hdl_t *hdl, void *arg)
{
        cmd_cpu_destroy(hdl, arg);
}