 * Copyright (c) 2006 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1 as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qapi-visit-acpi.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "sysemu/runstate.h"
#include "trace.h"

struct acpi_table_header {
    uint16_t _length;         /* our length, not actual part of the hdr */
                              /* allows easier parsing for fw_cfg clients */
    char sig[4]
         QEMU_NONSTRING;      /* ACPI signature (4 ASCII characters) */
    uint32_t length;          /* Length of table, in bytes, including header */
    uint8_t  revision;        /* ACPI Specification minor version # */
    uint8_t  checksum;        /* To make sum of entire table == 0 */
    char oem_id[6]
         QEMU_NONSTRING;      /* OEM identification */
    char oem_table_id[8]
         QEMU_NONSTRING;      /* OEM table identification */
    uint32_t oem_revision;    /* OEM revision number */
    char asl_compiler_id[4]
         QEMU_NONSTRING;      /* ASL compiler vendor ID */
    uint32_t asl_compiler_revision; /* ASL compiler revision number */
} QEMU_PACKED;

#define ACPI_TABLE_HDR_SIZE sizeof(struct acpi_table_header)
#define ACPI_TABLE_PFX_SIZE sizeof(uint16_t)  /* size of the extra prefix */

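/*
 * The standard ACPI System Description Table Header is 36 bytes:
 * 4 (sig) + 4 (length) + 1 (revision) + 1 (checksum) + 6 (oem_id) +
 * 8 (oem_table_id) + 4 (oem_revision) + 4 (asl_compiler_id) +
 * 4 (asl_compiler_revision) = 36.  With the 2-byte _length prefix,
 * ACPI_TABLE_HDR_SIZE is 38 and dfl_hdr below is exactly 36 bytes.
 */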
static const char unsigned dfl_hdr[ACPI_TABLE_HDR_SIZE - ACPI_TABLE_PFX_SIZE] =
    "QEMU\0\0\0\0\1\0"       /* sig (4), len(4), revno (1), csum (1) */
    "QEMUQEQEMUQEMU\1\0\0\0" /* OEM id (6), table (8), revno (4) */
    "QEMU\1\0\0\0"           /* ASL compiler ID (4), version (4) */
    ;

char unsigned *acpi_tables;
size_t acpi_tables_len;

static QemuOptsList qemu_acpi_opts = {
    .name = "acpi",
    .implied_opt_name = "data",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_acpi_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};

static void acpi_register_config(void)
{
    qemu_add_opts(&qemu_acpi_opts);
}

opts_init(acpi_register_config);

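/*
 * ACPI requires the byte-wise sum of a table, including its checksum field,
 * to be 0 mod 256.  The helper below is therefore called with the checksum
 * field cleared; it returns the two's complement of the byte sum, which is
 * then stored back into the field (see the end of acpi_table_install()).
 */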
static int acpi_checksum(const uint8_t *data, int len)
{
    int sum, i;

    sum = 0;
    for (i = 0; i < len; i++) {
        sum += data[i];
    }
    return (-sum) & 0xff;
}

/* Install a copy of the ACPI table specified in @blob.
 *
 * If @has_header is set, @blob starts with the System Description Table Header
 * structure. Otherwise, "dfl_hdr" is prepended. In any case, each header field
 * is optionally overwritten from @hdrs.
 *
 * It is valid to call this function with
 * (@blob == NULL && bloblen == 0 && !has_header).
 *
 * @hdrs->file and @hdrs->data are ignored.
 *
 * SIZE_MAX is considered "infinity" in this function.
 *
 * The number of tables that can be installed is not limited, but the 16-bit
 * counter at the beginning of "acpi_tables" wraps around after UINT16_MAX.
 */
static void acpi_table_install(const char unsigned *blob, size_t bloblen,
                               bool has_header,
                               const struct AcpiTableOptions *hdrs,
                               Error **errp)
{
    size_t body_start;
    const char unsigned *hdr_src;
    size_t body_size, acpi_payload_size;
    struct acpi_table_header *ext_hdr;
    unsigned changed_fields;

    /* Calculate where the ACPI table body starts within the blob, plus where
     * to copy the ACPI table header from.
     */
    if (has_header) {
        /*   _length             | ACPI header in blob | blob body
         *   ^^^^^^^^^^^^^^^^^^^   ^^^^^^^^^^^^^^^^^^^   ^^^^^^^^^
         *   ACPI_TABLE_PFX_SIZE   sizeof dfl_hdr        body_size
         *                         == body_start
         *
         *                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
         *                         acpi_payload_size == bloblen
         */
        hdr_src = blob;
        body_start = sizeof dfl_hdr;

        if (bloblen < body_start) {
            error_setg(errp, "ACPI table claiming to have header is too "
                       "short, available: %zu, expected: %zu", bloblen,
                       body_start);
            return;
        }
    } else {
        /*   _length             | ACPI header in template | blob body
         *   ^^^^^^^^^^^^^^^^^^^   ^^^^^^^^^^^^^^^^^^^^^^^   ^^^^^^^^^^
         *   ACPI_TABLE_PFX_SIZE   sizeof dfl_hdr            body_size
         *                                                   == bloblen
         *
         *                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
         *                         acpi_payload_size
         */
        hdr_src = dfl_hdr;
        body_start = 0;
    }
    body_size = bloblen - body_start;
    acpi_payload_size = sizeof dfl_hdr + body_size;

    if (acpi_payload_size > UINT16_MAX) {
        error_setg(errp, "ACPI table too big, requested: %zu, max: %u",
                   acpi_payload_size, (unsigned)UINT16_MAX);
        return;
    }

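    /*
     * Worked example (illustrative): sizeof dfl_hdr is 36 bytes.  For a
     * 100-byte blob with has_header set, body_start = 36, body_size = 64
     * and acpi_payload_size = 36 + 64 = 100 == bloblen.  Without a header,
     * body_start = 0 and acpi_payload_size = 36 + bloblen.
     */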
    /* We won't fail from here on. Initialize / extend the globals. */
    if (acpi_tables == NULL) {
        acpi_tables_len = sizeof(uint16_t);
        acpi_tables = g_malloc0(acpi_tables_len);
    }

    acpi_tables = g_realloc(acpi_tables, acpi_tables_len +
                                         ACPI_TABLE_PFX_SIZE +
                                         sizeof dfl_hdr + body_size);

    ext_hdr = (struct acpi_table_header *)(acpi_tables + acpi_tables_len);
    acpi_tables_len += ACPI_TABLE_PFX_SIZE;

    memcpy(acpi_tables + acpi_tables_len, hdr_src, sizeof dfl_hdr);
    acpi_tables_len += sizeof dfl_hdr;

    if (blob != NULL) {
        memcpy(acpi_tables + acpi_tables_len, blob + body_start, body_size);
        acpi_tables_len += body_size;
    }

    /* increase number of tables */
    stw_le_p(acpi_tables, lduw_le_p(acpi_tables) + 1u);

    /* Update the header fields. The strings need not be NUL-terminated. */
    changed_fields = 0;
    ext_hdr->_length = cpu_to_le16(acpi_payload_size);

    if (hdrs->sig) {
        strncpy(ext_hdr->sig, hdrs->sig, sizeof ext_hdr->sig);
        ++changed_fields;
    }

    if (has_header && le32_to_cpu(ext_hdr->length) != acpi_payload_size) {
        warn_report("ACPI table has wrong length, header says "
                    "%" PRIu32 ", actual size %zu bytes",
                    le32_to_cpu(ext_hdr->length), acpi_payload_size);
    }
    ext_hdr->length = cpu_to_le32(acpi_payload_size);

    if (hdrs->has_rev) {
        ext_hdr->revision = hdrs->rev;
        ++changed_fields;
    }

    ext_hdr->checksum = 0;

    if (hdrs->oem_id) {
        strncpy(ext_hdr->oem_id, hdrs->oem_id, sizeof ext_hdr->oem_id);
        ++changed_fields;
    }
    if (hdrs->oem_table_id) {
        strncpy(ext_hdr->oem_table_id, hdrs->oem_table_id,
                sizeof ext_hdr->oem_table_id);
        ++changed_fields;
    }
    if (hdrs->has_oem_rev) {
        ext_hdr->oem_revision = cpu_to_le32(hdrs->oem_rev);
        ++changed_fields;
    }
    if (hdrs->asl_compiler_id) {
        strncpy(ext_hdr->asl_compiler_id, hdrs->asl_compiler_id,
                sizeof ext_hdr->asl_compiler_id);
        ++changed_fields;
    }
    if (hdrs->has_asl_compiler_rev) {
        ext_hdr->asl_compiler_revision = cpu_to_le32(hdrs->asl_compiler_rev);
        ++changed_fields;
    }

    if (!has_header && changed_fields == 0) {
        warn_report("ACPI table: no headers are specified");
    }

    /* recalculate checksum */
    ext_hdr->checksum = acpi_checksum((const char unsigned *)ext_hdr +
                                      ACPI_TABLE_PFX_SIZE, acpi_payload_size);
}

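/*
 * Entry point for the '-acpitable' command line option, e.g. (illustrative
 * values):
 *
 *   -acpitable sig=SLIC,oem_id=MYOEM,oem_table_id=MYTABLE,file=slic.bin
 *   -acpitable data=body1.bin:body2.bin
 *
 * 'file' expects blob(s) that start with a complete SDT header, while
 * 'data' expects bare table bodies and gets the QEMU default header
 * prepended; the remaining options override individual header fields.
 */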
void acpi_table_add(const QemuOpts *opts, Error **errp)
{
    AcpiTableOptions *hdrs = NULL;
    char **pathnames = NULL;
    char **cur;
    size_t bloblen = 0;
    char unsigned *blob = NULL;

    {
        Visitor *v;

        v = opts_visitor_new(opts);
        visit_type_AcpiTableOptions(v, NULL, &hdrs, errp);
        visit_free(v);
    }

    if (!hdrs) {
        goto out;
    }
    if (!hdrs->file == !hdrs->data) {
        error_setg(errp, "'-acpitable' requires one of 'data' or 'file'");
        goto out;
    }

    pathnames = g_strsplit(hdrs->file ?: hdrs->data, ":", 0);
    if (pathnames == NULL || pathnames[0] == NULL) {
        error_setg(errp, "'-acpitable' requires at least one pathname");
        goto out;
    }

    /* now read in the data files, reallocating buffer as needed */
    for (cur = pathnames; *cur; ++cur) {
        int fd = open(*cur, O_RDONLY | O_BINARY);

        if (fd < 0) {
            error_setg(errp, "can't open file %s: %s", *cur, strerror(errno));
            goto out;
        }

        for (;;) {
            char unsigned data[8192];
            ssize_t r;

            r = read(fd, data, sizeof data);
            if (r == 0) {
                break;
            } else if (r > 0) {
                blob = g_realloc(blob, bloblen + r);
                memcpy(blob + bloblen, data, r);
                bloblen += r;
            } else if (errno != EINTR) {
                error_setg(errp, "can't read file %s: %s", *cur,
                           strerror(errno));
                close(fd);
                goto out;
            }
        }

        close(fd);
    }

    acpi_table_install(blob, bloblen, !!hdrs->file, hdrs, errp);

out:
    g_free(blob);
    g_strfreev(pathnames);
    qapi_free_AcpiTableOptions(hdrs);
}

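/*
 * Layout of the acpi_tables blob built above, as consumed by the iteration
 * helpers below:
 *
 *   uint16_t             number of installed tables (little endian)
 *   per table:           uint16_t _length prefix, then header + body
 *
 * The helpers hand out pointers to the 'sig' field (the start of the real
 * ACPI header) and step from one table to the next using _length.
 */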
unsigned acpi_table_len(void *current)
{
    struct acpi_table_header *hdr = current - sizeof(hdr->_length);

    return hdr->_length;
}

static
void *acpi_table_hdr(void *h)
{
    struct acpi_table_header *hdr = h;

    return &hdr->sig;
}

uint8_t *acpi_table_first(void)
{
    if (!acpi_tables) {
        return NULL;
    }

    return acpi_table_hdr(acpi_tables + ACPI_TABLE_PFX_SIZE);
}

uint8_t *acpi_table_next(uint8_t *current)
{
    uint8_t *next = current + acpi_table_len(current);

    if (next - acpi_tables >= acpi_tables_len) {
        return NULL;
    } else {
        return acpi_table_hdr(next);
    }
}

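/*
 * Look for a user-provided SLIC (software licensing) table; board code
 * typically reuses its OEM ID and OEM table ID when building the other
 * ACPI tables.  Returns 0 if a SLIC table was found, -1 otherwise.
 */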
int acpi_get_slic_oem(AcpiSlicOem *oem)
{
    uint8_t *u;

    for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
        struct acpi_table_header *hdr = (void *)(u - sizeof(hdr->_length));

        if (memcmp(hdr->sig, "SLIC", 4) == 0) {
            oem->id = g_strndup(hdr->oem_id, 6);
            oem->table_id = g_strndup(hdr->oem_table_id, 8);
            return 0;
        }
    }
    return -1;
}

static void acpi_notify_wakeup(Notifier *notifier, void *data)
{
    ACPIREGS *ar = container_of(notifier, ACPIREGS, wakeup);
    WakeupReason *reason = data;

    switch (*reason) {
    case QEMU_WAKEUP_REASON_RTC:
        ar->pm1.evt.sts |=
            (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_RT_CLOCK_STATUS);
        break;
    case QEMU_WAKEUP_REASON_PMTIMER:
        ar->pm1.evt.sts |=
            (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_TIMER_STATUS);
        break;
    case QEMU_WAKEUP_REASON_OTHER:
        /* ACPI_BITMASK_WAKE_STATUS should be set on resume.
           Pretend that resume was caused by power button */
        ar->pm1.evt.sts |=
            (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_POWER_BUTTON_STATUS);
        break;
    default:
        break;
    }
}

uint16_t acpi_pm1_evt_get_sts(ACPIREGS *ar)
{
    /* Compare ns-clock, not PM timer ticks, because
       acpi_pm_tmr_update function uses ns for setting the timer. */
    int64_t d = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (d >= muldiv64(ar->tmr.overflow_time,
                      NANOSECONDS_PER_SECOND, PM_TIMER_FREQUENCY)) {
        ar->pm1.evt.sts |= ACPI_BITMASK_TIMER_STATUS;
    }
    return ar->pm1.evt.sts;
}

static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val)
{
    uint16_t pm1_sts = acpi_pm1_evt_get_sts(ar);
    if (pm1_sts & val & ACPI_BITMASK_TIMER_STATUS) {
        /* if TMRSTS is reset, then compute the new overflow time */
        acpi_pm_tmr_calc_overflow_time(ar);
    }
    ar->pm1.evt.sts &= ~val;
}

static void acpi_pm1_evt_write_en(ACPIREGS *ar, uint16_t val)
{
    ar->pm1.evt.en = val;
    qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC,
                              val & ACPI_BITMASK_RT_CLOCK_ENABLE);
    qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER,
                              val & ACPI_BITMASK_TIMER_ENABLE);
}

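/*
 * Called by board code when a power-down is requested (e.g. via the QMP
 * system_powerdown command): emulate a power button press by latching
 * PWRBTN_STS so that an enabled guest receives an SCI and can shut down.
 */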
void acpi_pm1_evt_power_down(ACPIREGS *ar)
{
    if (ar->pm1.evt.en & ACPI_BITMASK_POWER_BUTTON_ENABLE) {
        ar->pm1.evt.sts |= ACPI_BITMASK_POWER_BUTTON_STATUS;
        ar->tmr.update_sci(ar);
    }
}

void acpi_pm1_evt_reset(ACPIREGS *ar)
{
    ar->pm1.evt.sts = 0;
    ar->pm1.evt.en = 0;
    qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC, 0);
    qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER, 0);
}

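/*
 * Accessors for the PM1a event register block: the 16-bit status register
 * (PM1_STS) is at offset 0 and the 16-bit enable register (PM1_EN) at
 * offset 2, as mandated by the ACPI specification.
 */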
static uint64_t acpi_pm_evt_read(void *opaque, hwaddr addr, unsigned width)
{
    ACPIREGS *ar = opaque;
    switch (addr) {
    case 0:
        return acpi_pm1_evt_get_sts(ar);
    case 2:
        return ar->pm1.evt.en;
    default:
        return 0;
    }
}

static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned width)
{
    ACPIREGS *ar = opaque;
    switch (addr) {
    case 0:
        acpi_pm1_evt_write_sts(ar, val);
        ar->pm1.evt.update_sci(ar);
        break;
    case 2:
        acpi_pm1_evt_write_en(ar, val);
        ar->pm1.evt.update_sci(ar);
        break;
    }
}

static const MemoryRegionOps acpi_pm_evt_ops = {
    .read = acpi_pm_evt_read,
    .write = acpi_pm_evt_write,
    .impl.min_access_size = 2,
    .valid.min_access_size = 1,
    .valid.max_access_size = 2,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void acpi_pm1_evt_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
                       MemoryRegion *parent)
{
    ar->pm1.evt.update_sci = update_sci;
    memory_region_init_io(&ar->pm1.evt.io, memory_region_owner(parent),
                          &acpi_pm_evt_ops, ar, "acpi-evt", 4);
    memory_region_add_subregion(parent, 0, &ar->pm1.evt.io);
}

void acpi_pm_tmr_update(ACPIREGS *ar, bool enable)
{
    int64_t expire_time;

    /* schedule a timer interruption if needed */
    if (enable) {
        expire_time = muldiv64(ar->tmr.overflow_time, NANOSECONDS_PER_SECOND,
                               PM_TIMER_FREQUENCY);
        timer_mod(ar->tmr.timer, expire_time);
    } else {
        timer_del(ar->tmr.timer);
    }
}

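/*
 * The ACPI PM timer ticks at PM_TIMER_FREQUENCY (3.579545 MHz).  The helper
 * below converts the virtual clock's nanoseconds into timer ticks with
 * muldiv64, i.e. ticks = ns * 3579545 / 1000000000.
 */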
static inline int64_t acpi_pm_tmr_get_clock(void)
{
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), PM_TIMER_FREQUENCY,
                    NANOSECONDS_PER_SECOND);
}

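/*
 * TMR_STS must be set whenever bit 23, the MSB of the 24-bit timer value,
 * toggles.  Rounding the current tick count up with
 * (d + 0x800000) & ~0x7fffff yields the next multiple of 0x800000, i.e.
 * the tick at which bit 23 next changes.
 */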
void acpi_pm_tmr_calc_overflow_time(ACPIREGS *ar)
{
    int64_t d = acpi_pm_tmr_get_clock();
    ar->tmr.overflow_time = (d + 0x800000LL) & ~0x7fffffLL;
}

static uint32_t acpi_pm_tmr_get(ACPIREGS *ar)
{
    uint32_t d = acpi_pm_tmr_get_clock();
    return d & 0xffffff;
}

static void acpi_pm_tmr_timer(void *opaque)
{
    ACPIREGS *ar = opaque;

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_PMTIMER, NULL);
    ar->tmr.update_sci(ar);
}

static uint64_t acpi_pm_tmr_read(void *opaque, hwaddr addr, unsigned width)
{
    return acpi_pm_tmr_get(opaque);
}

static void acpi_pm_tmr_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned width)
{
    /* nothing; the PM timer is read-only */
}

static const MemoryRegionOps acpi_pm_tmr_ops = {
    .read = acpi_pm_tmr_read,
    .write = acpi_pm_tmr_write,
    .impl.min_access_size = 4,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void acpi_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
                      MemoryRegion *parent)
{
    ar->tmr.update_sci = update_sci;
    ar->tmr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, acpi_pm_tmr_timer, ar);
    memory_region_init_io(&ar->tmr.io, memory_region_owner(parent),
                          &acpi_pm_tmr_ops, ar, "acpi-tmr", 4);
    memory_region_add_subregion(parent, 8, &ar->tmr.io);
}

void acpi_pm_tmr_reset(ACPIREGS *ar)
{
    ar->tmr.overflow_time = 0;
    timer_del(ar->tmr.timer);
}

void acpi_pm1_cnt_update(ACPIREGS *ar,
                         bool sci_enable, bool sci_disable)
{
    /* ACPI specs 3.0, 4.7.2.5 */
    if (ar->pm1.cnt.acpi_only) {
        return;
    }

    if (sci_enable) {
        ar->pm1.cnt.cnt |= ACPI_BITMASK_SCI_ENABLE;
    } else if (sci_disable) {
        ar->pm1.cnt.cnt &= ~ACPI_BITMASK_SCI_ENABLE;
    }
}

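/*
 * PM1 control register accessors.  The bits handled below follow the
 * ACPI-defined layout: SCI_EN is bit 0, SLP_TYP occupies bits 10-12
 * (hence '(val >> 10) & 7') and SLP_EN is bit 13; writing SLP_EN together
 * with SLP_TYP selects the sleep state to enter.
 */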
static uint64_t acpi_pm_cnt_read(void *opaque, hwaddr addr, unsigned width)
{
    ACPIREGS *ar = opaque;
    return ar->pm1.cnt.cnt >> addr * 8;
}

static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned width)
{
    ACPIREGS *ar = opaque;

    if (addr == 1) {
        val = val << 8 | (ar->pm1.cnt.cnt & 0xff);
    }
    ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE);

    if (val & ACPI_BITMASK_SLEEP_ENABLE) {
        /* change suspend type */
        uint16_t sus_typ = (val >> 10) & 7;
        switch (sus_typ) {
        case 0: /* soft power off */
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
            break;
        case 1:
            qemu_system_suspend_request();
            break;
        default:
            if (sus_typ == ar->pm1.cnt.s4_val) { /* S4 request */
                qapi_event_send_suspend_disk();
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
            }
            break;
        }
    }
}

static const MemoryRegionOps acpi_pm_cnt_ops = {
    .read = acpi_pm_cnt_read,
    .write = acpi_pm_cnt_write,
    .impl.min_access_size = 2,
    .valid.min_access_size = 1,
    .valid.max_access_size = 2,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

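/*
 * Besides mapping the PM1 control register, acpi_pm1_cnt_init() registers
 * the wakeup notifier and publishes an "etc/system-states" fw_cfg file:
 * six bytes indexed by sleep state S0..S5, where bit 7 marks the state as
 * supported and the low bits hold the SLP_TYP value to use (S3/S4 filled
 * in from disable_s3, disable_s4 and s4_val).  Guest firmware can use it
 * to decide which sleep states to advertise.
 */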
void acpi_pm1_cnt_init(ACPIREGS *ar, MemoryRegion *parent,
                       bool disable_s3, bool disable_s4, uint8_t s4_val,
                       bool acpi_only)
{
    FWCfgState *fw_cfg;

    ar->pm1.cnt.s4_val = s4_val;
    ar->pm1.cnt.acpi_only = acpi_only;
    ar->wakeup.notify = acpi_notify_wakeup;
    qemu_register_wakeup_notifier(&ar->wakeup);

    /*
     * Register wake-up support in QMP query-current-machine API
     */
    qemu_register_wakeup_support();

    memory_region_init_io(&ar->pm1.cnt.io, memory_region_owner(parent),
                          &acpi_pm_cnt_ops, ar, "acpi-cnt", 2);
    memory_region_add_subregion(parent, 4, &ar->pm1.cnt.io);

    fw_cfg = fw_cfg_find();
    if (fw_cfg) {
        uint8_t suspend[6] = {128, 0, 0, 129, 128, 128};
        suspend[3] = 1 | ((!disable_s3) << 7);
        suspend[4] = s4_val | ((!disable_s4) << 7);

        fw_cfg_add_file(fw_cfg, "etc/system-states", g_memdup(suspend, 6), 6);
    }
}

void acpi_pm1_cnt_reset(ACPIREGS *ar)
{
    ar->pm1.cnt.cnt = 0;
    if (ar->pm1.cnt.acpi_only) {
        ar->pm1.cnt.cnt |= ACPI_BITMASK_SCI_ENABLE;
    }
}

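/*
 * General Purpose Event register block: the first half of the allocated
 * area is GPE0_STS (write-1-to-clear status bytes), the second half is
 * GPE0_EN.  acpi_gpe_ioport_get_ptr() maps a register offset to the
 * corresponding byte in one of the two arrays.
 */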
void acpi_gpe_init(ACPIREGS *ar, uint8_t len)
{
    ar->gpe.len = len;
    /* Only first len / 2 bytes are ever used,
     * but the caller in ich9.c migrates full len bytes.
     * TODO: fix ich9.c and drop the extra allocation.
     */
    ar->gpe.sts = g_malloc0(len);
    ar->gpe.en = g_malloc0(len);
}

void acpi_gpe_reset(ACPIREGS *ar)
{
    memset(ar->gpe.sts, 0, ar->gpe.len / 2);
    memset(ar->gpe.en, 0, ar->gpe.len / 2);
}

static uint8_t *acpi_gpe_ioport_get_ptr(ACPIREGS *ar, uint32_t addr)
{
    uint8_t *cur = NULL;

    if (addr < ar->gpe.len / 2) {
        cur = ar->gpe.sts + addr;
    } else if (addr < ar->gpe.len) {
        cur = ar->gpe.en + addr - ar->gpe.len / 2;
    } else {
        abort();
    }

    return cur;
}

void acpi_gpe_ioport_writeb(ACPIREGS *ar, uint32_t addr, uint32_t val)
{
    uint8_t *cur;

    cur = acpi_gpe_ioport_get_ptr(ar, addr);
    if (addr < ar->gpe.len / 2) {
        trace_acpi_gpe_sts_ioport_writeb(addr, val);
        /* GPE_STS: writing 1 clears the corresponding status bit */
        *cur = (*cur) & ~val;
    } else if (addr < ar->gpe.len) {
        trace_acpi_gpe_en_ioport_writeb(addr - (ar->gpe.len / 2), val);
        /* GPE_EN */
        *cur = val;
    } else {
        abort();
    }
}

uint32_t acpi_gpe_ioport_readb(ACPIREGS *ar, uint32_t addr)
{
    uint8_t *cur;
    uint32_t val;

    cur = acpi_gpe_ioport_get_ptr(ar, addr);
    val = 0;
    if (cur != NULL) {
        val = *cur;
    }

    if (addr < ar->gpe.len / 2) {
        trace_acpi_gpe_sts_ioport_readb(addr, val);
    } else {
        trace_acpi_gpe_en_ioport_readb(addr - (ar->gpe.len / 2), val);
    }

    return val;
}

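/*
 * Raise a GPE: set the requested status bits in the first GPE0_STS byte
 * and recompute the SCI level so the guest is interrupted if the matching
 * enable bits are set.
 */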
void acpi_send_gpe_event(ACPIREGS *ar, qemu_irq irq,
                         AcpiEventStatusBits status)
{
    ar->gpe.sts[0] |= status;
    acpi_update_sci(ar, irq);
}

void acpi_update_sci(ACPIREGS *regs, qemu_irq irq)
{
    int sci_level, pm1a_sts;

    pm1a_sts = acpi_pm1_evt_get_sts(regs);

    sci_level = ((pm1a_sts &
                  regs->pm1.evt.en & ACPI_BITMASK_PM1_COMMON_ENABLED) != 0) ||
                ((regs->gpe.sts[0] & regs->gpe.en[0]) != 0);

    qemu_set_irq(irq, sci_level);

    /* schedule a timer interruption if needed */
    acpi_pm_tmr_update(regs,
                       (regs->pm1.evt.en & ACPI_BITMASK_TIMER_ENABLE) &&
                       !(pm1a_sts & ACPI_BITMASK_TIMER_STATUS));
}