/*
 * ec.c - ACPI Embedded Controller Driver (v3)
 *
 * Copyright (C) 2001-2015 Intel Corporation
 * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *         2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *         2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *         2004       Luming Yu <luming.yu@intel.com>
 *         2001, 2002 Andy Grover <andrew.grover@intel.com>
 *         2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI : EC: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"
#define ACPI_EC_FILE_INFO		"info"

/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */

/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         kind of EC firmware has implemented an event queue and will
 *         return 0x00 to indicate "no outstanding event".
 * QUERY: After seeing the query request (QR_EC) written to the command
 *        register (EC_CMD) by the host and having prepared the responding
 *        event value in the data register (EC_DATA), the target can safely
 *        clear SCI_EVT because the target can confirm that the current
 *        event is being handled by the host. The host then should check
 *        SCI_EVT right after reading the event response from the data
 *        register (EC_DATA).
 * EVENT: After seeing the event response read from the data register
 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *        target requires time to notice the change in the data register
 *        (EC_DATA), the host may be required to wait additional guarding
 *        time before checking the SCI_EVT again. Such guarding may not be
 *        necessary if the host is notified via another IRQ.
 */
#define ACPI_EC_EVT_TIMING_STATUS	0x00
#define ACPI_EC_EVT_TIMING_QUERY	0x01
#define ACPI_EC_EVT_TIMING_EVENT	0x02

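/*
 * Illustrative QR_EC handshake under the "QUERY" timing above (a sketch
 * of the expected register traffic, not code in this driver):
 *
 *	host: sees SCI_EVT=1 in EC_SC           (event pending)
 *	host: writes QR_EC (0x84) to EC_CMD     (query request)
 *	EC:   puts the _Qxx number in EC_DATA, then clears SCI_EVT
 *	host: reads EC_DATA and evaluates the matching _Qxx method
 */
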
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};

#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL	550	/* Wait 550us for EC transaction polling */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */
#define ACPI_EC_MAX_QUERIES	16	/* Maximum number of parallel queries */

enum {
	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
	EC_FLAGS_QUERY_GUARDING,	/* Guard for SCI_EVT check */
	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
					 * OpReg are installed */
	EC_FLAGS_STARTED,		/* Driver is started */
	EC_FLAGS_STOPPED,		/* Driver is stopped */
	EC_FLAGS_COMMAND_STORM,		/* GPE storms occurred to the
					 * current command processing */
};

#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");

static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
module_param(ec_max_queries, uint, 0644);
MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");

static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");

static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");

static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;

/*
 * If the number of false interrupts per one transaction exceeds
 * this threshold, the driver assumes that a GPE storm has happened
 * and disables the GPE for normal transactions.
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum false GPE numbers not considered as GPE storm");

struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
	struct kref kref;
};

struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;
	u8 ri;
	u8 wlen;
	u8 rlen;
	u8 flags;
};

struct acpi_ec_query {
	struct transaction transaction;
	struct work_struct work;
	struct acpi_ec_query_handler *handler;
};

static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);
static void acpi_ec_event_handler(struct work_struct *work);
static void acpi_ec_event_processor(struct work_struct *work);

struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);

static struct workqueue_struct *ec_query_wq;

static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */

/* --------------------------------------------------------------------------
 *                           Logging/Debugging
 * -------------------------------------------------------------------------- */

/*
 * Splitters used by the developers to track the boundary of the EC
 * handling processes.
 */
#ifdef DEBUG
#define EC_DBG_SEP	" "
#define EC_DBG_DRV	"+++++"
#define EC_DBG_STM	"====="
#define EC_DBG_REQ	"*****"
#define EC_DBG_EVT	"#####"
#else
#define EC_DBG_SEP	""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif

#define ec_log_raw(fmt, ...) \
	pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
	pr_debug(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)

#define ec_log_drv(fmt, ...) \
	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
#define ec_dbg_ref(ec, fmt, ...) \
	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)

/* --------------------------------------------------------------------------
 *                           Device Flags
 * -------------------------------------------------------------------------- */

static bool acpi_ec_started(struct acpi_ec *ec)
{
	return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	       !test_bit(EC_FLAGS_STOPPED, &ec->flags);
}

static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}

/* --------------------------------------------------------------------------
 *                           EC Registers
 * -------------------------------------------------------------------------- */

static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
		   x,
		   !!(x & ACPI_EC_FLAG_SCI),
		   !!(x & ACPI_EC_FLAG_BURST),
		   !!(x & ACPI_EC_FLAG_CMD),
		   !!(x & ACPI_EC_FLAG_IBF),
		   !!(x & ACPI_EC_FLAG_OBF));
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->timestamp = jiffies;
	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	ec->timestamp = jiffies;
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	ec->timestamp = jiffies;
}

#ifdef DEBUG
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case 0x80:
		return "RD_EC";
	case 0x81:
		return "WR_EC";
	case 0x82:
		return "BE_EC";
	case 0x83:
		return "BD_EC";
	case 0x84:
		return "QR_EC";
	}
	return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif

/* --------------------------------------------------------------------------
 *                           GPE Registers
 * -------------------------------------------------------------------------- */

static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
{
	acpi_event_status gpe_status = 0;

	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
	return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software needs to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}

static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{
	if (close)
		acpi_disable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
	}
}

static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
{
	/*
	 * GPE STS is a W1C register, which means:
	 * 1. Software can clear it without worrying about clearing other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 * 2. As long as software can ensure only clearing it when it is
	 *    set, hardware won't set it in parallel.
	 * So software can clear GPE in any contexts.
	 * Warning: do not move the check into advance_transaction() as the
	 * EC commands will be sent without GPE raised.
	 */
	if (!acpi_ec_is_gpe_raised(ec))
		return;
	acpi_clear_gpe(NULL, ec->gpe);
}

/* --------------------------------------------------------------------------
 * Transaction Management
 * -------------------------------------------------------------------------- */

static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed = false;

	ec->reference_count--;
	if (ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)
		wake_up(&ec->wait);
}

static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
{
	if (!test_bit(flag, &ec->flags)) {
		acpi_ec_disable_gpe(ec, false);
		ec_dbg_drv("Polling enabled");
		set_bit(flag, &ec->flags);
	}
}

static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
{
	if (test_bit(flag, &ec->flags)) {
		clear_bit(flag, &ec->flags);
		acpi_ec_enable_gpe(ec, false);
		ec_dbg_drv("Polling disabled");
	}
}

/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      the flush operation is not in
 *                                      progress
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count.  If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}

static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		ec_dbg_evt("Command(%s) submitted/blocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
		ec->nr_pending_queries++;
		schedule_work(&ec->work);
	}
}

static void acpi_ec_complete_query(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
		ec_dbg_evt("Command(%s) unblocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
	}
}

static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	bool guarded = true;
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	/*
	 * If firmware SCI_EVT clearing timing is "event", we actually
	 * don't know when the SCI_EVT will be cleared by firmware after
	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
	 * acceptable period.
	 *
	 * The guarding period begins when EC_FLAGS_QUERY_PENDING is
	 * flagged, which means SCI_EVT check has just been performed.
	 * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
	 * guarding should have already been performed (via
	 * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
	 * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
	 * ACPI_EC_COMMAND_POLL state immediately.
	 */
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
	    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
	    !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
		guarded = false;
	spin_unlock_irqrestore(&ec->lock, flags);
	return guarded;
}

static int ec_transaction_polled(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static int ec_transaction_completed(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}

static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	acpi_ec_clear_gpe(ec);
	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * If another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If the SCI bit is set, don't count this as a false IRQ;
	 * otherwise an unhandled IRQ would be taken as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}

static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
}

static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec_polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec_busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 * 1. Wait the transaction to be completed by the
			 *    GPE handler after the transaction enters
			 *    ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}

static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}

static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}

static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}

	status = acpi_ec_transaction_unlocked(ec, t);

	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}

static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction(ec, &t) : 0;
}

static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}

int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	}
	return err;
}
EXPORT_SYMBOL(ec_read);

int ec_write(u8 addr, u8 val)
{
	int err;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_write(first_ec, addr, val);

	return err;
}
EXPORT_SYMBOL(ec_write);

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -ENODEV;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);

/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;
	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);

/*
 * Process _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i, status;
	u8 value = 0;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		status = acpi_ec_query(ec, &value);
		if (status || !value)
			break;
	}

	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}

static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{
	unsigned long flags;
	bool flushed;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = acpi_ec_flushed(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		}
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	acpi_ec_stop(ec, true);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	/* Allow transactions to be carried out again */
	acpi_ec_start(ec, true);

	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}

void acpi_ec_unblock_transactions_early(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}

/* --------------------------------------------------------------------------
                                Event Management
   -------------------------------------------------------------------------- */
static struct acpi_ec_query_handler *
acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
{
	if (handler)
		kref_get(&handler->kref);
	return handler;
}

static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
	struct acpi_ec_query_handler *handler;
	bool found = false;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			found = true;
			break;
		}
	}
	mutex_unlock(&ec->mutex);
	return found ? acpi_ec_get_query_handler(handler) : NULL;
}

static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}

int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
	    kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);

	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);

static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
					  bool remove_all, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;
	LIST_HEAD(free_list);

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		if (remove_all || query_bit == handler->query_bit) {
			list_del_init(&handler->node);
			list_add(&handler->node, &free_list);
		}
	}
	mutex_unlock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &free_list, node)
		acpi_ec_put_query_handler(handler);
}

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	acpi_ec_remove_query_handlers(ec, false, query_bit);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);

static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
{
	struct acpi_ec_query *q;
	struct transaction *t;

	q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
	if (!q)
		return NULL;
	INIT_WORK(&q->work, acpi_ec_event_processor);
	t = &q->transaction;
	t->command = ACPI_EC_COMMAND_QUERY;
	t->rdata = pval;
	t->rlen = 1;
	return q;
}

static void acpi_ec_delete_query(struct acpi_ec_query *q)
{
	if (q) {
		if (q->handler)
			acpi_ec_put_query_handler(q->handler);
		kfree(q);
	}
}

static void acpi_ec_event_processor(struct work_struct *work)
{
	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
	struct acpi_ec_query_handler *handler = q->handler;

	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
	acpi_ec_delete_query(q);
}

static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
	u8 value = 0;
	int result;
	struct acpi_ec_query *q;

	q = acpi_ec_create_query(&value);
	if (!q)
		return -ENOMEM;

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &q->transaction);
	if (!value)
		result = -ENODATA;
	if (result)
		goto err_exit;

	q->handler = acpi_ec_get_query_handler_by_value(ec, value);
	if (!q->handler) {
		result = -ENODATA;
		goto err_exit;
	}

	/*
	 * It is reported that _Qxx are evaluated in a parallel way on
	 * Windows:
	 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
	 *
	 * Put this log entry before schedule_work() in order to make
	 * it appear before any other log entries emitted during the
	 * work queue execution.
	 */
	ec_dbg_evt("Query(0x%02x) scheduled", value);
	if (!queue_work(ec_query_wq, &q->work)) {
		ec_dbg_evt("Query(0x%02x) overlapped", value);
		result = -EBUSY;
	}

err_exit:
	if (result)
		acpi_ec_delete_query(q);
	if (data)
		*data = value;
	return result;
}

static void acpi_ec_check_event(struct acpi_ec *ec)
{
	unsigned long flags;

	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		if (ec_guard(ec)) {
			spin_lock_irqsave(&ec->lock, flags);
			/*
			 * Take care of SCI_EVT if no one else is
			 * taking care of it.
			 */
			if (!ec->curr)
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		}
	}
}

static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exit, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");

	acpi_ec_check_event(ec);
}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	unsigned long flags;
	struct acpi_ec *ec = data;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return ACPI_INTERRUPT_HANDLED;
}

/* --------------------------------------------------------------------------
 * Address Space Management
 * -------------------------------------------------------------------------- */

static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	if (ec_busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);

	if (ec_busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}

/* --------------------------------------------------------------------------
 *                             Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static struct acpi_ec *make_acpi_ec(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_event_handler);
	ec->timestamp = jiffies;
	return ec;
}

static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}

static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depends on it */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;

	/* Get GPE bit assignment (EC events). */
	/* TODO: Add support for _GPE returning a package */
	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
	if (ACPI_FAILURE(status))
		return status;
	ec->gpe = tmp;
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}

static int ec_install_handlers(struct acpi_ec *ec)
{
	acpi_status status;

	if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return 0;
	status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
					      ACPI_GPE_EDGE_TRIGGERED,
					      &acpi_ec_gpe_handler, ec);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	acpi_ec_start(ec, false);
	status = acpi_install_address_space_handler(ec->handle,
						    ACPI_ADR_SPACE_EC,
						    &acpi_ec_space_handler,
						    NULL, ec);
	if (ACPI_FAILURE(status)) {
		if (status == AE_NOT_FOUND) {
			/*
			 * The OS may have failed to evaluate the _REG
			 * object. The AE_NOT_FOUND error is ignored and
			 * the OS continues to initialize the EC.
			 */
			pr_err("Failed to evaluate the _REG object"
			       " of the EC device. Broken BIOS is suspected.\n");
		} else {
			acpi_ec_stop(ec, false);
			acpi_remove_gpe_handler(NULL, ec->gpe,
						&acpi_ec_gpe_handler);
			return -ENODEV;
		}
	}

	set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
	return 0;
}

static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return;
	acpi_ec_stop(ec, false);
	if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
				ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
		pr_err("failed to remove space handler\n");
	if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				&acpi_ec_gpe_handler)))
		pr_err("failed to remove gpe handler\n");
	clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}

static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	/* Check for boot EC */
	if (boot_ec &&
	    (boot_ec->handle == device->handle ||
	     boot_ec->handle == ACPI_ROOT_OBJECT)) {
		ec = boot_ec;
		boot_ec = NULL;
	} else {
		ec = make_acpi_ec();
		if (!ec)
			return -ENOMEM;
	}
	if (ec_parse_device(device->handle, 0, ec, NULL) !=
	    AE_CTRL_TERMINATE) {
		kfree(ec);
		return -EINVAL;
	}

	/* Find and register all query methods */
	acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
			    acpi_ec_register_query_methods, NULL, ec, NULL);

	if (!first_ec)
		first_ec = ec;
	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
		ec->gpe, ec->command_addr, ec->data_addr);

	ret = ec_install_handlers(ec);

	/* Reprobe devices depending on the EC */
	acpi_walk_dep_device_list(ec->handle);

	/* EC is fully operational, allow queries */
	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);

	/* Clear stale _Q events if hardware might require that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);

	return ret;
}

static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	ec_remove_handlers(ec);
	acpi_ec_remove_query_handlers(ec, true, 0);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec == first_ec)
		first_ec = NULL;
	kfree(ec);
	return 0;
}

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

int __init acpi_boot_ec_enable(void)
{
	if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
		return 0;
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
	return -EFAULT;
}

static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{"", 0},
};

/* Some BIOS do not survive early DSDT scan, skip it */
static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
{
	EC_FLAGS_SKIP_DSDT_SCAN = 1;
	return 0;
}

/* ASUStek often supplies us with broken ECDT, validate it */
static int ec_validate_ecdt(const struct dmi_system_id *id)
{
	EC_FLAGS_VALIDATE_ECDT = 1;
	return 0;
}

#if 0
/*
 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
 * not set, in which case we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif

/*
 * On some hardware it is necessary to clear events accumulated by the EC during
 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
	return 0;
}

static struct dmi_system_id ec_dmi_table[] __initdata = {
	{
	ec_skip_dsdt_scan, "Compal JFL92", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
	{
	ec_validate_ecdt, "MSI MS-171F", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
	{
	ec_skip_dsdt_scan, "HP Folio 13", {
	DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
	{
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};

int __init acpi_ec_ecdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *saved_ec = NULL;
	struct acpi_table_ecdt *ecdt_ptr;

	boot_ec = make_acpi_ec();
	if (!boot_ec)
		return -ENOMEM;
	/*
	 * Generate a boot ec context
	 */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_SUCCESS(status)) {
		pr_info("EC description table is found, configuring boot EC\n");
		boot_ec->command_addr = ecdt_ptr->control.address;
		boot_ec->data_addr = ecdt_ptr->data.address;
		boot_ec->gpe = ecdt_ptr->gpe;
		boot_ec->handle = ACPI_ROOT_OBJECT;
		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
				&boot_ec->handle);
		/* Don't trust ECDT, which comes from ASUSTek */
		if (!EC_FLAGS_VALIDATE_ECDT)
			goto install;
		saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
		if (!saved_ec)
			return -ENOMEM;
	/* fall through */
	}

	if (EC_FLAGS_SKIP_DSDT_SCAN) {
		kfree(saved_ec);
		return -ENODEV;
	}

	/* This workaround is needed only on some broken machines,
	 * which require early EC, but fail to provide ECDT */
	pr_debug("Look up EC in DSDT\n");
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
				  boot_ec, NULL);
	/* Check that acpi_get_devices actually finds something */
	if (ACPI_FAILURE(status) || !boot_ec->handle)
		goto error;
	if (saved_ec) {
		/* try to find good ECDT from ASUSTek */
		if (saved_ec->command_addr != boot_ec->command_addr ||
		    saved_ec->data_addr != boot_ec->data_addr ||
		    saved_ec->gpe != boot_ec->gpe ||
		    saved_ec->handle != boot_ec->handle)
			pr_info("ASUSTek keeps feeding us with broken "
			"ECDT tables, which are very hard to workaround. "
			"Trying to use DSDT EC info instead. Please send "
			"output of acpidump to linux-acpi@vger.kernel.org\n");
		kfree(saved_ec);
		saved_ec = NULL;
	} else {
		/* We really need to limit this workaround, the only ASUS,
		 * which needs it, has fake EC._INI method, so use it as flag.
		 * Keep boot_ec struct as it will be needed soon.
		 */
		if (!dmi_name_in_vendors("ASUS") ||
		    !acpi_has_method(boot_ec->handle, "_INI"))
			return -ENODEV;
	}
install:
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
error:
	kfree(boot_ec);
	kfree(saved_ec);
	boot_ec = NULL;
	return -ENODEV;
}

static int param_set_event_clearing(const char *val, struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else
		result = -EINVAL;
	return result;
}

static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
{
	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		return sprintf(buffer, "status");
	case ACPI_EC_EVT_TIMING_QUERY:
		return sprintf(buffer, "query");
	case ACPI_EC_EVT_TIMING_EVENT:
		return sprintf(buffer, "event");
	default:
		return sprintf(buffer, "invalid");
	}
	return 0;
}

module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");

static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
		},
};

static inline int acpi_ec_query_init(void)
{
	if (!ec_query_wq) {
		ec_query_wq = alloc_workqueue("kec_query", 0,
					      ec_max_queries);
		if (!ec_query_wq)
			return -ENODEV;
	}
	return 0;
}

static inline void acpi_ec_query_exit(void)
{
	if (ec_query_wq) {
		destroy_workqueue(ec_query_wq);
		ec_query_wq = NULL;
	}
}

int __init acpi_ec_init(void)
{
	int result;

	/* register workqueue for _Qxx evaluations */
	result = acpi_ec_query_init();
	if (result)
		goto err_exit;
	/* Now register the driver for the EC */
	result = acpi_bus_register_driver(&acpi_ec_driver);
	if (result)
		goto err_exit;

err_exit:
	if (result)
		acpi_ec_query_exit();
	return result;
}

/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_query_exit();
}
#endif	/* 0 */