/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <mach/iomap.h>

#include "nvec.h"
#define I2C_CNFG_PACKET_MODE_EN		(1<<10)
#define I2C_CNFG_NEW_MASTER_SFM		(1<<11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWSL		(1<<2)
#define I2C_SL_NACK		(1<<1)
#define I2C_SL_RESP		(1<<0)
#define I2C_SL_IRQ		(1<<3)
#define END_TRANS		(1<<4)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c
/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};
static const unsigned char EC_DISABLE_EVENT_REPORTING[3] = "\x04\x00\x00";
static const unsigned char EC_ENABLE_EVENT_REPORTING[3] = "\x04\x00\x01";
static const unsigned char EC_GET_FIRMWARE_VERSION[2] = "\x07\x15";

static struct nvec_chip *nvec_power_handle;
static struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-paz00",
	},
};
/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
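/*
 * Usage sketch (illustrative only, not part of this driver): a client such
 * as one of the nvec sub-device drivers embeds a notifier_block, points
 * notifier_call at its handler and registers it. The handler receives the
 * event type and the raw message payload.
 *
 *	static int example_notifier(struct notifier_block *nb,
 *				    unsigned long event_type, void *data)
 *	{
 *		unsigned char *msg = data;
 *
 *		// inspect event_type and msg[], then tell the chain whether
 *		// the event was consumed
 *		return NOTIFY_DONE;
 *	}
 *
 *	example_nb.notifier_call = example_notifier;
 *	nvec_register_notifier(nvec, &example_nb, 0);
 */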
/**
 * nvec_status_notifier - The final notifier
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
				unsigned long event_type, void *data)
{
	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
						nvec_status_notifier);
	unsigned char *msg = (unsigned char *)data;

	if (event_type != NVEC_CNTL)
		return NOTIFY_DONE;

	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
		msg, msg[1] + 2, true);

	return NOTIFY_OK;
}
/**
 * nvec_msg_alloc - Allocate a message
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
				       enum nvec_msg_category category)
{
	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

	for (; i < NVEC_POOL_SIZE; i++) {
		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
			return &nvec->msg_pool[i];
		}
	}

	dev_err(nvec->dev, "could not allocate %s buffer\n",
		(category == NVEC_MSG_TX) ? "TX" : "RX");

	return NULL;
}
/**
 * nvec_msg_free - Free a message
 * @nvec: A &struct nvec_chip
 * @msg:  A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message
 */
inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);
/**
 * nvec_msg_is_event - Return %true if @msg is an event
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
	return msg->data[0] >> 7;
}
/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
	bool is_event = nvec_msg_is_event(msg);
	int event_length = (msg->data[0] & 0x60) >> 5;

	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
	if (!is_event || event_length == NVEC_VAR_SIZE)
		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
	else if (event_length == NVEC_2BYTES)
		return 2;
	else if (event_length == NVEC_3BYTES)
		return 3;
	else
		return 0;
}
/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpio_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpio_get_value(nvec->gpio), value);
	gpio_set_value(nvec->gpio, value);
}
/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
			short size)
{
	struct nvec_msg *msg;
	unsigned long flags;

	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

	if (msg == NULL)
		return -ENOMEM;

	msg->data[0] = size;
	memcpy(msg->data + 1, data, size);
	msg->size = size + 1;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	list_add_tail(&msg->node, &nvec->tx_data);
	spin_unlock_irqrestore(&nvec->tx_lock, flags);

	queue_work(nvec->wq, &nvec->tx_work);

	return 0;
}
EXPORT_SYMBOL(nvec_write_async);
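/*
 * Usage sketch (not part of the driver itself): queueing a request without
 * waiting for the answer. The buffer starts with the request type byte,
 * exactly like the EC_* command strings defined at the top of this file.
 *
 *	if (nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
 *			     sizeof(EC_ENABLE_EVENT_REPORTING)) < 0)
 *		dev_warn(nvec->dev, "could not queue EC request\n");
 */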
/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: A pointer to the response message on success,
 * %NULL on failure. Free with nvec_msg_free() once no longer
 * used.
 */
struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
		const unsigned char *data, short size)
{
	struct nvec_msg *msg;

	mutex_lock(&nvec->sync_write_mutex);

	nvec->sync_write_pending = (data[1] << 8) + data[0];

	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
		nvec->sync_write_pending);
	if (!(wait_for_completion_timeout(&nvec->sync_write,
				msecs_to_jiffies(2000)))) {
		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

	msg = nvec->last_sync_msg;

	mutex_unlock(&nvec->sync_write_mutex);

	return msg;
}
EXPORT_SYMBOL(nvec_write_sync);
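/*
 * Usage sketch (not part of the driver): a synchronous request/response
 * round trip, as done in tegra_nvec_probe() for the firmware version query.
 * The caller owns the returned message and must release it.
 *
 *	struct nvec_msg *msg;
 *
 *	msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
 *			      sizeof(EC_GET_FIRMWARE_VERSION));
 *	if (msg) {
 *		// msg->data[] holds the EC response
 *		nvec_msg_free(nvec, msg);
 *	}
 *
 * Requests and responses are matched via sync_write_pending: the tag is
 * built from bytes 0 and 1 of the request and compared against bytes 0 and
 * 2 of each incoming message in nvec_dispatch().
 */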
/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(
				&nvec->ec_transfer, msecs_to_jiffies(5000));

		if (err == 0) {
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}
/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier().
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
		dev_err(nvec->dev, "ec responded %02x %02x %02x %02x\n",
			msg->data[0], msg->data[1], msg->data[2], msg->data[3]);
		return -EINVAL;
	}

	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
		print_hex_dump(KERN_WARNING, "ec system event ",
				DUMP_PREFIX_NONE, 16, 1, msg->data,
				msg->data[1] + 2, true);

	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
				   msg->data);

	return 0;
}
/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		if (nvec->sync_write_pending ==
		      (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}

		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}
/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
	/* We got an END_TRANS, let's skip this, maybe there's an event */
	if (nvec->tx->pos != nvec->tx->size) {
		dev_err(nvec->dev, "premature END_TRANS, resending\n");
		nvec->tx->pos = 0;
		nvec_gpio_set_value(nvec, 0);
	} else {
		nvec->state = 0;
	}
}
/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
			(uint) nvec_msg_size(nvec->rx),
			(uint) nvec->rx->pos);

		nvec_msg_free(nvec, nvec->rx);
		nvec->state = 0;

		/* Battery quirk - Often incomplete, and likes to crash */
		if (nvec->rx->data[0] == NVEC_BAT)
			complete(&nvec->ec_transfer);

		return;
	}

	spin_lock(&nvec->rx_lock);

	/* add the received data to the work list
	   and move the ring buffer pointer to the next entry */
	list_add_tail(&nvec->rx->node, &nvec->rx_data);

	spin_unlock(&nvec->rx_lock);

	nvec->state = 0;

	if (!nvec_msg_is_event(nvec->rx))
		complete(&nvec->ec_transfer);

	queue_work(nvec->wq, &nvec->rx_work);
}
/**
 * nvec_invalid_flags - Send an error message about invalid flags and jump
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether we shall jump to state 0.
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}
/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}
/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something, read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0:		/* Verify that it's a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(nvec->rx == NULL)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			BUG_ON(nvec->tx->size < 1);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			BUG_ON(nvec->rx == NULL);
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint) (nvec->tx ? nvec->tx->pos : 0),
				(uint) (nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: "
				"Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
				"received address 0x%02x, expected 0x%02x\n",
				received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	/*
	 * TODO: A correct fix needs to be found for this.
	 *
	 * We experience fewer incomplete messages with this delay than
	 * without it, but we don't know why. Help is appreciated.
	 */
	udelay(100);

	return IRQ_HANDLED;
}
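/*
 * Reader's note (derived from the handler above, not from EC documentation):
 * the state machine in nvec_interrupt() moves through
 *
 *	0 - idle, waiting for a transfer start (address byte)
 *	1 - address received, expecting the command byte
 *	2 - first byte after the command; decides between an EC read
 *	    (state 3) and an EC write (state 4)
 *	3 - the EC reads from us, bytes are fed from nvec->tx
 *	4 - the EC writes to us, bytes are collected into nvec->rx
 */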
static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_prepare_enable(nvec->i2c_clk);

	tegra_periph_reset_assert(nvec->i2c_clk);
	udelay(2);
	tegra_periph_reset_deassert(nvec->i2c_clk);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);

	clk_disable_unprepare(nvec->i2c_clk);
}
#ifdef CONFIG_PM_SLEEP
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif
static void nvec_power_off(void)
{
	nvec_write_async(nvec_power_handle, EC_DISABLE_EVENT_REPORTING, 3);
	nvec_write_async(nvec_power_handle, "\x04\x01", 2);
}
static int __devinit tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct nvec_platform_data *pdata = pdev->dev.platform_data;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	struct resource *res;
	void __iomem *base;

	nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
	if (nvec == NULL) {
		dev_err(&pdev->dev, "failed to reserve memory\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, nvec);
	nvec->dev = &pdev->dev;

	if (pdata) {
		nvec->gpio = pdata->gpio;
		nvec->i2c_addr = pdata->i2c_addr;
	} else if (nvec->dev->of_node) {
		nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
		if (nvec->gpio < 0) {
			dev_err(&pdev->dev, "no gpio specified");
			return -ENODEV;
		}
		if (of_property_read_u32(nvec->dev->of_node, "slave-addr", &nvec->i2c_addr)) {
			dev_err(&pdev->dev, "no i2c address specified");
			return -ENODEV;
		}
	} else {
		dev_err(&pdev->dev, "no platform data\n");
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base) {
		dev_err(&pdev->dev, "Can't ioremap I2C region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	i2c_clk = clk_get_sys("tegra-i2c.2", NULL);
	if (IS_ERR(i2c_clk)) {
		dev_err(nvec->dev, "failed to get controller clock\n");
		return -ENODEV;
	}

	nvec->base = base;
	nvec->irq = res->start;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);
	nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);

	err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
					"nvec gpio");
	if (err < 0) {
		dev_err(nvec->dev, "couldn't request gpio\n");
		destroy_workqueue(nvec->wq);
		return -ENODEV;
	}

	err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
				"nvec", nvec);
	if (err) {
		dev_err(nvec->dev, "couldn't request irq\n");
		destroy_workqueue(nvec->wq);
		return -ENODEV;
	}
	disable_irq(nvec->irq);

	tegra_init_i2c_slave(nvec);

	clk_prepare_enable(i2c_clk);

	/* enable event reporting */
	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
			 sizeof(EC_ENABLE_EVENT_REPORTING));

	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
		sizeof(EC_GET_FIRMWARE_VERSION));

	if (msg) {
		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
			msg->data[4], msg->data[5], msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
			      ARRAY_SIZE(nvec_devices), base, 0, NULL);
	if (ret)
		dev_err(nvec->dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, "\x0d\x10\x59\x95", 4);

	/* enable lid switch event */
	nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x02\x00", 7);

	/* enable power button event */
	nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x80\x00", 7);

	return 0;
}
static int __devexit tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
	mfd_remove_devices(nvec->dev);
	destroy_workqueue(nvec->wq);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);
	struct nvec_msg *msg;

	dev_dbg(nvec->dev, "suspending\n");

	/* keep these sync or you'll break suspend */
	msg = nvec_write_sync(nvec, EC_DISABLE_EVENT_REPORTING, 3);
	nvec_msg_free(nvec, msg);
	msg = nvec_write_sync(nvec, "\x04\x02", 2);
	nvec_msg_free(nvec, msg);

	nvec_disable_i2c_slave(nvec);

	return 0;
}
static int nvec_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	dev_dbg(nvec->dev, "resuming\n");
	tegra_init_i2c_slave(nvec);
	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING, 3);

	return 0;
}
#endif
static const SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);
/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] __devinitconst = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
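/*
 * Illustrative device tree fragment (a sketch with placeholder values, not
 * taken from a board file); tegra_nvec_probe() reads the "request-gpios"
 * and "slave-addr" properties when no platform data is supplied:
 *
 *	nvec {
 *		compatible = "nvidia,nvec";
 *		request-gpios = <&gpio 170 0>;
 *		slave-addr = <138>;
 *	};
 */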
static struct platform_driver nvec_device_driver = {
	.probe   = tegra_nvec_probe,
	.remove  = __devexit_p(tegra_nvec_remove),
	.driver  = {
		.name = "nvec",
		.owner = THIS_MODULE,
		.pm = &nvec_pm_ops,
		.of_match_table = nvidia_nvec_of_match,
	}
};

module_platform_driver(nvec_device_driver);
MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");