drivers/staging/nvec/nvec.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "nvec.h"

#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		BIT(10)
#define I2C_CNFG_NEW_MASTER_SFM		BIT(11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWSL		BIT(2)
#define I2C_SL_NACK		BIT(1)
#define I2C_SL_RESP		BIT(0)
#define I2C_SL_IRQ		BIT(3)
#define END_TRANS		BIT(4)
#define RCVD			BIT(2)
#define RNW			BIT(1)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c

/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};

enum nvec_sleep_subcmds {
	GLOBAL_EVENTS,
	AP_PWR_DOWN,
	AP_SUSPEND,
};

#define CNF_EVENT_REPORTING 0x01
#define GET_FIRMWARE_VERSION 0x15
#define LID_SWITCH BIT(1)
#define PWR_BUTTON BIT(15)

static struct nvec_chip *nvec_power_handle;

static const struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
	},
	{
		.name = "nvec-mouse",
	},
	{
		.name = "nvec-power",
		.id = 0,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		.name = "nvec-paz00",
	},
};

/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 * @events: Unused
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
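
/*
 * Hedged usage sketch (not part of the driver): a client sub-driver would
 * typically register a notifier from its probe routine and filter on the
 * event type it cares about. The names foo_notifier/foo_nb below are
 * illustrative only:
 *
 *	static int foo_notifier(struct notifier_block *nb,
 *				unsigned long event_type, void *data)
 *	{
 *		unsigned char *msg = data;
 *
 *		if (event_type != NVEC_BAT)
 *			return NOTIFY_DONE;
 *		// handle msg[] here
 *		return NOTIFY_STOP;
 *	}
 *
 *	nvec_register_notifier(nvec, &foo_nb, 0);
 */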

/**
 * nvec_unregister_notifier - Unregister a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to unregister
 *
 * Unregisters a notifier with @nvec. The notifier will be removed from the
 * atomic notifier chain.
 */
int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_unregister_notifier);

/**
 * nvec_status_notifier - The final notifier
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
				unsigned long event_type, void *data)
{
	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
					      nvec_status_notifier);
	unsigned char *msg = data;

	if (event_type != NVEC_CNTL)
		return NOTIFY_DONE;

	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
		       msg, msg[1] + 2, true);

	return NOTIFY_OK;
}

/**
 * nvec_msg_alloc:
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
				       enum nvec_msg_category category)
{
	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

	for (; i < NVEC_POOL_SIZE; i++) {
		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
			return &nvec->msg_pool[i];
		}
	}

	dev_err(nvec->dev, "could not allocate %s buffer\n",
		(category == NVEC_MSG_TX) ? "TX" : "RX");

	return NULL;
}
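
/*
 * Illustrative note on the pool split above (assumes the pool size from
 * nvec.h; e.g. with NVEC_POOL_SIZE == 64 the search for a TX buffer starts
 * at index 64 / 4 == 16, so the first quarter of the slots stays reserved
 * for RX messages, while an RX allocation may use any free slot from 0).
 */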

/**
 * nvec_msg_free:
 * @nvec: A &struct nvec_chip
 * @msg:  A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message
 */
void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);

/**
 * nvec_msg_is_event - Return %true if @msg is an event
 * @msg: A message
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
	return msg->data[0] >> 7;
}

/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
	bool is_event = nvec_msg_is_event(msg);
	int event_length = (msg->data[0] & 0x60) >> 5;

	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
	if (!is_event || event_length == NVEC_VAR_SIZE)
		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
	else if (event_length == NVEC_2BYTES)
		return 2;
	else if (event_length == NVEC_3BYTES)
		return 3;
	return 0;
}
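
/*
 * Worked example (illustrative only): a non-event message whose count byte
 * is msg->data[1] == 0x04 has a total size of 0x04 + 2 == 6 bytes
 * (payload + count byte + command byte). A fixed-size event, encoded via
 * bits 6:5 of data[0] as NVEC_2BYTES or NVEC_3BYTES, is simply 2 or 3
 * bytes long.
 */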

/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpio_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpio_get_value(nvec->gpio), value);
	gpio_set_value(nvec->gpio, value);
}

/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
		     short size)
{
	struct nvec_msg *msg;
	unsigned long flags;

	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

	if (!msg)
		return -ENOMEM;

	msg->data[0] = size;
	memcpy(msg->data + 1, data, size);
	msg->size = size + 1;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	list_add_tail(&msg->node, &nvec->tx_data);
	spin_unlock_irqrestore(&nvec->tx_lock, flags);

	schedule_work(&nvec->tx_work);

	return 0;
}
EXPORT_SYMBOL(nvec_write_async);
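
/*
 * Usage sketch (illustrative, mirrors how this file itself sends
 * fire-and-forget requests): the payload starts with the request type byte,
 * followed by the sub-command and its arguments.
 *
 *	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, true };
 *
 *	nvec_write_async(nvec, global_events, 3);
 *
 * The call only queues the message; the tx worker and the interrupt handler
 * perform the actual transfer later.
 */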

/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 * @msg:  The response message received
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: 0 on success, a negative error code on failure.
 * The response message is returned in @msg. It shall be freed
 * with nvec_msg_free() once no longer used.
 */
int nvec_write_sync(struct nvec_chip *nvec,
		    const unsigned char *data, short size,
		    struct nvec_msg **msg)
{
	mutex_lock(&nvec->sync_write_mutex);

	*msg = NULL;
	nvec->sync_write_pending = (data[1] << 8) + data[0];

	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
		return -ENOMEM;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
		nvec->sync_write_pending);
	if (!(wait_for_completion_timeout(&nvec->sync_write,
					  msecs_to_jiffies(2000)))) {
		dev_warn(nvec->dev,
			 "timeout waiting for sync write to complete\n");
		mutex_unlock(&nvec->sync_write_mutex);
		return -ETIMEDOUT;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

	*msg = nvec->last_sync_msg;

	mutex_unlock(&nvec->sync_write_mutex);

	return 0;
}
EXPORT_SYMBOL(nvec_write_sync);
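
/*
 * Usage sketch (illustrative, modelled on the firmware version query in
 * tegra_nvec_probe() below): callers must free the response with
 * nvec_msg_free() once they are done with it.
 *
 *	struct nvec_msg *msg;
 *	char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION };
 *	int err;
 *
 *	err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
 *	if (!err) {
 *		// msg->data[] holds the response
 *		nvec_msg_free(nvec, msg);
 *	}
 */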

/**
 * nvec_toggle_global_events - enables or disables global event reporting
 * @nvec: nvec handle
 * @state: true for enable, false for disable
 *
 * This switches on/off global event reports by the embedded controller.
 */
static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
{
	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };

	nvec_write_async(nvec, global_events, 3);
}

/**
 * nvec_event_mask - fill the command string with the event bitfield
 * @ev: points to the event command string
 * @mask: bits to insert into the event mask
 *
 * The configure event command expects a 32 bit bitfield which describes
 * which events to enable. The bitfield has the following structure
 * (from highest byte to lowest):
 *	system state bits 7-0
 *	system state bits 15-8
 *	oem system state bits 7-0
 *	oem system state bits 15-8
 */
static void nvec_event_mask(char *ev, u32 mask)
{
	ev[3] = mask >> 16 & 0xff;
	ev[4] = mask >> 24 & 0xff;
	ev[5] = mask >> 0 & 0xff;
	ev[6] = mask >> 8 & 0xff;
}
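
/*
 * Worked example (illustrative): for mask == PWR_BUTTON == BIT(15),
 * (mask >> 16) and (mask >> 24) are 0, so ev[3] == ev[4] == 0x00,
 * ev[5] == 0x00 (system state bits 7-0) and ev[6] == 0x80 (system state
 * bits 15-8), i.e. only the power button bit is set in the mask.
 */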

/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(
				&nvec->ec_transfer, msecs_to_jiffies(5000));

		if (err == 0) {
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}

/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier().
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
		dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
		return -EINVAL;
	}

	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
		print_hex_dump(KERN_WARNING, "ec system event ",
			       DUMP_PREFIX_NONE, 16, 1, msg->data,
			       msg->data[1] + 2, true);

	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
				   msg->data);

	return 0;
}

/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		if (nvec->sync_write_pending ==
		    (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}

/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
	/* We got an END_TRANS, let's skip this, maybe there's an event */
	if (nvec->tx->pos != nvec->tx->size) {
		dev_err(nvec->dev, "premature END_TRANS, resending\n");
		nvec->tx->pos = 0;
		nvec_gpio_set_value(nvec, 0);
	} else {
		nvec->state = 0;
	}
}

/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on an RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
			(uint)nvec_msg_size(nvec->rx),
			(uint)nvec->rx->pos);

		nvec_msg_free(nvec, nvec->rx);
		nvec->state = 0;

		/* Battery quirk - Often incomplete, and likes to crash */
		if (nvec->rx->data[0] == NVEC_BAT)
			complete(&nvec->ec_transfer);

		return;
	}

	spin_lock(&nvec->rx_lock);

	/*
	 * Add the received data to the work list and move the ring buffer
	 * pointer to the next entry.
	 */
	list_add_tail(&nvec->rx->node, &nvec->rx_data);

	spin_unlock(&nvec->rx_lock);

	nvec->state = 0;

	if (!nvec_msg_is_event(nvec->rx))
		complete(&nvec->ec_transfer);

	schedule_work(&nvec->rx_work);
}

/**
 * nvec_invalid_flags - Send an error message about invalid flags and jump
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether we shall jump to state 0.
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}

/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}
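
/*
 * Summary of the interrupt handler state machine (added comment, derived
 * from the switch statement in nvec_interrupt() below):
 *
 *	state 0: idle, expect a transfer start (I2C_SL_IRQ | RCVD)
 *	state 1: address matched, the first received byte is the command byte
 *	state 2: byte after the command; either the EC turns the bus around
 *	         for a read (we start transmitting nvec->tx) or it keeps
 *	         writing (we keep filling nvec->rx)
 *	state 3: EC block read in progress, we transmit data bytes
 *	state 4: EC write in progress, we store received bytes until
 *	         END_TRANS completes the RX message
 */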

/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something, read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0:		/* Verify that it's a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(!nvec->rx)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev,
				"tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint)(nvec->tx ? nvec->tx->pos : 0),
				(uint)(nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx ? nvec->rx->pos : 0,
				NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
				"received address 0x%02x, expected 0x%02x\n",
				received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	/*
	 * TODO: A correct fix needs to be found for this.
	 *
	 * We experience fewer incomplete messages with this delay than
	 * without it, but we don't know why. Help is appreciated.
	 */
	udelay(100);

	return IRQ_HANDLED;
}

static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_prepare_enable(nvec->i2c_clk);

	reset_control_assert(nvec->rst);
	udelay(2);
	reset_control_deassert(nvec->rst);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	      (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);
}

#ifdef CONFIG_PM_SLEEP
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif

static void nvec_power_off(void)
{
	char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };

	nvec_toggle_global_events(nvec_power_handle, false);
	nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}

/*
 * Parse common device tree data
 */
static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec)
{
	nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
	if (nvec->gpio < 0) {
		dev_err(nvec->dev, "no gpio specified");
		return -ENODEV;
	}

	if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
				 &nvec->i2c_addr)) {
		dev_err(nvec->dev, "no i2c address specified");
		return -ENODEV;
	}

	return 0;
}
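
/*
 * Hedged device tree sketch (illustrative only; property values are
 * examples, not taken from a real board file). The driver above consumes
 * request-gpios and slave-addr, plus the "div-clk" clock and "i2c" reset
 * requested in tegra_nvec_probe():
 *
 *	nvec@7000c500 {
 *		compatible = "nvidia,nvec";
 *		reg = <0x7000c500 0x100>;
 *		interrupts = <0 92 0x04>;
 *		request-gpios = <&gpio 170 0>;
 *		slave-addr = <138>;
 *		clocks = <&tegra_car 67>;
 *		clock-names = "div-clk";
 *		resets = <&tegra_car 67>;
 *		reset-names = "i2c";
 *	};
 */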

static int tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	struct resource *res;
	void __iomem *base;
	char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
	     unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
	     enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };

	if (!pdev->dev.of_node) {
		dev_err(&pdev->dev, "must be instantiated using device tree\n");
		return -ENODEV;
	}

	nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
	if (!nvec)
		return -ENOMEM;

	platform_set_drvdata(pdev, nvec);
	nvec->dev = &pdev->dev;

	err = nvec_i2c_parse_dt_pdata(nvec);
	if (err < 0)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	nvec->irq = platform_get_irq(pdev, 0);
	if (nvec->irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
	if (IS_ERR(i2c_clk)) {
		dev_err(nvec->dev, "failed to get controller clock\n");
		return -ENODEV;
	}

	nvec->rst = devm_reset_control_get_exclusive(&pdev->dev, "i2c");
	if (IS_ERR(nvec->rst)) {
		dev_err(nvec->dev, "failed to get controller reset\n");
		return PTR_ERR(nvec->rst);
	}

	nvec->base = base;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);

	err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
				    "nvec gpio");
	if (err < 0) {
		dev_err(nvec->dev, "couldn't request gpio\n");
		return -ENODEV;
	}

	err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
			       "nvec", nvec);
	if (err) {
		dev_err(nvec->dev, "couldn't request irq\n");
		return -ENODEV;
	}
	disable_irq(nvec->irq);

	tegra_init_i2c_slave(nvec);

	/* enable event reporting */
	nvec_toggle_global_events(nvec, true);

	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);

	if (!err) {
		dev_warn(nvec->dev,
			 "ec firmware version %02x.%02x.%02x / %02x\n",
			 msg->data[4], msg->data[5],
			 msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
			      ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
	if (ret)
		dev_err(nvec->dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, unmute_speakers, 4);

	/* enable lid switch event */
	nvec_event_mask(enable_event, LID_SWITCH);
	nvec_write_async(nvec, enable_event, 7);

	/* enable power button event */
	nvec_event_mask(enable_event, PWR_BUTTON);
	nvec_write_async(nvec, enable_event, 7);

	return 0;
}

static int tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	nvec_toggle_global_events(nvec, false);
	mfd_remove_devices(nvec->dev);
	nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
	cancel_work_sync(&nvec->rx_work);
	cancel_work_sync(&nvec->tx_work);
	/* FIXME: needs check whether nvec is responsible for power off */
	pm_power_off = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
	int err;
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);
	struct nvec_msg *msg;
	char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };

	dev_dbg(nvec->dev, "suspending\n");

	/* keep these sync or you'll break suspend */
	nvec_toggle_global_events(nvec, false);

	err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
	if (!err)
		nvec_msg_free(nvec, msg);

	nvec_disable_i2c_slave(nvec);

	return 0;
}

static int nvec_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	dev_dbg(nvec->dev, "resuming\n");
	tegra_init_i2c_slave(nvec);
	nvec_toggle_global_events(nvec, true);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);

/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);

static struct platform_driver nvec_device_driver = {
	.probe		= tegra_nvec_probe,
	.remove		= tegra_nvec_remove,
	.driver		= {
		.name		= "nvec",
		.pm		= &nvec_pm_ops,
		.of_match_table	= nvidia_nvec_of_match,
	},
};

module_platform_driver(nvec_device_driver);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");