/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/pci.h>

#include <linux/kthread.h>
#include <linux/interrupt.h>

#include "mei_dev.h"
#include "hbm.h"

#include "hw-me.h"
#include "hw-me-regs.h"

#include "mei-trace.h"

/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
                                  unsigned long offset)
{
        return ioread32(hw->mem_addr + offset);
}

/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset at which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
                                    unsigned long offset, u32 value)
{
        iowrite32(value, hw->mem_addr + offset);
}

/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static inline u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
        return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}

/**
 * mei_me_hcbww_write - write 32bit data to the host circular buffer
 *
 * @dev: the device structure
 * @data: 32bit data to be written to the host circular buffer
 */
static inline void mei_me_hcbww_write(struct mei_device *dev, u32 data)
{
        mei_me_reg_write(to_me_hw(dev), H_CB_WW, data);
}

/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @dev: the device structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
        trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);

        return reg;
}

/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @dev: the device structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_device *dev)
{
        u32 reg;

        reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
        trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);

        return reg;
}

/**
 * mei_hcsr_write - writes H_CSR register to the mei device
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
        trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
        mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}

/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @dev: the device structure
 * @reg: new register value
 */
static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
{
        reg &= ~H_IS;
        mei_hcsr_write(dev, reg);
}
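
/*
 * H_IS is a write-one-to-zero status bit: writing it back unchanged would
 * silently acknowledge a pending interrupt that nobody has handled yet.
 * Ordinary register updates therefore go through mei_hcsr_set(), and
 * mei_hcsr_write() is used only when the caller really means to ack
 * (see mei_me_intr_clear() and the quick irq handler below).
 */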

/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
                            struct mei_fw_status *fw_status)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        struct mei_me_hw *hw = to_me_hw(dev);
        const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
        int ret;
        int i;

        if (!fw_status)
                return -EINVAL;

        fw_status->count = fw_src->count;
        for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
                ret = pci_read_config_dword(pdev,
                        fw_src->status[i], &fw_status->status[i]);
                if (ret)
                        return ret;
        }

        return 0;
}
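
/*
 * The register list behind hw->cfg->fw_status comes from the
 * per-generation mei_cfg tables at the bottom of this file
 * (MEI_CFG_ICH_HFS, MEI_CFG_PCH_HFS, MEI_CFG_PCH8_HFS), so the same loop
 * serves devices exposing one, two or six HFS registers.
 */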

/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 */
static void mei_me_hw_config(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 hcsr = mei_hcsr_read(dev);

        /* Doesn't change in runtime */
        dev->hbuf_depth = (hcsr & H_CBD) >> 24;

        hw->pg_state = MEI_PG_OFF;
}

/**
 * mei_me_pg_state - translate internal pg state
 *   to the mei power gating state
 *
 * @dev: mei device
 *
 * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        return hw->pg_state;
}

/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        if ((hcsr & H_IS) == H_IS)
                mei_hcsr_write(dev, hcsr);
}

/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_IE;
        mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr &= ~H_IE;
        mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_IG;
        hcsr &= ~H_RST;
        mei_hcsr_set(dev, hcsr);

        /* complete this write before we set host ready on another CPU */
        mmiowb();
}

/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: always 0
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
        u32 hcsr = mei_hcsr_read(dev);

        /* H_RST may be found lit before reset is started,
         * for example if preceding reset flow hasn't completed.
         * In that case asserting H_RST will be ignored, therefore
         * we need to clean H_RST bit to start a successful reset sequence.
         */
        if ((hcsr & H_RST) == H_RST) {
                dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
                hcsr &= ~H_RST;
                mei_hcsr_set(dev, hcsr);
                hcsr = mei_hcsr_read(dev);
        }

        hcsr |= H_RST | H_IG | H_IS;

        if (intr_enable)
                hcsr |= H_IE;
        else
                hcsr &= ~H_IE;

        dev->recvd_hw_ready = false;
        mei_hcsr_write(dev, hcsr);

        /*
         * Host reads the H_CSR once to ensure that the
         * posted write to H_CSR completes.
         */
        hcsr = mei_hcsr_read(dev);

        if ((hcsr & H_RST) == 0)
                dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

        if ((hcsr & H_RDY) == H_RDY)
                dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

        if (!intr_enable)
                mei_me_hw_reset_release(dev);

        return 0;
}

/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        hcsr |= H_IE | H_IG | H_RDY;
        mei_hcsr_set(dev, hcsr);
}

/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
        u32 hcsr = mei_hcsr_read(dev);

        return (hcsr & H_RDY) == H_RDY;
}

/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 *
 * Return: bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
        u32 mecsr = mei_me_mecsr_read(dev);

        return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
}

/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_hw_ready,
                        dev->recvd_hw_ready,
                        mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
        mutex_lock(&dev->device_lock);
        if (!dev->recvd_hw_ready) {
                dev_err(dev->dev, "wait hw ready failed\n");
                return -ETIME;
        }

        mei_me_hw_reset_release(dev);
        dev->recvd_hw_ready = false;
        return 0;
}

/**
 * mei_me_hw_start - hw start routine
 *
 * @dev: mei device
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_start(struct mei_device *dev)
{
        int ret = mei_me_hw_ready_wait(dev);

        if (ret)
                return ret;
        dev_dbg(dev->dev, "hw is ready\n");

        mei_me_host_set_ready(dev);
        return ret;
}

/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
        u32 hcsr;
        char read_ptr, write_ptr;

        hcsr = mei_hcsr_read(dev);

        read_ptr = (char) ((hcsr & H_CBRP) >> 8);
        write_ptr = (char) ((hcsr & H_CBWP) >> 16);

        return (unsigned char) (write_ptr - read_ptr);
}
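
/*
 * The code above treats H_CBRP and H_CBWP as free-running 8-bit counters:
 * (unsigned char)(write_ptr - read_ptr) is the filled-slot count even
 * after the write pointer wraps, e.g. read_ptr = 254 and write_ptr = 2
 * gives 4 filled slots.
 */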

/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * Return: true if empty, false - otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
        return mei_hbuf_filled_slots(dev) == 0;
}

/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
        unsigned char filled_slots, empty_slots;

        filled_slots = mei_hbuf_filled_slots(dev);
        empty_slots = dev->hbuf_depth - filled_slots;

        /* check for overflow */
        if (filled_slots > dev->hbuf_depth)
                return -EOVERFLOW;

        return empty_slots;
}

/**
 * mei_me_hbuf_max_len - returns size of hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in bytes
 */
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
        return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}
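
/*
 * hbuf_depth counts 4-byte slots and one slot is always consumed by the
 * mei_msg_hdr (a single dword), hence the subtraction: a depth of 32
 * slots, for example, leaves 32 * 4 - 4 = 124 bytes of payload.
 */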

/**
 * mei_me_write_message - writes a message to mei device.
 *
 * @dev: the device structure
 * @header: mei HECI header of message
 * @buf: message payload to be written
 *
 * Return: -EIO if write has failed
 */
static int mei_me_write_message(struct mei_device *dev,
                        struct mei_msg_hdr *header,
                        unsigned char *buf)
{
        unsigned long rem;
        unsigned long length = header->length;
        u32 *reg_buf = (u32 *)buf;
        u32 hcsr;
        u32 dw_cnt;
        int i;
        int empty_slots;

        dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));

        empty_slots = mei_hbuf_empty_slots(dev);
        dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);

        dw_cnt = mei_data2slots(length);
        if (empty_slots < 0 || dw_cnt > empty_slots)
                return -EMSGSIZE;

        mei_me_hcbww_write(dev, *((u32 *) header));

        for (i = 0; i < length / 4; i++)
                mei_me_hcbww_write(dev, reg_buf[i]);

        rem = length & 0x3;
        if (rem > 0) {
                u32 reg = 0;

                memcpy(&reg, &buf[length - rem], rem);
                mei_me_hcbww_write(dev, reg);
        }

        hcsr = mei_hcsr_read(dev) | H_IG;
        mei_hcsr_set(dev, hcsr);
        if (!mei_me_hw_is_ready(dev))
                return -EIO;

        return 0;
}
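
/*
 * Write path used above: one dword for the header, the payload a dword at
 * a time, and any 1-3 trailing bytes packed into a final zero-padded
 * dword; setting H_IG afterwards signals the ME that the host circular
 * buffer has new data to consume.
 */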

/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
        u32 me_csr;
        char read_ptr, write_ptr;
        unsigned char buffer_depth, filled_slots;

        me_csr = mei_me_mecsr_read(dev);
        buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
        read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
        write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
        filled_slots = (unsigned char) (write_ptr - read_ptr);

        /* check for overflow */
        if (filled_slots > buffer_depth)
                return -EOVERFLOW;

        dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
        return (int)filled_slots;
}

/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer to be filled
 * @buffer_length: length of the message to read
 *
 * Return: always 0
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
                    unsigned long buffer_length)
{
        u32 *reg_buf = (u32 *)buffer;
        u32 hcsr;

        for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
                *reg_buf++ = mei_me_mecbrw_read(dev);

        if (buffer_length > 0) {
                u32 reg = mei_me_mecbrw_read(dev);

                memcpy(reg_buf, &reg, buffer_length);
        }

        hcsr = mei_hcsr_read(dev) | H_IG;
        mei_hcsr_set(dev, hcsr);
        return 0;
}

/**
 * mei_me_pg_set - write pg enter register
 *
 * @dev: the device structure
 */
static void mei_me_pg_set(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_reg_read(hw, H_HPG_CSR);
        trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

        reg |= H_HPG_CSR_PGI;

        trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
        mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_unset - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_unset(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        u32 reg;

        reg = mei_me_reg_read(hw, H_HPG_CSR);
        trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);

        WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

        reg |= H_HPG_CSR_PGIHEXR;

        trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
        mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_enter_sync - perform pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_enter_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        dev->pg_event = MEI_PG_EVENT_WAIT;

        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
        if (ret)
                return ret;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
                mei_me_pg_set(dev);
                ret = 0;
        } else {
                ret = -ETIME;
        }

        dev->pg_event = MEI_PG_EVENT_IDLE;
        hw->pg_state = MEI_PG_ON;

        return ret;
}
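
/*
 * PG entry as coded above is a two-step handshake: send
 * MEI_PG_ISOLATION_ENTRY_REQ_CMD over HBM, wait for the firmware's
 * response (MEI_PG_EVENT_RECEIVED), and only then set the PGI bit via
 * mei_me_pg_set() to actually enter power gating.
 */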

/**
 * mei_me_pg_exit_sync - perform pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_exit_sync(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);
        unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
        int ret;

        if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
                goto reply;

        dev->pg_event = MEI_PG_EVENT_WAIT;

        mei_me_pg_unset(dev);

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

reply:
        if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                ret = -ETIME;
                goto out;
        }

        dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
        ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
        if (ret)
                return ret;

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(dev->wait_pg,
                dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
        mutex_lock(&dev->device_lock);

        if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
                ret = 0;
        else
                ret = -ETIME;

out:
        dev->pg_event = MEI_PG_EVENT_IDLE;
        hw->pg_state = MEI_PG_OFF;

        return ret;
}
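
/*
 * PG exit as coded above: clear the isolation request (mei_me_pg_unset),
 * wait for the firmware's PG exit message (MEI_PG_EVENT_RECEIVED), reply
 * with MEI_PG_ISOLATION_EXIT_RES_CMD, and finally wait for the interrupt
 * reported by mei_me_pg_intr() (MEI_PG_EVENT_INTR_RECEIVED) before
 * declaring pg_state MEI_PG_OFF.
 */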

/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
        return dev->pg_event >= MEI_PG_EVENT_WAIT &&
               dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}

/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true if pg is supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
        u32 reg = mei_me_mecsr_read(dev);

        if ((reg & ME_PGIC_HRA) == 0)
                goto notsupported;

        if (!dev->hbm_f_pg_supported)
                goto notsupported;

        return true;

notsupported:
        dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
                !!(reg & ME_PGIC_HRA),
                dev->version.major_version,
                dev->version.minor_version,
                HBM_MAJOR_VERSION_PGI,
                HBM_MINOR_VERSION_PGI);

        return false;
}

/**
 * mei_me_pg_intr - perform pg processing in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_intr(struct mei_device *dev)
{
        struct mei_me_hw *hw = to_me_hw(dev);

        if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
                return;

        dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
        hw->pg_state = MEI_PG_OFF;
        if (waitqueue_active(&dev->wait_pg))
                wake_up(&dev->wait_pg);
}

/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
        struct mei_device *dev = (struct mei_device *)dev_id;
        u32 hcsr = mei_hcsr_read(dev);

        if ((hcsr & H_IS) != H_IS)
                return IRQ_NONE;

        /* clear H_IS bit in H_CSR */
        mei_hcsr_write(dev, hcsr);

        return IRQ_WAKE_THREAD;
}
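
/*
 * Top half / bottom half split: the quick handler only checks H_IS, acks
 * it (H_IS is write-one-to-zero, so writing hcsr back with the bit set
 * clears it) and returns IRQ_WAKE_THREAD; all real work happens in
 * mei_me_irq_thread_handler() below, where taking device_lock and
 * sleeping are allowed.
 */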

/**
 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
 * processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
        struct mei_device *dev = (struct mei_device *) dev_id;
        struct mei_cl_cb complete_list;
        s32 slots;
        int rets = 0;

        dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
        /* initialize our complete list */
        mutex_lock(&dev->device_lock);
        mei_io_list_init(&complete_list);

        /* Ack the interrupt here
         * In case of MSI we don't go through the quick handler */
        if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
                mei_clear_interrupts(dev);

        /* check if ME wants a reset */
        if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
                dev_warn(dev->dev, "FW not ready: resetting.\n");
                schedule_work(&dev->reset_work);
                goto end;
        }

        mei_me_pg_intr(dev);

        /* check if we need to start the dev */
        if (!mei_host_is_ready(dev)) {
                if (mei_hw_is_ready(dev)) {
                        dev_dbg(dev->dev, "we need to start the dev.\n");
                        dev->recvd_hw_ready = true;
                        wake_up(&dev->wait_hw_ready);
                } else {
                        dev_dbg(dev->dev, "Spurious Interrupt\n");
                }
                goto end;
        }

        /* check slots available for reading */
        slots = mei_count_full_read_slots(dev);
        while (slots > 0) {
                dev_dbg(dev->dev, "slots to read = %08x\n", slots);
                rets = mei_irq_read_handler(dev, &complete_list, &slots);
                /* There is a race between ME write and interrupt delivery:
                 * Not all data is always available immediately after the
                 * interrupt, so try to read again on the next interrupt.
                 */
                if (rets == -ENODATA)
                        break;

                if (rets && dev->dev_state != MEI_DEV_RESETTING) {
                        dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
                                rets);
                        schedule_work(&dev->reset_work);
                        goto end;
                }
        }

        dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

        /*
         * During PG handshake only allowed write is the reply to the
         * PG exit message, so block calling write function
         * if the pg event is in PG handshake
         */
        if (dev->pg_event != MEI_PG_EVENT_WAIT &&
            dev->pg_event != MEI_PG_EVENT_RECEIVED) {
                rets = mei_irq_write_handler(dev, &complete_list);
                dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
        }

        mei_irq_compl_handler(dev, &complete_list);

end:
        dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
        mutex_unlock(&dev->device_lock);
        return IRQ_HANDLED;
}

static const struct mei_hw_ops mei_me_hw_ops = {

        .fw_status = mei_me_fw_status,
        .pg_state = mei_me_pg_state,

        .host_is_ready = mei_me_host_is_ready,

        .hw_is_ready = mei_me_hw_is_ready,
        .hw_reset = mei_me_hw_reset,
        .hw_config = mei_me_hw_config,
        .hw_start = mei_me_hw_start,

        .pg_in_transition = mei_me_pg_in_transition,
        .pg_is_enabled = mei_me_pg_is_enabled,

        .intr_clear = mei_me_intr_clear,
        .intr_enable = mei_me_intr_enable,
        .intr_disable = mei_me_intr_disable,

        .hbuf_free_slots = mei_me_hbuf_empty_slots,
        .hbuf_is_ready = mei_me_hbuf_is_empty,
        .hbuf_max_len = mei_me_hbuf_max_len,

        .write = mei_me_write_message,

        .rdbuf_full_slots = mei_me_count_full_read_slots,
        .read_hdr = mei_me_mecbrw_read,
        .read = mei_me_read_slots
};
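
/*
 * This ops vtable is the hardware-specific surface handed to
 * mei_device_init() in mei_me_dev_init() below; the core then calls back
 * through it (e.g. the mei_hw_is_ready()/mei_hbuf_is_ready() wrappers used
 * in the interrupt thread above) instead of touching ME registers
 * directly.
 */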

static bool mei_me_fw_type_nm(struct pci_dev *pdev)
{
        u32 reg;

        pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
        /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
        return (reg & 0x600) == 0x200;
}

#define MEI_CFG_FW_NM \
        .quirk_probe = mei_me_fw_type_nm

static bool mei_me_fw_type_sps(struct pci_dev *pdev)
{
        u32 reg;

        /* Read ME FW Status check for SPS Firmware */
        pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
        /* if bits [19:16] = 15, running SPS Firmware */
        return (reg & 0xf0000) == 0xf0000;
}

#define MEI_CFG_FW_SPS \
        .quirk_probe = mei_me_fw_type_sps
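
/*
 * Note: the quirk_probe hooks above are, as far as this file is concerned,
 * just predicates over the PCI HFS registers; the PCI probe code is
 * expected to consult cfg->quirk_probe and refuse to bind when Node
 * Manager or SPS firmware is detected, since those parts are not meant to
 * be driven by this driver.
 */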

#define MEI_CFG_LEGACY_HFS \
        .fw_status.count = 0

#define MEI_CFG_ICH_HFS \
        .fw_status.count = 1, \
        .fw_status.status[0] = PCI_CFG_HFS_1

#define MEI_CFG_PCH_HFS \
        .fw_status.count = 2, \
        .fw_status.status[0] = PCI_CFG_HFS_1, \
        .fw_status.status[1] = PCI_CFG_HFS_2

#define MEI_CFG_PCH8_HFS \
        .fw_status.count = 6, \
        .fw_status.status[0] = PCI_CFG_HFS_1, \
        .fw_status.status[1] = PCI_CFG_HFS_2, \
        .fw_status.status[2] = PCI_CFG_HFS_3, \
        .fw_status.status[3] = PCI_CFG_HFS_4, \
        .fw_status.status[4] = PCI_CFG_HFS_5, \
        .fw_status.status[5] = PCI_CFG_HFS_6

/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
        MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
        MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
        MEI_CFG_PCH_HFS,
};

/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
        MEI_CFG_PCH_HFS,
        MEI_CFG_FW_NM,
};

/* PCH8 Lynx Point and newer devices */
const struct mei_cfg mei_me_pch8_cfg = {
        MEI_CFG_PCH8_HFS,
};

/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_pch8_sps_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_SPS,
};

/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @pdev: The pci device structure
 * @cfg: per device generation config
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
                                   const struct mei_cfg *cfg)
{
        struct mei_device *dev;
        struct mei_me_hw *hw;

        dev = kzalloc(sizeof(struct mei_device) +
                      sizeof(struct mei_me_hw), GFP_KERNEL);
        if (!dev)
                return NULL;
        hw = to_me_hw(dev);

        mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
        hw->cfg = cfg;
        return dev;
}