1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright (c) 2012-2017 ASPEED Technology Inc.
3 // Copyright (c) 2018-2021 Intel Corporation
5 #include <linux/unaligned.h>
7 #include <linux/bitfield.h>
9 #include <linux/clkdev.h>
10 #include <linux/clk-provider.h>
11 #include <linux/delay.h>
12 #include <linux/interrupt.h>
14 #include <linux/iopoll.h>
15 #include <linux/jiffies.h>
16 #include <linux/math.h>
17 #include <linux/module.h>
19 #include <linux/peci.h>
20 #include <linux/platform_device.h>
21 #include <linux/reset.h>
/* ASPEED PECI Registers */
/* Control Register */
#define ASPEED_PECI_CTRL			0x00
#define   ASPEED_PECI_CTRL_SAMPLING_MASK	GENMASK(19, 16)
#define   ASPEED_PECI_CTRL_RD_MODE_MASK		GENMASK(13, 12)
#define     ASPEED_PECI_CTRL_RD_MODE_DBG	BIT(13)
#define     ASPEED_PECI_CTRL_RD_MODE_COUNT	BIT(12)
#define   ASPEED_PECI_CTRL_CLK_SRC_HCLK		BIT(11)
#define   ASPEED_PECI_CTRL_CLK_DIV_MASK		GENMASK(10, 8)
#define   ASPEED_PECI_CTRL_INVERT_OUT		BIT(7)
#define   ASPEED_PECI_CTRL_INVERT_IN		BIT(6)
#define   ASPEED_PECI_CTRL_BUS_CONTENTION_EN	BIT(5)
#define   ASPEED_PECI_CTRL_PECI_EN		BIT(4)
#define   ASPEED_PECI_CTRL_PECI_CLK_EN		BIT(0)

/* Timing Negotiation Register */
#define ASPEED_PECI_TIMING_NEGOTIATION		0x04
#define   ASPEED_PECI_T_NEGO_MSG_MASK		GENMASK(15, 8)
#define   ASPEED_PECI_T_NEGO_ADDR_MASK		GENMASK(7, 0)

/* Command Register */
#define ASPEED_PECI_CMD				0x08
#define   ASPEED_PECI_CMD_PIN_MONITORING	BIT(31)
#define   ASPEED_PECI_CMD_STS_MASK		GENMASK(27, 24)
#define     ASPEED_PECI_CMD_STS_ADDR_T_NEGO	0x3
#define   ASPEED_PECI_CMD_IDLE_MASK		\
	  (ASPEED_PECI_CMD_STS_MASK | ASPEED_PECI_CMD_PIN_MONITORING)
#define   ASPEED_PECI_CMD_FIRE			BIT(0)

/* Read/Write Length Register */
#define ASPEED_PECI_RW_LENGTH			0x0c
#define   ASPEED_PECI_AW_FCS_EN			BIT(31)
#define   ASPEED_PECI_RD_LEN_MASK		GENMASK(23, 16)
#define   ASPEED_PECI_WR_LEN_MASK		GENMASK(15, 8)
#define   ASPEED_PECI_TARGET_ADDR_MASK		GENMASK(7, 0)

/* Expected FCS Data Register */
#define ASPEED_PECI_EXPECTED_FCS		0x10
#define   ASPEED_PECI_EXPECTED_RD_FCS_MASK	GENMASK(23, 16)
#define   ASPEED_PECI_EXPECTED_AW_FCS_AUTO_MASK	GENMASK(15, 8)
#define   ASPEED_PECI_EXPECTED_WR_FCS_MASK	GENMASK(7, 0)

/* Captured FCS Data Register */
#define ASPEED_PECI_CAPTURED_FCS		0x14
#define   ASPEED_PECI_CAPTURED_RD_FCS_MASK	GENMASK(23, 16)
#define   ASPEED_PECI_CAPTURED_WR_FCS_MASK	GENMASK(7, 0)

/* Interrupt Register */
#define ASPEED_PECI_INT_CTRL			0x18
#define   ASPEED_PECI_TIMING_NEGO_SEL_MASK	GENMASK(31, 30)
#define     ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO	0
#define     ASPEED_PECI_2ND_BIT_OF_ADDR_NEGO	1
#define     ASPEED_PECI_MESSAGE_NEGO		2
#define   ASPEED_PECI_INT_MASK			GENMASK(4, 0)
#define     ASPEED_PECI_INT_BUS_TIMEOUT		BIT(4)
#define     ASPEED_PECI_INT_BUS_CONTENTION	BIT(3)
#define     ASPEED_PECI_INT_WR_FCS_BAD		BIT(2)
#define     ASPEED_PECI_INT_WR_FCS_ABORT	BIT(1)
#define     ASPEED_PECI_INT_CMD_DONE		BIT(0)

/* Interrupt Status Register */
#define ASPEED_PECI_INT_STS			0x1c
#define   ASPEED_PECI_INT_TIMING_RESULT_MASK	GENMASK(29, 16)
	  /* bits[4..0]: Same bit fields in the 'Interrupt Register' */

/* Rx/Tx Data Buffer Registers */
#define ASPEED_PECI_WR_DATA0			0x20
#define ASPEED_PECI_WR_DATA1			0x24
#define ASPEED_PECI_WR_DATA2			0x28
#define ASPEED_PECI_WR_DATA3			0x2c
#define ASPEED_PECI_RD_DATA0			0x30
#define ASPEED_PECI_RD_DATA1			0x34
#define ASPEED_PECI_RD_DATA2			0x38
#define ASPEED_PECI_RD_DATA3			0x3c
#define ASPEED_PECI_WR_DATA4			0x40
#define ASPEED_PECI_WR_DATA5			0x44
#define ASPEED_PECI_WR_DATA6			0x48
#define ASPEED_PECI_WR_DATA7			0x4c
#define ASPEED_PECI_RD_DATA4			0x50
#define ASPEED_PECI_RD_DATA5			0x54
#define ASPEED_PECI_RD_DATA6			0x58
#define ASPEED_PECI_RD_DATA7			0x5c
#define ASPEED_PECI_DATA_BUF_SIZE_MAX		32

/* Timing Negotiation */
#define ASPEED_PECI_CLK_FREQUENCY_MIN		2000
#define ASPEED_PECI_CLK_FREQUENCY_DEFAULT	1000000
#define ASPEED_PECI_CLK_FREQUENCY_MAX		2000000
#define ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT	8

#define ASPEED_PECI_IDLE_CHECK_TIMEOUT_US	(50 * USEC_PER_MSEC)
#define ASPEED_PECI_IDLE_CHECK_INTERVAL_US	(10 * USEC_PER_MSEC)
#define ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT	1000
#define ASPEED_PECI_CMD_TIMEOUT_MS_MAX		1000

/*
 * Total HW divider: out_clk = in_clk / (4 * DIV1 * DIV2), where
 * DIV1 = 4 * msg_timing + 1 and DIV2 = 1 << clk_div_exp.
 */
#define ASPEED_PECI_CLK_DIV1(msg_timing)	(4 * (msg_timing) + 1)
#define ASPEED_PECI_CLK_DIV2(clk_div_exp)	BIT(clk_div_exp)
#define ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp) \
	(4 * ASPEED_PECI_CLK_DIV1(msg_timing) * ASPEED_PECI_CLK_DIV2(clk_div_exp))
124 struct peci_controller
*controller
;
127 struct reset_control
*rst
;
129 spinlock_t lock
; /* to sync completion status handling */
130 struct completion xfer_complete
;
137 struct clk_aspeed_peci
{
139 struct aspeed_peci
*aspeed_peci
;
142 static void aspeed_peci_controller_enable(struct aspeed_peci
*priv
)
144 u32 val
= readl(priv
->base
+ ASPEED_PECI_CTRL
);
146 val
|= ASPEED_PECI_CTRL_PECI_CLK_EN
;
147 val
|= ASPEED_PECI_CTRL_PECI_EN
;
149 writel(val
, priv
->base
+ ASPEED_PECI_CTRL
);
152 static void aspeed_peci_init_regs(struct aspeed_peci
*priv
)
156 /* Clear interrupts */
157 writel(ASPEED_PECI_INT_MASK
, priv
->base
+ ASPEED_PECI_INT_STS
);
159 /* Set timing negotiation mode and enable interrupts */
160 val
= FIELD_PREP(ASPEED_PECI_TIMING_NEGO_SEL_MASK
, ASPEED_PECI_1ST_BIT_OF_ADDR_NEGO
);
161 val
|= ASPEED_PECI_INT_MASK
;
162 writel(val
, priv
->base
+ ASPEED_PECI_INT_CTRL
);
164 val
= FIELD_PREP(ASPEED_PECI_CTRL_SAMPLING_MASK
, ASPEED_PECI_RD_SAMPLING_POINT_DEFAULT
);
165 writel(val
, priv
->base
+ ASPEED_PECI_CTRL
);
168 static int aspeed_peci_check_idle(struct aspeed_peci
*priv
)
170 u32 cmd_sts
= readl(priv
->base
+ ASPEED_PECI_CMD
);
174 * Under normal circumstances, we expect to be idle here.
175 * In case there were any errors/timeouts that led to the situation
176 * where the hardware is not in idle state - we need to reset and
177 * reinitialize it to avoid potential controller hang.
179 if (FIELD_GET(ASPEED_PECI_CMD_STS_MASK
, cmd_sts
)) {
180 ret
= reset_control_assert(priv
->rst
);
182 dev_err(priv
->dev
, "cannot assert reset control\n");
186 ret
= reset_control_deassert(priv
->rst
);
188 dev_err(priv
->dev
, "cannot deassert reset control\n");
192 aspeed_peci_init_regs(priv
);
194 ret
= clk_set_rate(priv
->clk
, priv
->clk_frequency
);
196 dev_err(priv
->dev
, "cannot set clock frequency\n");
200 aspeed_peci_controller_enable(priv
);
203 return readl_poll_timeout(priv
->base
+ ASPEED_PECI_CMD
,
205 !(cmd_sts
& ASPEED_PECI_CMD_IDLE_MASK
),
206 ASPEED_PECI_IDLE_CHECK_INTERVAL_US
,
207 ASPEED_PECI_IDLE_CHECK_TIMEOUT_US
);
210 static int aspeed_peci_xfer(struct peci_controller
*controller
,
211 u8 addr
, struct peci_request
*req
)
213 struct aspeed_peci
*priv
= dev_get_drvdata(controller
->dev
.parent
);
214 unsigned long timeout
= msecs_to_jiffies(priv
->cmd_timeout_ms
);
218 if (req
->tx
.len
> ASPEED_PECI_DATA_BUF_SIZE_MAX
||
219 req
->rx
.len
> ASPEED_PECI_DATA_BUF_SIZE_MAX
)
222 /* Check command sts and bus idle state */
223 ret
= aspeed_peci_check_idle(priv
);
225 return ret
; /* -ETIMEDOUT */
227 spin_lock_irq(&priv
->lock
);
228 reinit_completion(&priv
->xfer_complete
);
230 peci_head
= FIELD_PREP(ASPEED_PECI_TARGET_ADDR_MASK
, addr
) |
231 FIELD_PREP(ASPEED_PECI_WR_LEN_MASK
, req
->tx
.len
) |
232 FIELD_PREP(ASPEED_PECI_RD_LEN_MASK
, req
->rx
.len
);
234 writel(peci_head
, priv
->base
+ ASPEED_PECI_RW_LENGTH
);
236 for (i
= 0; i
< req
->tx
.len
; i
+= 4) {
237 u32 reg
= (i
< 16 ? ASPEED_PECI_WR_DATA0
: ASPEED_PECI_WR_DATA4
) + i
% 16;
239 writel(get_unaligned_le32(&req
->tx
.buf
[i
]), priv
->base
+ reg
);
242 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
243 dev_dbg(priv
->dev
, "HEAD : %#08x\n", peci_head
);
244 print_hex_dump_bytes("TX : ", DUMP_PREFIX_NONE
, req
->tx
.buf
, req
->tx
.len
);
248 writel(ASPEED_PECI_CMD_FIRE
, priv
->base
+ ASPEED_PECI_CMD
);
249 spin_unlock_irq(&priv
->lock
);
251 ret
= wait_for_completion_interruptible_timeout(&priv
->xfer_complete
, timeout
);
256 dev_dbg(priv
->dev
, "timeout waiting for a response\n");
260 spin_lock_irq(&priv
->lock
);
262 if (priv
->status
!= ASPEED_PECI_INT_CMD_DONE
) {
263 spin_unlock_irq(&priv
->lock
);
264 dev_dbg(priv
->dev
, "no valid response, status: %#02x\n", priv
->status
);
268 spin_unlock_irq(&priv
->lock
);
271 * We need to use dword reads for register access, make sure that the
272 * buffer size is multiple of 4-bytes.
274 BUILD_BUG_ON(PECI_REQUEST_MAX_BUF_SIZE
% 4);
276 for (i
= 0; i
< req
->rx
.len
; i
+= 4) {
277 u32 reg
= (i
< 16 ? ASPEED_PECI_RD_DATA0
: ASPEED_PECI_RD_DATA4
) + i
% 16;
278 u32 rx_data
= readl(priv
->base
+ reg
);
280 put_unaligned_le32(rx_data
, &req
->rx
.buf
[i
]);
283 #if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
284 print_hex_dump_bytes("RX : ", DUMP_PREFIX_NONE
, req
->rx
.buf
, req
->rx
.len
);
289 static irqreturn_t
aspeed_peci_irq_handler(int irq
, void *arg
)
291 struct aspeed_peci
*priv
= arg
;
294 spin_lock(&priv
->lock
);
295 status
= readl(priv
->base
+ ASPEED_PECI_INT_STS
);
296 writel(status
, priv
->base
+ ASPEED_PECI_INT_STS
);
297 priv
->status
|= (status
& ASPEED_PECI_INT_MASK
);
300 * All commands should be ended up with a ASPEED_PECI_INT_CMD_DONE bit
301 * set even in an error case.
303 if (status
& ASPEED_PECI_INT_CMD_DONE
)
304 complete(&priv
->xfer_complete
);
306 writel(0, priv
->base
+ ASPEED_PECI_CMD
);
308 spin_unlock(&priv
->lock
);
/*
 * Exhaustively search msg_timing (1-255) and clk_div_exp (0-7) for the
 * divider pair whose DIV1 * DIV2 product is closest to the requested rate.
 * Results are returned through the two out-parameters.
 */
static void clk_aspeed_peci_find_div_values(unsigned long rate, int *msg_timing, int *clk_div_exp)
{
	unsigned long best_diff = ~0ul, diff;
	int msg_timing_temp, clk_div_exp_temp, i, j;

	for (i = 1; i <= 255; i++)
		for (j = 0; j < 8; j++) {
			diff = abs(rate - ASPEED_PECI_CLK_DIV1(i) * ASPEED_PECI_CLK_DIV2(j));
			if (diff < best_diff) {
				/* New best candidate - remember it. */
				msg_timing_temp = i;
				clk_div_exp_temp = j;
				best_diff = diff;
			}
		}

	*msg_timing = msg_timing_temp;
	*clk_div_exp = clk_div_exp_temp;
}
/*
 * Compute the total hardware divider (including the fixed /4) that best
 * approximates dividing *prate down to the requested rate.
 */
static int clk_aspeed_peci_get_div(unsigned long rate, const unsigned long *prate)
{
	unsigned long this_rate = *prate / (4 * rate);
	int msg_timing, clk_div_exp;

	clk_aspeed_peci_find_div_values(this_rate, &msg_timing, &clk_div_exp);

	return ASPEED_PECI_CLK_DIV(msg_timing, clk_div_exp);
}
342 static int clk_aspeed_peci_set_rate(struct clk_hw
*hw
, unsigned long rate
,
345 struct clk_aspeed_peci
*peci_clk
= container_of(hw
, struct clk_aspeed_peci
, hw
);
346 struct aspeed_peci
*aspeed_peci
= peci_clk
->aspeed_peci
;
347 unsigned long this_rate
= prate
/ (4 * rate
);
348 int clk_div_exp
, msg_timing
;
351 clk_aspeed_peci_find_div_values(this_rate
, &msg_timing
, &clk_div_exp
);
353 val
= readl(aspeed_peci
->base
+ ASPEED_PECI_CTRL
);
354 val
&= ~ASPEED_PECI_CTRL_CLK_DIV_MASK
;
355 val
|= FIELD_PREP(ASPEED_PECI_CTRL_CLK_DIV_MASK
, clk_div_exp
);
356 writel(val
, aspeed_peci
->base
+ ASPEED_PECI_CTRL
);
358 val
= FIELD_PREP(ASPEED_PECI_T_NEGO_MSG_MASK
, msg_timing
);
359 val
|= FIELD_PREP(ASPEED_PECI_T_NEGO_ADDR_MASK
, msg_timing
);
360 writel(val
, aspeed_peci
->base
+ ASPEED_PECI_TIMING_NEGOTIATION
);
/* clk_ops.round_rate: rate actually achievable with the best divider pair. */
static long clk_aspeed_peci_round_rate(struct clk_hw *hw, unsigned long rate,
				       unsigned long *prate)
{
	int div = clk_aspeed_peci_get_div(rate, prate);

	return DIV_ROUND_UP_ULL(*prate, div);
}
373 static unsigned long clk_aspeed_peci_recalc_rate(struct clk_hw
*hw
, unsigned long prate
)
375 struct clk_aspeed_peci
*peci_clk
= container_of(hw
, struct clk_aspeed_peci
, hw
);
376 struct aspeed_peci
*aspeed_peci
= peci_clk
->aspeed_peci
;
377 int div
, msg_timing
, addr_timing
, clk_div_exp
;
380 reg
= readl(aspeed_peci
->base
+ ASPEED_PECI_TIMING_NEGOTIATION
);
381 msg_timing
= FIELD_GET(ASPEED_PECI_T_NEGO_MSG_MASK
, reg
);
382 addr_timing
= FIELD_GET(ASPEED_PECI_T_NEGO_ADDR_MASK
, reg
);
384 if (msg_timing
!= addr_timing
)
387 reg
= readl(aspeed_peci
->base
+ ASPEED_PECI_CTRL
);
388 clk_div_exp
= FIELD_GET(ASPEED_PECI_CTRL_CLK_DIV_MASK
, reg
);
390 div
= ASPEED_PECI_CLK_DIV(msg_timing
, clk_div_exp
);
392 return DIV_ROUND_UP_ULL(prate
, div
);
395 static const struct clk_ops clk_aspeed_peci_ops
= {
396 .set_rate
= clk_aspeed_peci_set_rate
,
397 .round_rate
= clk_aspeed_peci_round_rate
,
398 .recalc_rate
= clk_aspeed_peci_recalc_rate
,
402 * PECI HW contains a clock divider which is a combination of:
403 * div0: 4 (fixed divider)
406 * In other words, out_clk = in_clk / (div0 * div1 * div2)
407 * The resulting frequency is used by PECI Controller to drive the PECI bus to
408 * negotiate optimal transfer rate.
410 static struct clk
*devm_aspeed_peci_register_clk_div(struct device
*dev
, struct clk
*parent
,
411 struct aspeed_peci
*priv
)
413 struct clk_aspeed_peci
*peci_clk
;
414 struct clk_init_data init
;
415 const char *parent_name
;
419 snprintf(name
, sizeof(name
), "%s_div", dev_name(dev
));
421 parent_name
= __clk_get_name(parent
);
423 init
.ops
= &clk_aspeed_peci_ops
;
425 init
.parent_names
= (const char* []) { parent_name
};
426 init
.num_parents
= 1;
429 peci_clk
= devm_kzalloc(dev
, sizeof(struct clk_aspeed_peci
), GFP_KERNEL
);
431 return ERR_PTR(-ENOMEM
);
433 peci_clk
->hw
.init
= &init
;
434 peci_clk
->aspeed_peci
= priv
;
436 ret
= devm_clk_hw_register(dev
, &peci_clk
->hw
);
440 return peci_clk
->hw
.clk
;
443 static void aspeed_peci_property_sanitize(struct device
*dev
, const char *propname
,
444 u32 min
, u32 max
, u32 default_val
, u32
*propval
)
449 ret
= device_property_read_u32(dev
, propname
, &val
);
452 } else if (val
> max
|| val
< min
) {
453 dev_warn(dev
, "invalid %s: %u, falling back to: %u\n",
454 propname
, val
, default_val
);
462 static void aspeed_peci_property_setup(struct aspeed_peci
*priv
)
464 aspeed_peci_property_sanitize(priv
->dev
, "clock-frequency",
465 ASPEED_PECI_CLK_FREQUENCY_MIN
, ASPEED_PECI_CLK_FREQUENCY_MAX
,
466 ASPEED_PECI_CLK_FREQUENCY_DEFAULT
, &priv
->clk_frequency
);
467 aspeed_peci_property_sanitize(priv
->dev
, "cmd-timeout-ms",
468 1, ASPEED_PECI_CMD_TIMEOUT_MS_MAX
,
469 ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT
, &priv
->cmd_timeout_ms
);
472 static const struct peci_controller_ops aspeed_ops
= {
473 .xfer
= aspeed_peci_xfer
,
/* devm action: re-assert the reset line on driver teardown. */
static void aspeed_peci_reset_control_release(void *data)
{
	reset_control_assert(data);
}
481 static int devm_aspeed_peci_reset_control_deassert(struct device
*dev
, struct reset_control
*rst
)
485 ret
= reset_control_deassert(rst
);
489 return devm_add_action_or_reset(dev
, aspeed_peci_reset_control_release
, rst
);
/* devm action: disable and unprepare the clock on driver teardown. */
static void aspeed_peci_clk_release(void *data)
{
	clk_disable_unprepare(data);
}
497 static int devm_aspeed_peci_clk_enable(struct device
*dev
, struct clk
*clk
)
501 ret
= clk_prepare_enable(clk
);
505 return devm_add_action_or_reset(dev
, aspeed_peci_clk_release
, clk
);
508 static int aspeed_peci_probe(struct platform_device
*pdev
)
510 struct peci_controller
*controller
;
511 struct aspeed_peci
*priv
;
515 priv
= devm_kzalloc(&pdev
->dev
, sizeof(*priv
), GFP_KERNEL
);
519 priv
->dev
= &pdev
->dev
;
520 dev_set_drvdata(priv
->dev
, priv
);
522 priv
->base
= devm_platform_ioremap_resource(pdev
, 0);
523 if (IS_ERR(priv
->base
))
524 return PTR_ERR(priv
->base
);
526 priv
->irq
= platform_get_irq(pdev
, 0);
530 ret
= devm_request_irq(&pdev
->dev
, priv
->irq
, aspeed_peci_irq_handler
,
531 0, "peci-aspeed", priv
);
535 init_completion(&priv
->xfer_complete
);
536 spin_lock_init(&priv
->lock
);
538 priv
->rst
= devm_reset_control_get(&pdev
->dev
, NULL
);
539 if (IS_ERR(priv
->rst
))
540 return dev_err_probe(priv
->dev
, PTR_ERR(priv
->rst
),
541 "failed to get reset control\n");
543 ret
= devm_aspeed_peci_reset_control_deassert(priv
->dev
, priv
->rst
);
545 return dev_err_probe(priv
->dev
, ret
, "cannot deassert reset control\n");
547 aspeed_peci_property_setup(priv
);
549 aspeed_peci_init_regs(priv
);
551 ref_clk
= devm_clk_get(priv
->dev
, NULL
);
553 return dev_err_probe(priv
->dev
, PTR_ERR(ref_clk
), "failed to get ref clock\n");
555 priv
->clk
= devm_aspeed_peci_register_clk_div(priv
->dev
, ref_clk
, priv
);
556 if (IS_ERR(priv
->clk
))
557 return dev_err_probe(priv
->dev
, PTR_ERR(priv
->clk
), "cannot register clock\n");
559 ret
= clk_set_rate(priv
->clk
, priv
->clk_frequency
);
561 return dev_err_probe(priv
->dev
, ret
, "cannot set clock frequency\n");
563 ret
= devm_aspeed_peci_clk_enable(priv
->dev
, priv
->clk
);
565 return dev_err_probe(priv
->dev
, ret
, "failed to enable clock\n");
567 aspeed_peci_controller_enable(priv
);
569 controller
= devm_peci_controller_add(priv
->dev
, &aspeed_ops
);
570 if (IS_ERR(controller
))
571 return dev_err_probe(priv
->dev
, PTR_ERR(controller
),
572 "failed to add aspeed peci controller\n");
574 priv
->controller
= controller
;
579 static const struct of_device_id aspeed_peci_of_table
[] = {
580 { .compatible
= "aspeed,ast2400-peci", },
581 { .compatible
= "aspeed,ast2500-peci", },
582 { .compatible
= "aspeed,ast2600-peci", },
585 MODULE_DEVICE_TABLE(of
, aspeed_peci_of_table
);
587 static struct platform_driver aspeed_peci_driver
= {
588 .probe
= aspeed_peci_probe
,
590 .name
= "peci-aspeed",
591 .of_match_table
= aspeed_peci_of_table
,
594 module_platform_driver(aspeed_peci_driver
);
596 MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
597 MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
598 MODULE_DESCRIPTION("ASPEED PECI driver");
599 MODULE_LICENSE("GPL");
600 MODULE_IMPORT_NS("PECI");