// SPDX-License-Identifier: GPL-2.0
/*
 * MAX10 BMC Platform Management Component Interface (PMCI) based
 * interface.
 *
 * Copyright (C) 2020-2023 Intel Corporation.
 */
9 #include <linux/bitfield.h>
10 #include <linux/device.h>
11 #include <linux/dfl.h>
12 #include <linux/mfd/core.h>
13 #include <linux/mfd/intel-m10-bmc.h>
14 #include <linux/minmax.h>
15 #include <linux/module.h>
16 #include <linux/regmap.h>
18 struct m10bmc_pmci_device
{
20 struct intel_m10bmc m10bmc
;
21 struct mutex flash_mutex
; /* protects flash_busy and serializes flash read/read */
/*
 * Intel FPGA indirect register access via hardware controller/bridge.
 */
#define INDIRECT_CMD_OFF	0
#define INDIRECT_CMD_CLR	0
#define INDIRECT_CMD_RD		BIT(0)
#define INDIRECT_CMD_WR		BIT(1)
#define INDIRECT_CMD_ACK	BIT(2)

#define INDIRECT_ADDR_OFF	0x4
#define INDIRECT_RD_OFF		0x8
#define INDIRECT_WR_OFF		0xc

/* polling interval and timeout for the indirect-access handshake */
#define INDIRECT_INT_US		1
#define INDIRECT_TIMEOUT_US	10000
46 static int indirect_clear_cmd(struct indirect_ctx
*ctx
)
51 writel(INDIRECT_CMD_CLR
, ctx
->base
+ INDIRECT_CMD_OFF
);
53 ret
= readl_poll_timeout(ctx
->base
+ INDIRECT_CMD_OFF
, cmd
,
54 cmd
== INDIRECT_CMD_CLR
,
55 INDIRECT_INT_US
, INDIRECT_TIMEOUT_US
);
57 dev_err(ctx
->dev
, "timed out waiting clear cmd (residual cmd=0x%x)\n", cmd
);
62 static int indirect_reg_read(void *context
, unsigned int reg
, unsigned int *val
)
64 struct indirect_ctx
*ctx
= context
;
65 unsigned int cmd
, ack
, tmpval
;
68 cmd
= readl(ctx
->base
+ INDIRECT_CMD_OFF
);
69 if (cmd
!= INDIRECT_CMD_CLR
)
70 dev_warn(ctx
->dev
, "residual cmd 0x%x on read entry\n", cmd
);
72 writel(reg
, ctx
->base
+ INDIRECT_ADDR_OFF
);
73 writel(INDIRECT_CMD_RD
, ctx
->base
+ INDIRECT_CMD_OFF
);
75 ret
= readl_poll_timeout(ctx
->base
+ INDIRECT_CMD_OFF
, ack
,
76 (ack
& INDIRECT_CMD_ACK
) == INDIRECT_CMD_ACK
,
77 INDIRECT_INT_US
, INDIRECT_TIMEOUT_US
);
79 dev_err(ctx
->dev
, "read timed out on reg 0x%x ack 0x%x\n", reg
, ack
);
81 tmpval
= readl(ctx
->base
+ INDIRECT_RD_OFF
);
83 ret2
= indirect_clear_cmd(ctx
);
94 static int indirect_reg_write(void *context
, unsigned int reg
, unsigned int val
)
96 struct indirect_ctx
*ctx
= context
;
97 unsigned int cmd
, ack
;
100 cmd
= readl(ctx
->base
+ INDIRECT_CMD_OFF
);
101 if (cmd
!= INDIRECT_CMD_CLR
)
102 dev_warn(ctx
->dev
, "residual cmd 0x%x on write entry\n", cmd
);
104 writel(val
, ctx
->base
+ INDIRECT_WR_OFF
);
105 writel(reg
, ctx
->base
+ INDIRECT_ADDR_OFF
);
106 writel(INDIRECT_CMD_WR
, ctx
->base
+ INDIRECT_CMD_OFF
);
108 ret
= readl_poll_timeout(ctx
->base
+ INDIRECT_CMD_OFF
, ack
,
109 (ack
& INDIRECT_CMD_ACK
) == INDIRECT_CMD_ACK
,
110 INDIRECT_INT_US
, INDIRECT_TIMEOUT_US
);
112 dev_err(ctx
->dev
, "write timed out on reg 0x%x ack 0x%x\n", reg
, ack
);
114 ret2
= indirect_clear_cmd(ctx
);
121 static void pmci_write_fifo(void __iomem
*base
, const u32
*buf
, size_t count
)
124 writel(*buf
++, base
);
127 static void pmci_read_fifo(void __iomem
*base
, u32
*buf
, size_t count
)
130 *buf
++ = readl(base
);
133 static u32
pmci_get_write_space(struct m10bmc_pmci_device
*pmci
)
138 ret
= read_poll_timeout(readl
, val
,
139 FIELD_GET(M10BMC_N6000_FLASH_FIFO_SPACE
, val
) ==
140 M10BMC_N6000_FIFO_MAX_WORDS
,
141 M10BMC_FLASH_INT_US
, M10BMC_FLASH_TIMEOUT_US
,
142 false, pmci
->base
+ M10BMC_N6000_FLASH_CTRL
);
143 if (ret
== -ETIMEDOUT
)
146 return FIELD_GET(M10BMC_N6000_FLASH_FIFO_SPACE
, val
) * M10BMC_N6000_FIFO_WORD_SIZE
;
149 static int pmci_flash_bulk_write(struct intel_m10bmc
*m10bmc
, const u8
*buf
, u32 size
)
151 struct m10bmc_pmci_device
*pmci
= container_of(m10bmc
, struct m10bmc_pmci_device
, m10bmc
);
152 u32 blk_size
, offset
= 0, write_count
;
155 blk_size
= min(pmci_get_write_space(pmci
), size
);
157 dev_err(m10bmc
->dev
, "get FIFO available size fail\n");
161 if (size
< M10BMC_N6000_FIFO_WORD_SIZE
)
164 write_count
= blk_size
/ M10BMC_N6000_FIFO_WORD_SIZE
;
165 pmci_write_fifo(pmci
->base
+ M10BMC_N6000_FLASH_FIFO
,
166 (u32
*)(buf
+ offset
), write_count
);
172 /* Handle remainder (less than M10BMC_N6000_FIFO_WORD_SIZE bytes) */
176 memcpy(&tmp
, buf
+ offset
, size
);
177 pmci_write_fifo(pmci
->base
+ M10BMC_N6000_FLASH_FIFO
, &tmp
, 1);
183 static int pmci_flash_bulk_read(struct intel_m10bmc
*m10bmc
, u8
*buf
, u32 addr
, u32 size
)
185 struct m10bmc_pmci_device
*pmci
= container_of(m10bmc
, struct m10bmc_pmci_device
, m10bmc
);
186 u32 blk_size
, offset
= 0, val
, full_read_count
, read_count
;
190 blk_size
= min_t(u32
, size
, M10BMC_N6000_READ_BLOCK_SIZE
);
191 full_read_count
= blk_size
/ M10BMC_N6000_FIFO_WORD_SIZE
;
193 read_count
= full_read_count
;
194 if (full_read_count
* M10BMC_N6000_FIFO_WORD_SIZE
< blk_size
)
197 writel(addr
+ offset
, pmci
->base
+ M10BMC_N6000_FLASH_ADDR
);
198 writel(FIELD_PREP(M10BMC_N6000_FLASH_READ_COUNT
, read_count
) |
199 M10BMC_N6000_FLASH_RD_MODE
,
200 pmci
->base
+ M10BMC_N6000_FLASH_CTRL
);
202 ret
= readl_poll_timeout((pmci
->base
+ M10BMC_N6000_FLASH_CTRL
), val
,
203 !(val
& M10BMC_N6000_FLASH_BUSY
),
204 M10BMC_FLASH_INT_US
, M10BMC_FLASH_TIMEOUT_US
);
206 dev_err(m10bmc
->dev
, "read timed out on reading flash 0x%xn", val
);
210 pmci_read_fifo(pmci
->base
+ M10BMC_N6000_FLASH_FIFO
,
211 (u32
*)(buf
+ offset
), full_read_count
);
216 if (full_read_count
< read_count
)
219 writel(0, pmci
->base
+ M10BMC_N6000_FLASH_CTRL
);
222 /* Handle remainder (less than M10BMC_N6000_FIFO_WORD_SIZE bytes) */
226 pmci_read_fifo(pmci
->base
+ M10BMC_N6000_FLASH_FIFO
, &tmp
, 1);
227 memcpy(buf
+ offset
, &tmp
, size
);
229 writel(0, pmci
->base
+ M10BMC_N6000_FLASH_CTRL
);
235 static int m10bmc_pmci_set_flash_host_mux(struct intel_m10bmc
*m10bmc
, bool request
)
240 ret
= regmap_update_bits(m10bmc
->regmap
, M10BMC_N6000_FLASH_MUX_CTRL
,
241 M10BMC_N6000_FLASH_HOST_REQUEST
,
242 FIELD_PREP(M10BMC_N6000_FLASH_HOST_REQUEST
, request
));
246 return regmap_read_poll_timeout(m10bmc
->regmap
,
247 M10BMC_N6000_FLASH_MUX_CTRL
, ctrl
,
249 (get_flash_mux(ctrl
) == M10BMC_N6000_FLASH_MUX_HOST
) :
250 (get_flash_mux(ctrl
) != M10BMC_N6000_FLASH_MUX_HOST
),
251 M10BMC_FLASH_INT_US
, M10BMC_FLASH_TIMEOUT_US
);
254 static int m10bmc_pmci_flash_read(struct intel_m10bmc
*m10bmc
, u8
*buf
, u32 addr
, u32 size
)
256 struct m10bmc_pmci_device
*pmci
= container_of(m10bmc
, struct m10bmc_pmci_device
, m10bmc
);
259 mutex_lock(&pmci
->flash_mutex
);
260 if (pmci
->flash_busy
) {
265 ret
= m10bmc_pmci_set_flash_host_mux(m10bmc
, true);
269 ret
= pmci_flash_bulk_read(m10bmc
, buf
, addr
, size
);
272 ret2
= m10bmc_pmci_set_flash_host_mux(m10bmc
, false);
275 mutex_unlock(&pmci
->flash_mutex
);
281 static int m10bmc_pmci_flash_write(struct intel_m10bmc
*m10bmc
, const u8
*buf
, u32 offset
, u32 size
)
283 struct m10bmc_pmci_device
*pmci
= container_of(m10bmc
, struct m10bmc_pmci_device
, m10bmc
);
286 mutex_lock(&pmci
->flash_mutex
);
287 WARN_ON_ONCE(!pmci
->flash_busy
);
288 /* On write, firmware manages flash MUX */
289 ret
= pmci_flash_bulk_write(m10bmc
, buf
+ offset
, size
);
290 mutex_unlock(&pmci
->flash_mutex
);
295 static int m10bmc_pmci_flash_lock(struct intel_m10bmc
*m10bmc
)
297 struct m10bmc_pmci_device
*pmci
= container_of(m10bmc
, struct m10bmc_pmci_device
, m10bmc
);
300 mutex_lock(&pmci
->flash_mutex
);
301 if (pmci
->flash_busy
) {
306 pmci
->flash_busy
= true;
309 mutex_unlock(&pmci
->flash_mutex
);
313 static void m10bmc_pmci_flash_unlock(struct intel_m10bmc
*m10bmc
)
315 struct m10bmc_pmci_device
*pmci
= container_of(m10bmc
, struct m10bmc_pmci_device
, m10bmc
);
317 mutex_lock(&pmci
->flash_mutex
);
318 WARN_ON_ONCE(!pmci
->flash_busy
);
319 pmci
->flash_busy
= false;
320 mutex_unlock(&pmci
->flash_mutex
);
323 static const struct intel_m10bmc_flash_bulk_ops m10bmc_pmci_flash_bulk_ops
= {
324 .read
= m10bmc_pmci_flash_read
,
325 .write
= m10bmc_pmci_flash_write
,
326 .lock_write
= m10bmc_pmci_flash_lock
,
327 .unlock_write
= m10bmc_pmci_flash_unlock
,
330 static const struct regmap_range m10bmc_pmci_regmap_range
[] = {
331 regmap_reg_range(M10BMC_N6000_SYS_BASE
, M10BMC_N6000_SYS_END
),
334 static const struct regmap_access_table m10bmc_pmci_access_table
= {
335 .yes_ranges
= m10bmc_pmci_regmap_range
,
336 .n_yes_ranges
= ARRAY_SIZE(m10bmc_pmci_regmap_range
),
339 static const struct regmap_config m10bmc_pmci_regmap_config
= {
343 .wr_table
= &m10bmc_pmci_access_table
,
344 .rd_table
= &m10bmc_pmci_access_table
,
345 .reg_read
= &indirect_reg_read
,
346 .reg_write
= &indirect_reg_write
,
347 .max_register
= M10BMC_N6000_SYS_END
,
350 static struct mfd_cell m10bmc_pmci_n6000_bmc_subdevs
[] = {
351 { .name
= "n6000bmc-hwmon" },
352 { .name
= "n6000bmc-sec-update" },
355 static const struct m10bmc_csr_map m10bmc_n6000_csr_map
= {
356 .base
= M10BMC_N6000_SYS_BASE
,
357 .build_version
= M10BMC_N6000_BUILD_VER
,
358 .fw_version
= NIOS2_N6000_FW_VERSION
,
359 .mac_low
= M10BMC_N6000_MAC_LOW
,
360 .mac_high
= M10BMC_N6000_MAC_HIGH
,
361 .doorbell
= M10BMC_N6000_DOORBELL
,
362 .auth_result
= M10BMC_N6000_AUTH_RESULT
,
363 .bmc_prog_addr
= M10BMC_N6000_BMC_PROG_ADDR
,
364 .bmc_reh_addr
= M10BMC_N6000_BMC_REH_ADDR
,
365 .bmc_magic
= M10BMC_N6000_BMC_PROG_MAGIC
,
366 .sr_prog_addr
= M10BMC_N6000_SR_PROG_ADDR
,
367 .sr_reh_addr
= M10BMC_N6000_SR_REH_ADDR
,
368 .sr_magic
= M10BMC_N6000_SR_PROG_MAGIC
,
369 .pr_prog_addr
= M10BMC_N6000_PR_PROG_ADDR
,
370 .pr_reh_addr
= M10BMC_N6000_PR_REH_ADDR
,
371 .pr_magic
= M10BMC_N6000_PR_PROG_MAGIC
,
372 .rsu_update_counter
= M10BMC_N6000_STAGING_FLASH_COUNT
,
373 .staging_size
= M10BMC_STAGING_SIZE
,
376 static const struct intel_m10bmc_platform_info m10bmc_pmci_n6000
= {
377 .cells
= m10bmc_pmci_n6000_bmc_subdevs
,
378 .n_cells
= ARRAY_SIZE(m10bmc_pmci_n6000_bmc_subdevs
),
379 .csr_map
= &m10bmc_n6000_csr_map
,
382 static int m10bmc_pmci_probe(struct dfl_device
*ddev
)
384 struct device
*dev
= &ddev
->dev
;
385 struct m10bmc_pmci_device
*pmci
;
386 struct indirect_ctx
*ctx
;
389 pmci
= devm_kzalloc(dev
, sizeof(*pmci
), GFP_KERNEL
);
393 pmci
->m10bmc
.flash_bulk_ops
= &m10bmc_pmci_flash_bulk_ops
;
394 pmci
->m10bmc
.dev
= dev
;
396 pmci
->base
= devm_ioremap_resource(dev
, &ddev
->mmio_res
);
397 if (IS_ERR(pmci
->base
))
398 return PTR_ERR(pmci
->base
);
400 ctx
= devm_kzalloc(dev
, sizeof(*ctx
), GFP_KERNEL
);
404 mutex_init(&pmci
->flash_mutex
);
406 ctx
->base
= pmci
->base
+ M10BMC_N6000_INDIRECT_BASE
;
408 indirect_clear_cmd(ctx
);
409 pmci
->m10bmc
.regmap
= devm_regmap_init(dev
, NULL
, ctx
, &m10bmc_pmci_regmap_config
);
411 if (IS_ERR(pmci
->m10bmc
.regmap
)) {
412 ret
= PTR_ERR(pmci
->m10bmc
.regmap
);
416 ret
= m10bmc_dev_init(&pmci
->m10bmc
, &m10bmc_pmci_n6000
);
422 mutex_destroy(&pmci
->flash_mutex
);
426 static void m10bmc_pmci_remove(struct dfl_device
*ddev
)
428 struct intel_m10bmc
*m10bmc
= dev_get_drvdata(&ddev
->dev
);
429 struct m10bmc_pmci_device
*pmci
= container_of(m10bmc
, struct m10bmc_pmci_device
, m10bmc
);
431 mutex_destroy(&pmci
->flash_mutex
);
434 #define FME_FEATURE_ID_M10BMC_PMCI 0x12
436 static const struct dfl_device_id m10bmc_pmci_ids
[] = {
437 { FME_ID
, FME_FEATURE_ID_M10BMC_PMCI
},
440 MODULE_DEVICE_TABLE(dfl
, m10bmc_pmci_ids
);
442 static struct dfl_driver m10bmc_pmci_driver
= {
444 .name
= "intel-m10-bmc",
445 .dev_groups
= m10bmc_dev_groups
,
447 .id_table
= m10bmc_pmci_ids
,
448 .probe
= m10bmc_pmci_probe
,
449 .remove
= m10bmc_pmci_remove
,
452 module_dfl_driver(m10bmc_pmci_driver
);
454 MODULE_DESCRIPTION("MAX10 BMC PMCI-based interface");
455 MODULE_AUTHOR("Intel Corporation");
456 MODULE_LICENSE("GPL");
457 MODULE_IMPORT_NS("INTEL_M10_BMC_CORE");