// SPDX-License-Identifier: BSD-3-Clause
/* Copyright (c) 2016-2018, NXP Semiconductors
 * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/spi/spi.h>
#include <linux/packing.h>
#include "sja1105.h"

#define SJA1105_SIZE_RESET_CMD		4
#define SJA1105_SIZE_SPI_MSG_HEADER	4
#define SJA1105_SIZE_SPI_MSG_MAXLEN	(64 * 4)

struct sja1105_chunk {
	u8	*buf;
	size_t	len;
	u64	reg_addr;
};

static void
sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
{
	const int size = SJA1105_SIZE_SPI_MSG_HEADER;

	memset(buf, 0, size);
	sja1105_pack(buf, &msg->access,     31, 31, size);
	sja1105_pack(buf, &msg->read_count, 30, 25, size);
	sja1105_pack(buf, &msg->address,    24,  4, size);
}
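
/* Worked example (editor's illustration, not part of the original driver):
 * a read of 4 words starting at register address 0x20 would be packed as
 * access = SPI_READ, read_count = 4 and address = 0x20, i.e. (assuming
 * SPI_READ encodes as 0 and SPI_WRITE as 1 in sja1105_spi_rw_mode_t) the
 * 32-bit control word (0 << 31) | (4 << 25) | (0x20 << 4) == 0x08000200,
 * spread over the 4-byte header by sja1105_pack().
 */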

#define sja1105_hdr_xfer(xfers, chunk) \
	((xfers) + 2 * (chunk))
#define sja1105_chunk_xfer(xfers, chunk) \
	((xfers) + 2 * (chunk) + 1)
#define sja1105_hdr_buf(hdr_bufs, chunk) \
	((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)
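
/* Layout illustration (editor's note): for num_chunks == 2 the xfers array
 * is { hdr_xfer(0), chunk_xfer(0), hdr_xfer(1), chunk_xfer(1) }, so
 * sja1105_hdr_xfer(xfers, 1) == &xfers[2] and
 * sja1105_chunk_xfer(xfers, 1) == &xfers[3], while sja1105_hdr_buf() steps
 * through the header buffers in SJA1105_SIZE_SPI_MSG_HEADER increments.
 */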

/* If @rw is:
 * - SPI_WRITE: creates and sends an SPI write message at absolute
 *		address reg_addr, taking @len bytes from *buf
 * - SPI_READ:  creates and sends an SPI read message from absolute
 *		address reg_addr, writing @len bytes into *buf
 */
static int sja1105_xfer(const struct sja1105_private *priv,
			sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
			size_t len, struct ptp_system_timestamp *ptp_sts)
{
	struct sja1105_chunk chunk = {
		.len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
		.reg_addr = reg_addr,
		.buf = buf,
	};
	struct spi_device *spi = priv->spidev;
	struct spi_transfer *xfers;
	int num_chunks;
	int rc, i = 0;
	u8 *hdr_bufs;

	num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);

	/* One transfer for each message header, one for each message
	 * payload (chunk).
	 */
	xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
			GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	/* Packed buffers for the num_chunks SPI message headers,
	 * stored as a contiguous array
	 */
	hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
			   GFP_KERNEL);
	if (!hdr_bufs) {
		kfree(xfers);
		return -ENOMEM;
	}

	for (i = 0; i < num_chunks; i++) {
		struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
		struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
		u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
		struct spi_transfer *ptp_sts_xfer;
		struct sja1105_spi_message msg;

		/* Populate the transfer's header buffer */
		msg.address = chunk.reg_addr;
		msg.access = rw;
		if (rw == SPI_READ)
			msg.read_count = chunk.len / 4;
		else
			msg.read_count = 0;
		sja1105_spi_message_pack(hdr_buf, &msg);
		hdr_xfer->tx_buf = hdr_buf;
		hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;

		/* Populate the transfer's data buffer */
		if (rw == SPI_READ)
			chunk_xfer->rx_buf = chunk.buf;
		else
			chunk_xfer->tx_buf = chunk.buf;
		chunk_xfer->len = chunk.len;

		/* Request timestamping for the transfer. Instead of letting
		 * callers specify which byte they want to timestamp, we can
		 * make certain assumptions:
		 * - A read operation will request a software timestamp when
		 *   what's being read is the PTP time. That is snapshotted by
		 *   the switch hardware at the end of the command portion
		 *   (hdr_xfer).
		 * - A write operation will request a software timestamp on
		 *   actions that modify the PTP time. Taking clock stepping as
		 *   an example, the switch writes the PTP time at the end of
		 *   the data portion (chunk_xfer).
		 */
		if (rw == SPI_READ)
			ptp_sts_xfer = hdr_xfer;
		else
			ptp_sts_xfer = chunk_xfer;
		ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
		ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
		ptp_sts_xfer->ptp_sts = ptp_sts;
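
		/* Editor's note: ptp_sts_word_pre/post name the word offset
		 * (a byte at the default 8 bits per word) inside the chosen
		 * transfer around which the SPI core snapshots system time,
		 * so "len - 1" brackets the transfer's last byte, e.g. byte
		 * 3 of the 4-byte command header for a read.
		 */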

		/* Calculate next chunk */
		chunk.buf += chunk.len;
		chunk.reg_addr += chunk.len / 4;
		chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
				  SJA1105_SIZE_SPI_MSG_MAXLEN);
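
		/* Worked example (editor's illustration): for len == 300,
		 * num_chunks == DIV_ROUND_UP(300, 256) == 2; the first chunk
		 * covers 256 bytes (64 words), then chunk.buf advances by 256,
		 * chunk.reg_addr by 64, and the second chunk.len becomes
		 * min(300 - 256, 256) == 44 bytes.
		 */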

		/* De-assert the chip select after each chunk. */
		chunk_xfer->cs_change = 1;
	}

	rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
	if (rc < 0)
		dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);

	kfree(hdr_bufs);
	kfree(xfers);

	return rc;
}

int sja1105_xfer_buf(const struct sja1105_private *priv,
		     sja1105_spi_rw_mode_t rw, u64 reg_addr,
		     u8 *buf, size_t len)
{
	return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
}
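
/* Minimal usage sketch (editor's illustration, mirroring the call made in
 * sja1105_status_get() further down):
 *
 *	u8 packed_buf[4];
 *	int rc = sja1105_xfer_buf(priv, SPI_READ, regs->status,
 *				  packed_buf, 4);
 *	if (rc < 0)
 *		return rc;
 */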

/* If @rw is:
 * - SPI_WRITE: creates and sends an SPI write message at absolute
 *		address reg_addr
 * - SPI_READ:  creates and sends an SPI read message from absolute
 *		address reg_addr
 *
 * The u64 *value is unpacked, meaning that it's stored in the native
 * CPU endianness and directly usable by software running on the core.
 */
int sja1105_xfer_u64(const struct sja1105_private *priv,
		     sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
		     struct ptp_system_timestamp *ptp_sts)
{
	u8 packed_buf[8];
	int rc;

	if (rw == SPI_WRITE)
		sja1105_pack(packed_buf, value, 63, 0, 8);

	rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);

	if (rw == SPI_READ)
		sja1105_unpack(packed_buf, value, 63, 0, 8);

	return rc;
}

/* Same as above, but transfers only a 4-byte word */
int sja1105_xfer_u32(const struct sja1105_private *priv,
		     sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
		     struct ptp_system_timestamp *ptp_sts)
{
	u8 packed_buf[4];
	u64 tmp;
	int rc;

	if (rw == SPI_WRITE) {
		/* The packing API only supports u64 as CPU word size,
		 * so we need to convert.
		 */
		tmp = *value;
		sja1105_pack(packed_buf, &tmp, 31, 0, 4);
	}

	rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);

	if (rw == SPI_READ) {
		sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
		*value = tmp;
	}

	return rc;
}
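
/* Minimal usage sketch (editor's illustration, mirroring the read done in
 * sja1105_inhibit_tx() below):
 *
 *	u32 inhibit_cmd;
 *	int rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
 *				  &inhibit_cmd, NULL);
 *
 * The value arrives already converted to CPU endianness, so no further
 * unpacking is needed by the caller.
 */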

static int sja1105et_reset_cmd(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	const struct sja1105_regs *regs = priv->info->regs;
	u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
	const int size = SJA1105_SIZE_RESET_CMD;
	u64 cold_rst = 1;

	sja1105_pack(packed_buf, &cold_rst, 3, 3, size);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
				SJA1105_SIZE_RESET_CMD);
}

static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
{
	struct sja1105_private *priv = ds->priv;
	const struct sja1105_regs *regs = priv->info->regs;
	u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
	const int size = SJA1105_SIZE_RESET_CMD;
	u64 cold_rst = 1;

	sja1105_pack(packed_buf, &cold_rst, 2, 2, size);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
				SJA1105_SIZE_RESET_CMD);
}
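
/* Editor's note: the two reset helpers differ only in where the switch core
 * cold reset flag sits inside the RGU command word (bit 3 on E/T versus
 * bit 2 on P/Q/R/S, as packed above). Common code does not call them
 * directly; it dispatches through the per-family sja1105_info table, e.g.
 *
 *	rc = priv->info->reset_cmd(priv->ds);
 */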

int sja1105_inhibit_tx(const struct sja1105_private *priv,
		       unsigned long port_bitmap, bool tx_inhibited)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u32 inhibit_cmd;
	int rc;

	rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
			      &inhibit_cmd, NULL);
	if (rc < 0)
		return rc;

	if (tx_inhibited)
		inhibit_cmd |= port_bitmap;
	else
		inhibit_cmd &= ~port_bitmap;

	return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
				&inhibit_cmd, NULL);
}
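
/* Worked example (editor's illustration): with SJA1105_NUM_PORTS == 5,
 * port_bitmap == GENMASK_ULL(4, 0) == 0x1f acts on all five ports at once,
 * while e.g. BIT(0) | BIT(2) would touch only ports 0 and 2; the
 * read-modify-write above leaves the inhibit bits of the other ports
 * unchanged.
 */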

struct sja1105_status {
	u64 configs;
	u64 crcchkl;
	u64 ids;
	u64 crcchkg;
};

/* This is not reading the entire General Status area, which is also
 * divergent between E/T and P/Q/R/S, but only the relevant bits for
 * ensuring that the static config upload procedure was successful.
 */
static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
{
	/* So that addition translates to 4 bytes */
	u32 *p = buf;

	/* device_id is missing from the buffer, but we don't
	 * want to diverge from the manual definition of the
	 * register addresses, so we'll back off one step with
	 * the register pointer, and never access p[0].
	 */
	p--;
	sja1105_unpack(p + 0x1, &status->configs,   31, 31, 4);
	sja1105_unpack(p + 0x1, &status->crcchkl,   30, 30, 4);
	sja1105_unpack(p + 0x1, &status->ids,       29, 29, 4);
	sja1105_unpack(p + 0x1, &status->crcchkg,   28, 28, 4);
}
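
/* Pointer arithmetic illustration (editor's note): after "p--", p + 0x1
 * points back at the first 32-bit word of the buffer, so the 0x1 offset
 * mirrors the manual's register numbering (device_id at offset 0x0, the
 * status word unpacked above at offset 0x1) even though device_id itself is
 * not part of the buffer.
 */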

static int sja1105_status_get(struct sja1105_private *priv,
			      struct sja1105_status *status)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 packed_buf[4];
	int rc;

	rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
	if (rc < 0)
		return rc;

	sja1105_status_unpack(packed_buf, status);

	return 0;
}

/* Not const because unpacking priv->static_config into buffers and preparing
 * for upload requires the recalculation of table CRCs and updating the
 * structures with these.
 */
static int
static_config_buf_prepare_for_upload(struct sja1105_private *priv,
				     void *config_buf, int buf_len)
{
	struct sja1105_static_config *config = &priv->static_config;
	struct sja1105_table_header final_header;
	sja1105_config_valid_t valid;
	char *final_header_ptr;
	int crc_len;

	valid = sja1105_static_config_check_valid(config);
	if (valid != SJA1105_CONFIG_OK) {
		dev_err(&priv->spidev->dev,
			sja1105_static_config_error_msg[valid]);
		return -EINVAL;
	}

	/* Write Device ID and config tables to config_buf */
	sja1105_static_config_pack(config_buf, config);
	/* Recalculate CRC of the last header (right now 0xDEADBEEF).
	 * Don't include the CRC field itself.
	 */
	crc_len = buf_len - 4;
	/* Read the whole table header */
	final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
	sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
	/* Modify */
	final_header.crc = sja1105_crc32(config_buf, crc_len);
	/* Rewrite */
	sja1105_table_header_packing(final_header_ptr, &final_header, PACK);

	return 0;
}
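
/* CRC coverage illustration (editor's note): the global CRC lives in the
 * last 4 bytes of the packed stream, inside the final table header, so
 * crc_len == buf_len - 4 lets sja1105_crc32() cover everything up to, but
 * not including, that CRC field before the header is packed back in place.
 */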

int sja1105_static_config_upload(struct sja1105_private *priv)
{
	unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
	struct sja1105_static_config *config = &priv->static_config;
	const struct sja1105_regs *regs = priv->info->regs;
	struct device *dev = &priv->spidev->dev;
	struct sja1105_status status;
	int rc, retries = RETRIES;
	u8 *config_buf;
	int buf_len;

	buf_len = sja1105_static_config_get_length(config);
	config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
	if (!config_buf)
		return -ENOMEM;

	rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
	if (rc < 0) {
		dev_err(dev, "Invalid config, cannot upload\n");
		rc = -EINVAL;
		goto out;
	}
	/* Prevent PHY jabbering during switch reset by inhibiting
	 * Tx on all ports and waiting for current packet to drain.
	 * Otherwise, the PHY will see an unterminated Ethernet packet.
	 */
	rc = sja1105_inhibit_tx(priv, port_bitmap, true);
	if (rc < 0) {
		dev_err(dev, "Failed to inhibit Tx on ports\n");
		rc = -ENXIO;
		goto out;
	}
	/* Wait for an eventual egress packet to finish transmission
	 * (reach IFG). It is guaranteed that a second one will not
	 * follow, and that switch cold reset is thus safe
	 */
	usleep_range(500, 1000);
	do {
		/* Put the SJA1105 in programming mode */
		rc = priv->info->reset_cmd(priv->ds);
		if (rc < 0) {
			dev_err(dev, "Failed to reset switch, retrying...\n");
			continue;
		}
		/* Wait for the switch to come out of reset */
		usleep_range(1000, 5000);
		/* Upload the static config to the device */
		rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
				      config_buf, buf_len);
		if (rc < 0) {
			dev_err(dev, "Failed to upload config, retrying...\n");
			continue;
		}
		/* Check that SJA1105 responded well to the config upload */
		rc = sja1105_status_get(priv, &status);
		if (rc < 0)
			continue;

		if (status.ids == 1) {
			dev_err(dev, "Mismatch between hardware and static config "
				"device id. Wrote 0x%llx, wants 0x%llx\n",
				config->device_id, priv->info->device_id);
			continue;
		}
		if (status.crcchkl == 1) {
			dev_err(dev, "Switch reported invalid local CRC on "
				"the uploaded config, retrying...\n");
			continue;
		}
		if (status.crcchkg == 1) {
			dev_err(dev, "Switch reported invalid global CRC on "
				"the uploaded config, retrying...\n");
			continue;
		}
		if (status.configs == 0) {
			dev_err(dev, "Switch reported that configuration is "
				"invalid, retrying...\n");
			continue;
		}
		/* Success! */
		break;
	} while (--retries);

	if (!retries) {
		rc = -EIO;
		dev_err(dev, "Failed to upload config to device, giving up\n");
		goto out;
	} else if (retries != RETRIES) {
		dev_info(dev, "Succeeded after %d tries\n", RETRIES - retries);
	}
out:
	kfree(config_buf);

	return rc;
}

static struct sja1105_regs sja1105et_regs = {
	.port_control = 0x11,
	/* UM10944.pdf, Table 86, ACU Register overview */
	.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
	.rmii_pll1 = 0x10000A,
	.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
	.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
	.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
	.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
	/* UM10944.pdf, Table 78, CGU Register overview */
	.mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
	.mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
	.mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
	.mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
	.rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
	.rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
	.rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
	.ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
	.ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
	.ptpclkval = 0x18, /* Spans 0x18 to 0x19 */

static struct sja1105_regs sja1105pqrs_regs = {
	.port_control = 0x12,
	/* UM10944.pdf, Table 86, ACU Register overview */
	.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
	.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
	.rmii_pll1 = 0x10000A,
	.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
	.mac = {0x200, 0x202, 0x204, 0x206, 0x208},
	.mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
	.mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
	/* UM11040.pdf, Table 114 */
	.mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
	.mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
	.mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
	.mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
	.rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
	.rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
	.rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
	.qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
	.ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
	.ptpschtm = 0x13, /* Spans 0x13 to 0x14 */

struct sja1105_info sja1105e_info = {
	.device_id = SJA1105E_DEVICE_ID,
	.part_no = SJA1105ET_PART_NO,
	.static_ops = sja1105e_table_ops,
	.dyn_ops = sja1105et_dyn_ops,
	.ptpegr_ts_bytes = 4,
	.reset_cmd = sja1105et_reset_cmd,
	.fdb_add_cmd = sja1105et_fdb_add,
	.fdb_del_cmd = sja1105et_fdb_del,
	.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
	.regs = &sja1105et_regs,

struct sja1105_info sja1105t_info = {
	.device_id = SJA1105T_DEVICE_ID,
	.part_no = SJA1105ET_PART_NO,
	.static_ops = sja1105t_table_ops,
	.dyn_ops = sja1105et_dyn_ops,
	.ptpegr_ts_bytes = 4,
	.reset_cmd = sja1105et_reset_cmd,
	.fdb_add_cmd = sja1105et_fdb_add,
	.fdb_del_cmd = sja1105et_fdb_del,
	.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
	.regs = &sja1105et_regs,

struct sja1105_info sja1105p_info = {
	.device_id = SJA1105PR_DEVICE_ID,
	.part_no = SJA1105P_PART_NO,
	.static_ops = sja1105p_table_ops,
	.dyn_ops = sja1105pqrs_dyn_ops,
	.ptpegr_ts_bytes = 8,
	.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
	.reset_cmd = sja1105pqrs_reset_cmd,
	.fdb_add_cmd = sja1105pqrs_fdb_add,
	.fdb_del_cmd = sja1105pqrs_fdb_del,
	.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
	.regs = &sja1105pqrs_regs,

struct sja1105_info sja1105q_info = {
	.device_id = SJA1105QS_DEVICE_ID,
	.part_no = SJA1105Q_PART_NO,
	.static_ops = sja1105q_table_ops,
	.dyn_ops = sja1105pqrs_dyn_ops,
	.ptpegr_ts_bytes = 8,
	.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
	.reset_cmd = sja1105pqrs_reset_cmd,
	.fdb_add_cmd = sja1105pqrs_fdb_add,
	.fdb_del_cmd = sja1105pqrs_fdb_del,
	.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
	.regs = &sja1105pqrs_regs,

struct sja1105_info sja1105r_info = {
	.device_id = SJA1105PR_DEVICE_ID,
	.part_no = SJA1105R_PART_NO,
	.static_ops = sja1105r_table_ops,
	.dyn_ops = sja1105pqrs_dyn_ops,
	.ptpegr_ts_bytes = 8,
	.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
	.reset_cmd = sja1105pqrs_reset_cmd,
	.fdb_add_cmd = sja1105pqrs_fdb_add,
	.fdb_del_cmd = sja1105pqrs_fdb_del,
	.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
	.regs = &sja1105pqrs_regs,

struct sja1105_info sja1105s_info = {
	.device_id = SJA1105QS_DEVICE_ID,
	.part_no = SJA1105S_PART_NO,
	.static_ops = sja1105s_table_ops,
	.dyn_ops = sja1105pqrs_dyn_ops,
	.regs = &sja1105pqrs_regs,
	.ptpegr_ts_bytes = 8,
	.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
	.reset_cmd = sja1105pqrs_reset_cmd,
	.fdb_add_cmd = sja1105pqrs_fdb_add,
	.fdb_del_cmd = sja1105pqrs_fdb_del,
	.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,