// SPDX-License-Identifier: GPL-2.0+
/*
 * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
 *
 * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
 */

#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/oa_tc6.h>
/* OPEN Alliance TC6 registers */
/* Standard Capabilities Register */
#define OA_TC6_REG_STDCAP			0x0002
#define STDCAP_DIRECT_PHY_REG_ACCESS		BIT(8)

/* Reset Control and Status Register */
#define OA_TC6_REG_RESET			0x0003
#define RESET_SWRESET				BIT(0)	/* Software Reset */

/* Configuration Register #0 */
#define OA_TC6_REG_CONFIG0			0x0004
#define CONFIG0_SYNC				BIT(15)
#define CONFIG0_ZARFE_ENABLE			BIT(12)

/* Status Register #0 */
#define OA_TC6_REG_STATUS0			0x0008
#define STATUS0_RESETC				BIT(6)	/* Reset Complete */
#define STATUS0_HEADER_ERROR			BIT(5)
#define STATUS0_LOSS_OF_FRAME_ERROR		BIT(4)
#define STATUS0_RX_BUFFER_OVERFLOW_ERROR	BIT(3)
#define STATUS0_TX_PROTOCOL_ERROR		BIT(0)

/* Buffer Status Register */
#define OA_TC6_REG_BUFFER_STATUS		0x000B
#define BUFFER_STATUS_TX_CREDITS_AVAILABLE	GENMASK(15, 8)
#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE	GENMASK(7, 0)

/* Interrupt Mask Register #0 */
#define OA_TC6_REG_INT_MASK0			0x000C
#define INT_MASK0_HEADER_ERR_MASK		BIT(5)
#define INT_MASK0_LOSS_OF_FRAME_ERR_MASK	BIT(4)
#define INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK	BIT(3)
#define INT_MASK0_TX_PROTOCOL_ERR_MASK		BIT(0)

/* PHY Clause 22 registers base address and mask */
#define OA_TC6_PHY_STD_REG_ADDR_BASE		0xFF00
#define OA_TC6_PHY_STD_REG_ADDR_MASK		0x1F
/* Control command header */
#define OA_TC6_CTRL_HEADER_DATA_NOT_CTRL	BIT(31)
#define OA_TC6_CTRL_HEADER_WRITE_NOT_READ	BIT(29)
#define OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR	GENMASK(27, 24)
#define OA_TC6_CTRL_HEADER_ADDR			GENMASK(23, 8)
#define OA_TC6_CTRL_HEADER_LENGTH		GENMASK(7, 1)
#define OA_TC6_CTRL_HEADER_PARITY		BIT(0)

/* Data header */
#define OA_TC6_DATA_HEADER_DATA_NOT_CTRL	BIT(31)
#define OA_TC6_DATA_HEADER_DATA_VALID		BIT(21)
#define OA_TC6_DATA_HEADER_START_VALID		BIT(20)
#define OA_TC6_DATA_HEADER_START_WORD_OFFSET	GENMASK(19, 16)
#define OA_TC6_DATA_HEADER_END_VALID		BIT(14)
#define OA_TC6_DATA_HEADER_END_BYTE_OFFSET	GENMASK(13, 8)
#define OA_TC6_DATA_HEADER_PARITY		BIT(0)

/* Data footer */
#define OA_TC6_DATA_FOOTER_EXTENDED_STS		BIT(31)
#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD	BIT(30)
#define OA_TC6_DATA_FOOTER_CONFIG_SYNC		BIT(29)
#define OA_TC6_DATA_FOOTER_RX_CHUNKS		GENMASK(28, 24)
#define OA_TC6_DATA_FOOTER_DATA_VALID		BIT(21)
#define OA_TC6_DATA_FOOTER_START_VALID		BIT(20)
#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET	GENMASK(19, 16)
#define OA_TC6_DATA_FOOTER_END_VALID		BIT(14)
#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET	GENMASK(13, 8)
#define OA_TC6_DATA_FOOTER_TX_CREDITS		GENMASK(5, 1)
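
/* Worked example (illustrative value, not taken from the spec): a footer of
 * 0x22307F14 decodes with the fields above as CONFIG_SYNC = 1, RX_CHUNKS = 2,
 * DATA_VALID = 1, START_VALID = 1, START_WORD_OFFSET = 0, END_VALID = 1,
 * END_BYTE_OFFSET = 63 and TX_CREDITS = 10, i.e. a chunk carrying a complete
 * 64-byte frame, with two more rx chunks pending and ten tx credits available.
 */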
/* PHY - Clause 45 registers memory map selector (MMS) as per table 6 in the
 * OPEN Alliance specification.
 */
#define OA_TC6_PHY_C45_PCS_MMS2			2	/* MMD 3 */
#define OA_TC6_PHY_C45_PMA_PMD_MMS3		3	/* MMD 1 */
#define OA_TC6_PHY_C45_VS_PLCA_MMS4		4	/* MMD 31 */
#define OA_TC6_PHY_C45_AUTO_NEG_MMS5		5	/* MMD 7 */
#define OA_TC6_PHY_C45_POWER_UNIT_MMS6		6	/* MMD 13 */
#define OA_TC6_CTRL_HEADER_SIZE			4
#define OA_TC6_CTRL_REG_VALUE_SIZE		4
#define OA_TC6_CTRL_IGNORED_SIZE		4
#define OA_TC6_CTRL_MAX_REGISTERS		128
#define OA_TC6_CTRL_SPI_BUF_SIZE		(OA_TC6_CTRL_HEADER_SIZE +\
						(OA_TC6_CTRL_MAX_REGISTERS *\
						OA_TC6_CTRL_REG_VALUE_SIZE) +\
						OA_TC6_CTRL_IGNORED_SIZE)
#define OA_TC6_CHUNK_PAYLOAD_SIZE		64
#define OA_TC6_DATA_HEADER_SIZE			4
#define OA_TC6_CHUNK_SIZE			(OA_TC6_DATA_HEADER_SIZE +\
						OA_TC6_CHUNK_PAYLOAD_SIZE)
#define OA_TC6_MAX_TX_CHUNKS			48
#define OA_TC6_SPI_DATA_BUF_SIZE		(OA_TC6_MAX_TX_CHUNKS *\
						OA_TC6_CHUNK_SIZE)
#define STATUS0_RESETC_POLL_DELAY		1000
#define STATUS0_RESETC_POLL_TIMEOUT		1000000
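
/* Derived sizes, for reference: the control SPI buffer above works out to
 * 4 + (128 * 4) + 4 = 520 bytes, and the data SPI buffer to
 * 48 * (4 + 64) = 3264 bytes per direction.
 */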
/* Internal structure for MAC-PHY drivers */
struct oa_tc6 {
	struct device *dev;
	struct net_device *netdev;
	struct phy_device *phydev;
	struct mii_bus *mdiobus;
	struct spi_device *spi;
	struct mutex spi_ctrl_lock; /* Protects spi control transfer */
	void *spi_ctrl_tx_buf;
	void *spi_ctrl_rx_buf;
	void *spi_data_tx_buf;
	void *spi_data_rx_buf;
	struct sk_buff *ongoing_tx_skb;
	struct sk_buff *waiting_tx_skb;
	struct sk_buff *rx_skb;
	struct task_struct *spi_thread;
	wait_queue_head_t spi_wq;
	u16 tx_skb_offset;
	u16 spi_data_tx_buf_offset;
	u16 tx_credits;
	u8 rx_chunks_available;
	bool rx_buf_overflow;
	bool int_flag;
};
enum oa_tc6_header_type {
	OA_TC6_CTRL_HEADER,
	OA_TC6_DATA_HEADER,
};

enum oa_tc6_register_op {
	OA_TC6_CTRL_REG_READ = 0,
	OA_TC6_CTRL_REG_WRITE = 1,
};

enum oa_tc6_data_valid_info {
	OA_TC6_DATA_INVALID,
	OA_TC6_DATA_VALID,
};

enum oa_tc6_data_start_valid_info {
	OA_TC6_DATA_START_INVALID,
	OA_TC6_DATA_START_VALID,
};

enum oa_tc6_data_end_valid_info {
	OA_TC6_DATA_END_INVALID,
	OA_TC6_DATA_END_VALID,
};
static int oa_tc6_spi_transfer(struct oa_tc6 *tc6,
			       enum oa_tc6_header_type header_type, u16 length)
{
	struct spi_transfer xfer = { 0 };
	struct spi_message msg;

	if (header_type == OA_TC6_DATA_HEADER) {
		xfer.tx_buf = tc6->spi_data_tx_buf;
		xfer.rx_buf = tc6->spi_data_rx_buf;
	} else {
		xfer.tx_buf = tc6->spi_ctrl_tx_buf;
		xfer.rx_buf = tc6->spi_ctrl_rx_buf;
	}
	xfer.len = length;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(tc6->spi, &msg);
}
static int oa_tc6_get_parity(u32 p)
{
	/* Public domain code snippet, lifted from
	 * http://www-graphics.stanford.edu/~seander/bithacks.html
	 */
	p ^= p >> 1;
	p ^= p >> 2;
	p = (p & 0x11111111U) * 0x11111111U;

	/* Odd parity is used here */
	return !((p >> 28) & 1);
}
static __be32 oa_tc6_prepare_ctrl_header(u32 addr, u8 length,
					 enum oa_tc6_register_op reg_op)
{
	u32 header;

	header = FIELD_PREP(OA_TC6_CTRL_HEADER_DATA_NOT_CTRL,
			    OA_TC6_CTRL_HEADER) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_WRITE_NOT_READ, reg_op) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR, addr >> 16) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_ADDR, addr) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_LENGTH, length - 1);
	header |= FIELD_PREP(OA_TC6_CTRL_HEADER_PARITY,
			     oa_tc6_get_parity(header));

	return cpu_to_be32(header);
}
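
/* Worked example (illustrative): a single-register read of OA_TC6_REG_STDCAP
 * (MMS 0, address 0x0002) yields header = 0x00000200: DNC = 0, WNR = 0,
 * MMS = 0, ADDR = 0x0002, LEN = length - 1 = 0. The word already has an odd
 * number of ones, so oa_tc6_get_parity() returns 0 and the parity bit stays
 * clear; the header goes out on the wire big-endian as 00 00 02 00.
 */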
static void oa_tc6_update_ctrl_write_data(struct oa_tc6 *tc6, u32 value[],
					  u8 length)
{
	__be32 *tx_buf = tc6->spi_ctrl_tx_buf + OA_TC6_CTRL_HEADER_SIZE;

	for (int i = 0; i < length; i++)
		*tx_buf++ = cpu_to_be32(value[i]);
}
static u16 oa_tc6_calculate_ctrl_buf_size(u8 length)
{
	/* A control command consists of a 4-byte header, a 4-byte register
	 * value for each register and a trailing 4-byte ignored field.
	 */
	return OA_TC6_CTRL_HEADER_SIZE + OA_TC6_CTRL_REG_VALUE_SIZE * length +
	       OA_TC6_CTRL_IGNORED_SIZE;
}
static void oa_tc6_prepare_ctrl_spi_buf(struct oa_tc6 *tc6, u32 address,
					u32 value[], u8 length,
					enum oa_tc6_register_op reg_op)
{
	__be32 *tx_buf = tc6->spi_ctrl_tx_buf;

	*tx_buf = oa_tc6_prepare_ctrl_header(address, length, reg_op);

	if (reg_op == OA_TC6_CTRL_REG_WRITE)
		oa_tc6_update_ctrl_write_data(tc6, value, length);
}
static int oa_tc6_check_ctrl_write_reply(struct oa_tc6 *tc6, u8 size)
{
	u8 *tx_buf = tc6->spi_ctrl_tx_buf;
	u8 *rx_buf = tc6->spi_ctrl_rx_buf;

	rx_buf += OA_TC6_CTRL_IGNORED_SIZE;

	/* The echoed control write must match with the one that was
	 * transmitted.
	 */
	if (memcmp(tx_buf, rx_buf, size - OA_TC6_CTRL_IGNORED_SIZE))
		return -EPROTO;

	return 0;
}
static int oa_tc6_check_ctrl_read_reply(struct oa_tc6 *tc6, u8 size)
{
	u32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE;
	u32 *tx_buf = tc6->spi_ctrl_tx_buf;

	/* The echoed control read header must match with the one that was
	 * transmitted.
	 */
	if (*tx_buf != *rx_buf)
		return -EPROTO;

	return 0;
}
static void oa_tc6_copy_ctrl_read_data(struct oa_tc6 *tc6, u32 value[],
				       u8 length)
{
	__be32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE +
			 OA_TC6_CTRL_HEADER_SIZE;

	for (int i = 0; i < length; i++)
		value[i] = be32_to_cpu(*rx_buf++);
}
static int oa_tc6_perform_ctrl(struct oa_tc6 *tc6, u32 address, u32 value[],
			       u8 length, enum oa_tc6_register_op reg_op)
{
	u16 size;
	int ret;

	/* Prepare control command and copy to SPI control buffer */
	oa_tc6_prepare_ctrl_spi_buf(tc6, address, value, length, reg_op);

	size = oa_tc6_calculate_ctrl_buf_size(length);

	/* Perform SPI transfer */
	ret = oa_tc6_spi_transfer(tc6, OA_TC6_CTRL_HEADER, size);
	if (ret) {
		dev_err(&tc6->spi->dev, "SPI transfer failed for control: %d\n",
			ret);
		return ret;
	}

	/* Check echoed/received control write command reply for errors */
	if (reg_op == OA_TC6_CTRL_REG_WRITE)
		return oa_tc6_check_ctrl_write_reply(tc6, size);

	/* Check echoed/received control read command reply for errors */
	ret = oa_tc6_check_ctrl_read_reply(tc6, size);
	if (ret)
		return ret;

	oa_tc6_copy_ctrl_read_data(tc6, value, length);

	return 0;
}
/**
 * oa_tc6_read_registers - function for reading multiple consecutive registers.
 * @tc6: oa_tc6 struct.
 * @address: address of the first register to be read in the MAC-PHY.
 * @value: values read starting from the register address @address.
 * @length: number of consecutive registers to be read from @address.
 *
 * A maximum of 128 consecutive registers can be read starting at @address.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
			  u8 length)
{
	int ret;

	if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
		dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
		return -EINVAL;
	}

	mutex_lock(&tc6->spi_ctrl_lock);
	ret = oa_tc6_perform_ctrl(tc6, address, value, length,
				  OA_TC6_CTRL_REG_READ);
	mutex_unlock(&tc6->spi_ctrl_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(oa_tc6_read_registers);
/**
 * oa_tc6_read_register - function for reading a MAC-PHY register.
 * @tc6: oa_tc6 struct.
 * @address: register address of the MAC-PHY to be read.
 * @value: value read from the register at @address in the MAC-PHY.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value)
{
	return oa_tc6_read_registers(tc6, address, value, 1);
}
EXPORT_SYMBOL_GPL(oa_tc6_read_register);
/**
 * oa_tc6_write_registers - function for writing multiple consecutive registers.
 * @tc6: oa_tc6 struct.
 * @address: address of the first register to be written in the MAC-PHY.
 * @value: values to be written starting from the register address @address.
 * @length: number of consecutive registers to be written from @address.
 *
 * A maximum of 128 consecutive registers can be written starting at @address.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
			   u8 length)
{
	int ret;

	if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
		dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
		return -EINVAL;
	}

	mutex_lock(&tc6->spi_ctrl_lock);
	ret = oa_tc6_perform_ctrl(tc6, address, value, length,
				  OA_TC6_CTRL_REG_WRITE);
	mutex_unlock(&tc6->spi_ctrl_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(oa_tc6_write_registers);
/**
 * oa_tc6_write_register - function for writing a MAC-PHY register.
 * @tc6: oa_tc6 struct.
 * @address: register address of the MAC-PHY to be written.
 * @value: value to be written to the register at @address in the MAC-PHY.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value)
{
	return oa_tc6_write_registers(tc6, address, &value, 1);
}
EXPORT_SYMBOL_GPL(oa_tc6_write_register);
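
/* Usage sketch (illustrative, not taken from an in-tree driver): a MAC driver
 * built on this framework could set a configuration bit with a read-modify-
 * write like so, mirroring what the zero-align helper below does.
 *
 *	u32 regval;
 *	int ret;
 *
 *	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
 *	if (ret)
 *		return ret;
 *	regval |= CONFIG0_ZARFE_ENABLE;
 *	ret = oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
 */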
static int oa_tc6_check_phy_reg_direct_access_capability(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, &regval);
	if (ret)
		return ret;

	if (!(regval & STDCAP_DIRECT_PHY_REG_ACCESS))
		return -ENODEV;

	return 0;
}
static void oa_tc6_handle_link_change(struct net_device *netdev)
{
	phy_print_status(netdev->phydev);
}
static int oa_tc6_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct oa_tc6 *tc6 = bus->priv;
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
				   (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
				   &regval);
	if (ret)
		return ret;

	return regval;
}
static int oa_tc6_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
				u16 val)
{
	struct oa_tc6 *tc6 = bus->priv;

	return oa_tc6_write_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
				     (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
				     val);
}
static int oa_tc6_get_phy_c45_mms(int devnum)
{
	switch (devnum) {
	case MDIO_MMD_PCS:
		return OA_TC6_PHY_C45_PCS_MMS2;
	case MDIO_MMD_PMAPMD:
		return OA_TC6_PHY_C45_PMA_PMD_MMS3;
	case MDIO_MMD_VEND2:
		return OA_TC6_PHY_C45_VS_PLCA_MMS4;
	case MDIO_MMD_AN:
		return OA_TC6_PHY_C45_AUTO_NEG_MMS5;
	case MDIO_MMD_POWER_UNIT:
		return OA_TC6_PHY_C45_POWER_UNIT_MMS6;
	default:
		return -EOPNOTSUPP;
	}
}
static int oa_tc6_mdiobus_read_c45(struct mii_bus *bus, int addr, int devnum,
				   int regnum)
{
	struct oa_tc6 *tc6 = bus->priv;
	u32 regval;
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devnum);
	if (ret < 0)
		return ret;

	ret = oa_tc6_read_register(tc6, (ret << 16) | regnum, &regval);
	if (ret)
		return ret;

	return regval;
}
static int oa_tc6_mdiobus_write_c45(struct mii_bus *bus, int addr, int devnum,
				    int regnum, u16 val)
{
	struct oa_tc6 *tc6 = bus->priv;
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devnum);
	if (ret < 0)
		return ret;

	return oa_tc6_write_register(tc6, (ret << 16) | regnum, val);
}
static int oa_tc6_mdiobus_register(struct oa_tc6 *tc6)
{
	int ret;

	tc6->mdiobus = mdiobus_alloc();
	if (!tc6->mdiobus) {
		netdev_err(tc6->netdev, "MDIO bus alloc failed\n");
		return -ENOMEM;
	}

	tc6->mdiobus->priv = tc6;
	tc6->mdiobus->read = oa_tc6_mdiobus_read;
	tc6->mdiobus->write = oa_tc6_mdiobus_write;
	/* OPEN Alliance 10BASE-T1x compliant MAC-PHYs expose both the C22 and
	 * C45 register spaces. If the PHY is discovered via the C22 bus
	 * protocol, phylib assumes it is a C22 PHY and always reaches the C45
	 * registers through C22 indirect access, because there is no clean
	 * separation between the C22/C45 register spaces and the C22/C45 MDIO
	 * bus protocols. As a result, direct C45 register access, which would
	 * save multiple SPI bus accesses, is not used by default. To support
	 * it, PHY drivers can set .read_mmd/.write_mmd in the PHY driver to
	 * call .read_c45/.write_c45. Ex: drivers/net/phy/microchip_t1s.c
	 */
	tc6->mdiobus->read_c45 = oa_tc6_mdiobus_read_c45;
	tc6->mdiobus->write_c45 = oa_tc6_mdiobus_write_c45;
	tc6->mdiobus->name = "oa-tc6-mdiobus";
	tc6->mdiobus->parent = tc6->dev;

	snprintf(tc6->mdiobus->id, ARRAY_SIZE(tc6->mdiobus->id), "%s",
		 dev_name(&tc6->spi->dev));

	ret = mdiobus_register(tc6->mdiobus);
	if (ret) {
		netdev_err(tc6->netdev, "Could not register MDIO bus\n");
		mdiobus_free(tc6->mdiobus);
		return ret;
	}

	return 0;
}
static void oa_tc6_mdiobus_unregister(struct oa_tc6 *tc6)
{
	mdiobus_unregister(tc6->mdiobus);
	mdiobus_free(tc6->mdiobus);
}
static int oa_tc6_phy_init(struct oa_tc6 *tc6)
{
	int ret;

	ret = oa_tc6_check_phy_reg_direct_access_capability(tc6);
	if (ret) {
		netdev_err(tc6->netdev,
			   "Direct PHY register access is not supported by the MAC-PHY\n");
		return ret;
	}

	ret = oa_tc6_mdiobus_register(tc6);
	if (ret)
		return ret;

	tc6->phydev = phy_find_first(tc6->mdiobus);
	if (!tc6->phydev) {
		netdev_err(tc6->netdev, "No PHY found\n");
		oa_tc6_mdiobus_unregister(tc6);
		return -ENODEV;
	}

	tc6->phydev->is_internal = true;
	ret = phy_connect_direct(tc6->netdev, tc6->phydev,
				 &oa_tc6_handle_link_change,
				 PHY_INTERFACE_MODE_INTERNAL);
	if (ret) {
		netdev_err(tc6->netdev, "Can't attach PHY to %s\n",
			   tc6->mdiobus->id);
		oa_tc6_mdiobus_unregister(tc6);
		return ret;
	}

	phy_attached_info(tc6->netdev->phydev);

	return 0;
}
static void oa_tc6_phy_exit(struct oa_tc6 *tc6)
{
	phy_disconnect(tc6->phydev);
	oa_tc6_mdiobus_unregister(tc6);
}
static int oa_tc6_read_status0(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &regval);
	if (ret) {
		dev_err(&tc6->spi->dev, "STATUS0 register read failed: %d\n",
			ret);
		return 0;
	}

	return regval;
}
static int oa_tc6_sw_reset_macphy(struct oa_tc6 *tc6)
{
	u32 regval = RESET_SWRESET;
	int ret;

	ret = oa_tc6_write_register(tc6, OA_TC6_REG_RESET, regval);
	if (ret)
		return ret;

	/* Poll every 1ms for the soft reset to complete, up to a 1s timeout */
	ret = readx_poll_timeout(oa_tc6_read_status0, tc6, regval,
				 regval & STATUS0_RESETC,
				 STATUS0_RESETC_POLL_DELAY,
				 STATUS0_RESETC_POLL_TIMEOUT);
	if (ret)
		return -ENODEV;

	/* Clear the reset complete status */
	return oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, regval);
}
static int oa_tc6_unmask_macphy_error_interrupts(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_INT_MASK0, &regval);
	if (ret)
		return ret;

	regval &= ~(INT_MASK0_TX_PROTOCOL_ERR_MASK |
		    INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK |
		    INT_MASK0_LOSS_OF_FRAME_ERR_MASK |
		    INT_MASK0_HEADER_ERR_MASK);

	return oa_tc6_write_register(tc6, OA_TC6_REG_INT_MASK0, regval);
}
static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &value);
	if (ret)
		return ret;

	/* Enable configuration synchronization for data transfer */
	value |= CONFIG0_SYNC;

	return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value);
}
static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6)
{
	if (tc6->rx_skb) {
		tc6->netdev->stats.rx_dropped++;
		kfree_skb(tc6->rx_skb);
		tc6->rx_skb = NULL;
	}
}

static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6)
{
	if (tc6->ongoing_tx_skb) {
		tc6->netdev->stats.tx_dropped++;
		kfree_skb(tc6->ongoing_tx_skb);
		tc6->ongoing_tx_skb = NULL;
	}
}
static int oa_tc6_process_extended_status(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &value);
	if (ret) {
		netdev_err(tc6->netdev, "STATUS0 register read failed: %d\n",
			   ret);
		return ret;
	}

	/* Clear the error interrupts status */
	ret = oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, value);
	if (ret) {
		netdev_err(tc6->netdev, "STATUS0 register write failed: %d\n",
			   ret);
		return ret;
	}

	if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) {
		tc6->rx_buf_overflow = true;
		oa_tc6_cleanup_ongoing_rx_skb(tc6);
		net_err_ratelimited("%s: Receive buffer overflow error\n",
				    tc6->netdev->name);
		return -EAGAIN;
	}
	if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) {
		netdev_err(tc6->netdev, "Transmit protocol error\n");
		return -ENODEV;
	}
	/* TODO: Currently loss of frame and header errors are treated as
	 * non-recoverable errors. They will be handled in the next version.
	 */
	if (FIELD_GET(STATUS0_LOSS_OF_FRAME_ERROR, value)) {
		netdev_err(tc6->netdev, "Loss of frame error\n");
		return -ENODEV;
	}
	if (FIELD_GET(STATUS0_HEADER_ERROR, value)) {
		netdev_err(tc6->netdev, "Header error\n");
		return -ENODEV;
	}

	return 0;
}
static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
{
	/* Process the rx chunk footer for the following:
	 * 1. tx credits
	 * 2. errors, if any, reported by the MAC-PHY
	 * 3. receive chunks available
	 */
	tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer);
	tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS,
					     footer);

	if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) {
		int ret = oa_tc6_process_extended_status(tc6);

		if (ret)
			return ret;
	}

	/* TODO: Currently received header bad and configuration unsync errors
	 * are treated as non-recoverable errors. They will be handled in the
	 * next version.
	 */
	if (FIELD_GET(OA_TC6_DATA_FOOTER_RXD_HEADER_BAD, footer)) {
		netdev_err(tc6->netdev, "Rxd header bad error\n");
		return -ENODEV;
	}

	if (!FIELD_GET(OA_TC6_DATA_FOOTER_CONFIG_SYNC, footer)) {
		netdev_err(tc6->netdev, "Config unsync error\n");
		return -ENODEV;
	}

	return 0;
}
static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6)
{
	tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev);
	tc6->netdev->stats.rx_packets++;
	tc6->netdev->stats.rx_bytes += tc6->rx_skb->len;

	netif_rx(tc6->rx_skb);

	tc6->rx_skb = NULL;
}
static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length)
{
	memcpy(skb_put(tc6->rx_skb, length), payload, length);
}

static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6)
{
	tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu +
						ETH_HLEN + ETH_FCS_LEN);
	if (!tc6->rx_skb) {
		tc6->netdev->stats.rx_dropped++;
		return -ENOMEM;
	}

	return 0;
}
static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload,
					 u16 size)
{
	int ret;

	ret = oa_tc6_allocate_rx_skb(tc6);
	if (ret)
		return ret;

	oa_tc6_update_rx_skb(tc6, payload, size);

	oa_tc6_submit_rx_skb(tc6);

	return 0;
}

static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	int ret;

	ret = oa_tc6_allocate_rx_skb(tc6);
	if (ret)
		return ret;

	oa_tc6_update_rx_skb(tc6, payload, size);

	return 0;
}
static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	oa_tc6_update_rx_skb(tc6, payload, size);

	oa_tc6_submit_rx_skb(tc6);
}

static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload,
					 u32 footer)
{
	oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE);
}
static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data,
					u32 footer)
{
	u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET,
					 footer) * sizeof(u32);
	u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET,
				       footer);
	bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer);
	bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer);
	u16 size;

	/* Restart a new rx frame after an rx buffer overflow error */
	if (start_valid && tc6->rx_buf_overflow)
		tc6->rx_buf_overflow = false;

	if (tc6->rx_buf_overflow)
		return 0;

	/* Process a chunk containing a complete rx frame */
	if (start_valid && end_valid && start_byte_offset < end_byte_offset) {
		size = end_byte_offset + 1 - start_byte_offset;
		return oa_tc6_prcs_complete_rx_frame(tc6,
						     &data[start_byte_offset],
						     size);
	}

	/* Process a chunk containing only an rx frame start */
	if (start_valid && !end_valid) {
		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process a chunk containing only an rx frame end */
	if (end_valid && !start_valid) {
		size = end_byte_offset + 1;
		oa_tc6_prcs_rx_frame_end(tc6, data, size);
		return 0;
	}

	/* Process a chunk containing the previous rx frame end and the next
	 * rx frame start.
	 */
	if (start_valid && end_valid && start_byte_offset > end_byte_offset) {
		/* After an rx buffer overflow error is received, there might
		 * be a possibility of getting an end valid of a previously
		 * incomplete rx frame along with the new rx frame start valid.
		 */
		if (tc6->rx_skb) {
			size = end_byte_offset + 1;
			oa_tc6_prcs_rx_frame_end(tc6, data, size);
		}
		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process a chunk containing ongoing rx frame data */
	oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer);

	return 0;
}
static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset)
{
	u8 *rx_buf = tc6->spi_data_rx_buf;
	__be32 footer;

	footer = *((__be32 *)&rx_buf[footer_offset]);

	return be32_to_cpu(footer);
}
static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length)
{
	u16 no_of_rx_chunks = length / OA_TC6_CHUNK_SIZE;
	u32 footer;
	int ret;

	/* All the rx chunks in the receive SPI data buffer are examined here */
	for (int i = 0; i < no_of_rx_chunks; i++) {
		/* The last 4 bytes of each received chunk hold the footer */
		footer = oa_tc6_get_rx_chunk_footer(tc6, i * OA_TC6_CHUNK_SIZE +
						    OA_TC6_CHUNK_PAYLOAD_SIZE);

		ret = oa_tc6_process_rx_chunk_footer(tc6, footer);
		if (ret)
			return ret;

		/* If the chunk carries valid data, process it to determine the
		 * validity and the location of the receive frame data.
		 */
		if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) {
			u8 *payload = tc6->spi_data_rx_buf + i *
				      OA_TC6_CHUNK_SIZE;

			ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload,
							   footer);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static __be32 oa_tc6_prepare_data_header(bool data_valid, bool start_valid,
					 bool end_valid, u8 end_byte_offset)
{
	u32 header = FIELD_PREP(OA_TC6_DATA_HEADER_DATA_NOT_CTRL,
				OA_TC6_DATA_HEADER) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_DATA_VALID, data_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_START_VALID, start_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_END_VALID, end_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_END_BYTE_OFFSET,
				end_byte_offset);

	header |= FIELD_PREP(OA_TC6_DATA_HEADER_PARITY,
			     oa_tc6_get_parity(header));

	return cpu_to_be32(header);
}
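
/* Worked example (illustrative): a frame that fits in one chunk with its last
 * byte at payload offset 59 gives header = 0x80307B00: DNC = 1,
 * DATA_VALID = 1, START_VALID = 1, END_VALID = 1, END_BYTE_OFFSET = 59. That
 * word has nine ones, already an odd count, so the parity bit stays clear.
 */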
static void oa_tc6_add_tx_skb_to_spi_buf(struct oa_tc6 *tc6)
{
	enum oa_tc6_data_end_valid_info end_valid = OA_TC6_DATA_END_INVALID;
	__be32 *tx_buf = tc6->spi_data_tx_buf + tc6->spi_data_tx_buf_offset;
	u16 remaining_len = tc6->ongoing_tx_skb->len - tc6->tx_skb_offset;
	u8 *tx_skb_data = tc6->ongoing_tx_skb->data + tc6->tx_skb_offset;
	enum oa_tc6_data_start_valid_info start_valid;
	u8 end_byte_offset = 0;
	u16 length_to_copy;

	/* The initial value is assigned here to keep the declaration line
	 * within 80 characters.
	 */
	start_valid = OA_TC6_DATA_START_INVALID;

	/* Set start valid if the current tx chunk contains the start of the tx
	 * ethernet frame.
	 */
	if (!tc6->tx_skb_offset)
		start_valid = OA_TC6_DATA_START_VALID;

	/* If the remaining tx skb length is more than the chunk payload size
	 * of 64 bytes then copy only 64 bytes and leave the ongoing tx skb for
	 * the next tx chunk.
	 */
	length_to_copy = min_t(u16, remaining_len, OA_TC6_CHUNK_PAYLOAD_SIZE);

	/* Copy the tx skb data to the tx chunk payload buffer */
	memcpy(tx_buf + 1, tx_skb_data, length_to_copy);
	tc6->tx_skb_offset += length_to_copy;

	/* Set end valid if the current tx chunk contains the end of the tx
	 * ethernet frame.
	 */
	if (tc6->ongoing_tx_skb->len == tc6->tx_skb_offset) {
		end_valid = OA_TC6_DATA_END_VALID;
		end_byte_offset = length_to_copy - 1;
		tc6->tx_skb_offset = 0;
		tc6->netdev->stats.tx_bytes += tc6->ongoing_tx_skb->len;
		tc6->netdev->stats.tx_packets++;
		kfree_skb(tc6->ongoing_tx_skb);
		tc6->ongoing_tx_skb = NULL;
	}

	*tx_buf = oa_tc6_prepare_data_header(OA_TC6_DATA_VALID, start_valid,
					     end_valid, end_byte_offset);
	tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
}
static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
{
	u16 used_tx_credits;

	/* Get tx skbs and convert them into tx chunks based on the tx credits
	 * available.
	 */
	for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
	     used_tx_credits++) {
		if (!tc6->ongoing_tx_skb) {
			tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
			tc6->waiting_tx_skb = NULL;
		}
		if (!tc6->ongoing_tx_skb)
			break;
		oa_tc6_add_tx_skb_to_spi_buf(tc6);
	}

	return used_tx_credits * OA_TC6_CHUNK_SIZE;
}
static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6,
					       u16 needed_empty_chunks)
{
	__be32 header;

	header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID,
					    OA_TC6_DATA_START_INVALID,
					    OA_TC6_DATA_END_INVALID, 0);

	while (needed_empty_chunks--) {
		__be32 *tx_buf = tc6->spi_data_tx_buf +
				 tc6->spi_data_tx_buf_offset;

		*tx_buf = header;
		tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
	}
}
static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len)
{
	u16 tx_chunks = len / OA_TC6_CHUNK_SIZE;
	u16 needed_empty_chunks;

	/* If there are more chunks to receive than to transmit, we need to add
	 * enough empty tx chunks to allow the reception of the excess rx
	 * chunks.
	 */
	if (tx_chunks >= tc6->rx_chunks_available)
		return len;

	needed_empty_chunks = tc6->rx_chunks_available - tx_chunks;

	oa_tc6_add_empty_chunks_to_spi_buf(tc6, needed_empty_chunks);

	return needed_empty_chunks * OA_TC6_CHUNK_SIZE + len;
}
static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6)
{
	int ret;

	while (true) {
		u16 spi_len = 0;

		tc6->spi_data_tx_buf_offset = 0;

		if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb)
			spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);

		spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len);

		if (tc6->int_flag) {
			tc6->int_flag = false;
			if (spi_len == 0) {
				oa_tc6_add_empty_chunks_to_spi_buf(tc6, 1);
				spi_len = OA_TC6_CHUNK_SIZE;
			}
		}

		if (spi_len == 0)
			break;

		ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len);
		if (ret) {
			netdev_err(tc6->netdev, "SPI data transfer failed: %d\n",
				   ret);
			return ret;
		}

		ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len);
		if (ret) {
			if (ret == -EAGAIN)
				continue;

			oa_tc6_cleanup_ongoing_tx_skb(tc6);
			oa_tc6_cleanup_ongoing_rx_skb(tc6);
			netdev_err(tc6->netdev, "Device error: %d\n", ret);
			return ret;
		}

		if (!tc6->waiting_tx_skb && netif_queue_stopped(tc6->netdev))
			netif_wake_queue(tc6->netdev);
	}

	return 0;
}
static int oa_tc6_spi_thread_handler(void *data)
{
	struct oa_tc6 *tc6 = data;
	int ret;

	while (likely(!kthread_should_stop())) {
		/* This kthread is woken up when there is a tx skb or a MAC-PHY
		 * interrupt to perform an SPI transfer with tx chunks.
		 */
		wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
					 tc6->int_flag ||
					 kthread_should_stop());

		if (kthread_should_stop())
			break;

		ret = oa_tc6_try_spi_transfer(tc6);
		if (ret)
			return ret;
	}

	return 0;
}
static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	/* Initially the tx credits and rx chunks available are read from the
	 * register, as no data transfer has been performed yet. Later they are
	 * updated from the rx footer.
	 */
	ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value);
	if (ret)
		return ret;

	tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value);
	tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE,
					     value);

	return 0;
}
static irqreturn_t oa_tc6_macphy_isr(int irq, void *data)
{
	struct oa_tc6 *tc6 = data;

	/* MAC-PHY interrupt can occur for the following reasons.
	 * - availability of tx credits if it was 0 before and not reported in
	 *   the previous rx footer.
	 * - availability of rx chunks if it was 0 before and not reported in
	 *   the previous rx footer.
	 * - extended status event not reported in the previous rx footer.
	 */
	tc6->int_flag = true;
	/* Wake spi kthread to perform spi transfer */
	wake_up_interruptible(&tc6->spi_wq);

	return IRQ_HANDLED;
}
/**
 * oa_tc6_zero_align_receive_frame_enable - function to enable the zero align
 * receive frame feature.
 * @tc6: oa_tc6 struct.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
	if (ret)
		return ret;

	/* Set Zero-Align Receive Frame Enable */
	regval |= CONFIG0_ZARFE_ENABLE;

	return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
}
EXPORT_SYMBOL_GPL(oa_tc6_zero_align_receive_frame_enable);
/**
 * oa_tc6_start_xmit - function for sending the tx skb which contains an
 * ethernet frame.
 * @tc6: oa_tc6 struct.
 * @skb: socket buffer in which the ethernet frame is stored.
 *
 * Return: NETDEV_TX_OK if the transmit skb was accepted for transmission,
 * otherwise NETDEV_TX_BUSY.
 */
netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
{
	if (tc6->waiting_tx_skb) {
		netif_stop_queue(tc6->netdev);
		return NETDEV_TX_BUSY;
	}

	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		tc6->netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	tc6->waiting_tx_skb = skb;

	/* Wake spi kthread to perform spi transfer */
	wake_up_interruptible(&tc6->spi_wq);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(oa_tc6_start_xmit);
/**
 * oa_tc6_init - allocates and initializes oa_tc6 structure.
 * @spi: device with which data will be exchanged.
 * @netdev: network device interface structure.
 *
 * Return: pointer to the oa_tc6 structure if the MAC-PHY initialization
 * succeeds, otherwise NULL.
 */
struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
{
	struct oa_tc6 *tc6;
	int ret;

	tc6 = devm_kzalloc(&spi->dev, sizeof(*tc6), GFP_KERNEL);
	if (!tc6)
		return NULL;

	tc6->spi = spi;
	tc6->netdev = netdev;
	tc6->dev = &spi->dev;
	SET_NETDEV_DEV(netdev, &spi->dev);
	mutex_init(&tc6->spi_ctrl_lock);

	/* Set the SPI controller to pump at realtime priority */
	tc6->spi->rt = true;
	spi_setup(tc6->spi);

	tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_CTRL_SPI_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_ctrl_tx_buf)
		return NULL;

	tc6->spi_ctrl_rx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_CTRL_SPI_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_ctrl_rx_buf)
		return NULL;

	tc6->spi_data_tx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_SPI_DATA_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_data_tx_buf)
		return NULL;

	tc6->spi_data_rx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_SPI_DATA_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_data_rx_buf)
		return NULL;

	ret = oa_tc6_sw_reset_macphy(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC-PHY software reset failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_unmask_macphy_error_interrupts(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC-PHY error interrupts unmask failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_phy_init(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC internal PHY initialization failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_enable_data_transfer(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev, "Failed to enable data transfer: %d\n",
			ret);
		goto phy_exit;
	}

	ret = oa_tc6_update_buffer_status_from_register(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"Failed to update buffer status: %d\n", ret);
		goto phy_exit;
	}

	init_waitqueue_head(&tc6->spi_wq);

	tc6->spi_thread = kthread_run(oa_tc6_spi_thread_handler, tc6,
				      "oa-tc6-spi-thread");
	if (IS_ERR(tc6->spi_thread)) {
		dev_err(&tc6->spi->dev, "Failed to create SPI thread\n");
		goto phy_exit;
	}

	sched_set_fifo(tc6->spi_thread);

	ret = devm_request_irq(&tc6->spi->dev, tc6->spi->irq, oa_tc6_macphy_isr,
			       IRQF_TRIGGER_FALLING, dev_name(&tc6->spi->dev),
			       tc6);
	if (ret) {
		dev_err(&tc6->spi->dev, "Failed to request macphy isr %d\n",
			ret);
		goto kthread_stop;
	}

	/* oa_tc6_sw_reset_macphy() resets the MAC-PHY and clears the reset
	 * complete status. The IRQ is also asserted on reset completion and
	 * remains asserted until the MAC-PHY receives a data chunk, so an
	 * empty data chunk transmission is performed here to deassert the IRQ.
	 * Refer to sections 7.7 and 9.2.8.8 in the OPEN Alliance specification
	 * for more details.
	 */
	tc6->int_flag = true;
	wake_up_interruptible(&tc6->spi_wq);

	return tc6;

kthread_stop:
	kthread_stop(tc6->spi_thread);
phy_exit:
	oa_tc6_phy_exit(tc6);
	return NULL;
}
EXPORT_SYMBOL_GPL(oa_tc6_init);
/**
 * oa_tc6_exit - exit function.
 * @tc6: oa_tc6 struct.
 */
void oa_tc6_exit(struct oa_tc6 *tc6)
{
	oa_tc6_phy_exit(tc6);
	kthread_stop(tc6->spi_thread);
	dev_kfree_skb_any(tc6->ongoing_tx_skb);
	dev_kfree_skb_any(tc6->waiting_tx_skb);
	dev_kfree_skb_any(tc6->rx_skb);
}
EXPORT_SYMBOL_GPL(oa_tc6_exit);
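
/* Usage sketch (illustrative; the names below are hypothetical): a MAC driver
 * would typically wire this framework up in its SPI probe/remove callbacks.
 *
 *	static int my_macphy_probe(struct spi_device *spi)
 *	{
 *		...
 *		priv->tc6 = oa_tc6_init(spi, netdev);
 *		if (!priv->tc6)
 *			return -ENODEV;
 *		...
 *	}
 *
 * with oa_tc6_exit(priv->tc6) called from the matching remove callback.
 */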
MODULE_DESCRIPTION("OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface Lib");
MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
MODULE_LICENSE("GPL");