// SPDX-License-Identifier: GPL-2.0+
/*
 * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
 *
 * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
 */

#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/oa_tc6.h>

/* OPEN Alliance TC6 registers */
/* Standard Capabilities Register */
#define OA_TC6_REG_STDCAP			0x0002
#define STDCAP_DIRECT_PHY_REG_ACCESS		BIT(8)

/* Reset Control and Status Register */
#define OA_TC6_REG_RESET			0x0003
#define RESET_SWRESET				BIT(0)	/* Software Reset */

/* Configuration Register #0 */
#define OA_TC6_REG_CONFIG0			0x0004
#define CONFIG0_SYNC				BIT(15)
#define CONFIG0_ZARFE_ENABLE			BIT(12)

/* Status Register #0 */
#define OA_TC6_REG_STATUS0			0x0008
#define STATUS0_RESETC				BIT(6)	/* Reset Complete */
#define STATUS0_HEADER_ERROR			BIT(5)
#define STATUS0_LOSS_OF_FRAME_ERROR		BIT(4)
#define STATUS0_RX_BUFFER_OVERFLOW_ERROR	BIT(3)
#define STATUS0_TX_PROTOCOL_ERROR		BIT(0)

/* Buffer Status Register */
#define OA_TC6_REG_BUFFER_STATUS		0x000B
#define BUFFER_STATUS_TX_CREDITS_AVAILABLE	GENMASK(15, 8)
#define BUFFER_STATUS_RX_CHUNKS_AVAILABLE	GENMASK(7, 0)

/* Interrupt Mask Register #0 */
#define OA_TC6_REG_INT_MASK0			0x000C
#define INT_MASK0_HEADER_ERR_MASK		BIT(5)
#define INT_MASK0_LOSS_OF_FRAME_ERR_MASK	BIT(4)
#define INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK	BIT(3)
#define INT_MASK0_TX_PROTOCOL_ERR_MASK		BIT(0)

/* PHY Clause 22 registers base address and mask */
#define OA_TC6_PHY_STD_REG_ADDR_BASE		0xFF00
#define OA_TC6_PHY_STD_REG_ADDR_MASK		0x1F

/* Control command header */
#define OA_TC6_CTRL_HEADER_DATA_NOT_CTRL	BIT(31)
#define OA_TC6_CTRL_HEADER_WRITE_NOT_READ	BIT(29)
#define OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR	GENMASK(27, 24)
#define OA_TC6_CTRL_HEADER_ADDR			GENMASK(23, 8)
#define OA_TC6_CTRL_HEADER_LENGTH		GENMASK(7, 1)
#define OA_TC6_CTRL_HEADER_PARITY		BIT(0)
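
/* Worked example (illustrative, not from the specification text): a control
 * read of a single register at address 0x0004 in MMS 0 sets only the ADDR
 * field, giving the header 0x00000400. LENGTH is 0 (one register) and the
 * parity bit stays 0, since the header already contains an odd number of
 * ones.
 */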

/* Data header */
#define OA_TC6_DATA_HEADER_DATA_NOT_CTRL	BIT(31)
#define OA_TC6_DATA_HEADER_DATA_VALID		BIT(21)
#define OA_TC6_DATA_HEADER_START_VALID		BIT(20)
#define OA_TC6_DATA_HEADER_START_WORD_OFFSET	GENMASK(19, 16)
#define OA_TC6_DATA_HEADER_END_VALID		BIT(14)
#define OA_TC6_DATA_HEADER_END_BYTE_OFFSET	GENMASK(13, 8)
#define OA_TC6_DATA_HEADER_PARITY		BIT(0)

/* Data footer */
#define OA_TC6_DATA_FOOTER_EXTENDED_STS		BIT(31)
#define OA_TC6_DATA_FOOTER_RXD_HEADER_BAD	BIT(30)
#define OA_TC6_DATA_FOOTER_CONFIG_SYNC		BIT(29)
#define OA_TC6_DATA_FOOTER_RX_CHUNKS		GENMASK(28, 24)
#define OA_TC6_DATA_FOOTER_DATA_VALID		BIT(21)
#define OA_TC6_DATA_FOOTER_START_VALID		BIT(20)
#define OA_TC6_DATA_FOOTER_START_WORD_OFFSET	GENMASK(19, 16)
#define OA_TC6_DATA_FOOTER_END_VALID		BIT(14)
#define OA_TC6_DATA_FOOTER_END_BYTE_OFFSET	GENMASK(13, 8)
#define OA_TC6_DATA_FOOTER_TX_CREDITS		GENMASK(5, 1)

/* PHY - Clause 45 registers memory map selector (MMS) as per table 6 in the
 * OPEN Alliance specification.
 */
#define OA_TC6_PHY_C45_PCS_MMS2			2	/* MMD 3 */
#define OA_TC6_PHY_C45_PMA_PMD_MMS3		3	/* MMD 1 */
#define OA_TC6_PHY_C45_VS_PLCA_MMS4		4	/* MMD 31 */
#define OA_TC6_PHY_C45_AUTO_NEG_MMS5		5	/* MMD 7 */
#define OA_TC6_PHY_C45_POWER_UNIT_MMS6		6	/* MMD 13 */

#define OA_TC6_CTRL_HEADER_SIZE			4
#define OA_TC6_CTRL_REG_VALUE_SIZE		4
#define OA_TC6_CTRL_IGNORED_SIZE		4
#define OA_TC6_CTRL_MAX_REGISTERS		128
#define OA_TC6_CTRL_SPI_BUF_SIZE		(OA_TC6_CTRL_HEADER_SIZE +\
						(OA_TC6_CTRL_MAX_REGISTERS *\
						OA_TC6_CTRL_REG_VALUE_SIZE) +\
						OA_TC6_CTRL_IGNORED_SIZE)
#define OA_TC6_CHUNK_PAYLOAD_SIZE		64
#define OA_TC6_DATA_HEADER_SIZE			4
#define OA_TC6_CHUNK_SIZE			(OA_TC6_DATA_HEADER_SIZE +\
						OA_TC6_CHUNK_PAYLOAD_SIZE)
#define OA_TC6_MAX_TX_CHUNKS			48
#define OA_TC6_SPI_DATA_BUF_SIZE		(OA_TC6_MAX_TX_CHUNKS *\
						OA_TC6_CHUNK_SIZE)
#define STATUS0_RESETC_POLL_DELAY		1000
#define STATUS0_RESETC_POLL_TIMEOUT		1000000
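
/* Resulting buffer sizes, for reference: the control buffer is
 * 4 + (128 * 4) + 4 = 520 bytes, each data chunk is 4 + 64 = 68 bytes, and
 * the data buffer is therefore 48 * 68 = 3264 bytes.
 */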

/* Internal structure for MAC-PHY drivers */
struct oa_tc6 {
	struct device *dev;
	struct net_device *netdev;
	struct phy_device *phydev;
	struct mii_bus *mdiobus;
	struct spi_device *spi;
	struct mutex spi_ctrl_lock; /* Protects spi control transfer */
	spinlock_t tx_skb_lock; /* Protects tx skb handling */
	void *spi_ctrl_tx_buf;
	void *spi_ctrl_rx_buf;
	void *spi_data_tx_buf;
	void *spi_data_rx_buf;
	struct sk_buff *ongoing_tx_skb;
	struct sk_buff *waiting_tx_skb;
	struct sk_buff *rx_skb;
	struct task_struct *spi_thread;
	wait_queue_head_t spi_wq;
	u16 tx_skb_offset;
	u16 spi_data_tx_buf_offset;
	u8 tx_credits;
	u8 rx_chunks_available;
	bool rx_buf_overflow;
	bool int_flag;
};

enum oa_tc6_header_type {
	OA_TC6_CTRL_HEADER,
	OA_TC6_DATA_HEADER,
};

enum oa_tc6_register_op {
	OA_TC6_CTRL_REG_READ = 0,
	OA_TC6_CTRL_REG_WRITE = 1,
};

enum oa_tc6_data_valid_info {
	OA_TC6_DATA_INVALID,
	OA_TC6_DATA_VALID,
};

enum oa_tc6_data_start_valid_info {
	OA_TC6_DATA_START_INVALID,
	OA_TC6_DATA_START_VALID,
};

enum oa_tc6_data_end_valid_info {
	OA_TC6_DATA_END_INVALID,
	OA_TC6_DATA_END_VALID,
};

static int oa_tc6_spi_transfer(struct oa_tc6 *tc6,
			       enum oa_tc6_header_type header_type, u16 length)
{
	struct spi_transfer xfer = { 0 };
	struct spi_message msg;

	if (header_type == OA_TC6_DATA_HEADER) {
		xfer.tx_buf = tc6->spi_data_tx_buf;
		xfer.rx_buf = tc6->spi_data_rx_buf;
	} else {
		xfer.tx_buf = tc6->spi_ctrl_tx_buf;
		xfer.rx_buf = tc6->spi_ctrl_rx_buf;
	}
	xfer.len = length;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	return spi_sync(tc6->spi, &msg);
}

static int oa_tc6_get_parity(u32 p)
{
	/* Public domain code snippet, lifted from
	 * http://www-graphics.stanford.edu/~seander/bithacks.html
	 */
	p ^= p >> 1;
	p ^= p >> 2;
	p = (p & 0x11111111U) * 0x11111111U;

	/* Odd parity is used here */
	return !((p >> 28) & 1);
}
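
/* How the bit hack above works, traced on p = 0x400 (one bit set): the two
 * XOR folds leave each nibble's parity in its lowest bit (0x400 -> 0x780),
 * masking with 0x11111111 keeps only those bits (0x100) and the multiply sums
 * them into the top nibble, so bit 28 holds the overall parity (1 here). The
 * function then returns 0, the value the parity bit must take to make the
 * total number of ones odd.
 */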

static __be32 oa_tc6_prepare_ctrl_header(u32 addr, u8 length,
					 enum oa_tc6_register_op reg_op)
{
	u32 header;

	header = FIELD_PREP(OA_TC6_CTRL_HEADER_DATA_NOT_CTRL,
			    OA_TC6_CTRL_HEADER) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_WRITE_NOT_READ, reg_op) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_MEM_MAP_SELECTOR, addr >> 16) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_ADDR, addr) |
		 FIELD_PREP(OA_TC6_CTRL_HEADER_LENGTH, length - 1);
	header |= FIELD_PREP(OA_TC6_CTRL_HEADER_PARITY,
			     oa_tc6_get_parity(header));

	return cpu_to_be32(header);
}

static void oa_tc6_update_ctrl_write_data(struct oa_tc6 *tc6, u32 value[],
					  u8 length)
{
	__be32 *tx_buf = tc6->spi_ctrl_tx_buf + OA_TC6_CTRL_HEADER_SIZE;

	for (int i = 0; i < length; i++)
		*tx_buf++ = cpu_to_be32(value[i]);
}

static u16 oa_tc6_calculate_ctrl_buf_size(u8 length)
{
	/* Control command consists of a 4-byte header, a 4-byte register
	 * value for each register and 4 ignored bytes.
	 */
	return OA_TC6_CTRL_HEADER_SIZE + OA_TC6_CTRL_REG_VALUE_SIZE * length +
	       OA_TC6_CTRL_IGNORED_SIZE;
}

static void oa_tc6_prepare_ctrl_spi_buf(struct oa_tc6 *tc6, u32 address,
					u32 value[], u8 length,
					enum oa_tc6_register_op reg_op)
{
	__be32 *tx_buf = tc6->spi_ctrl_tx_buf;

	*tx_buf = oa_tc6_prepare_ctrl_header(address, length, reg_op);

	if (reg_op == OA_TC6_CTRL_REG_WRITE)
		oa_tc6_update_ctrl_write_data(tc6, value, length);
}

static int oa_tc6_check_ctrl_write_reply(struct oa_tc6 *tc6, u8 size)
{
	u8 *tx_buf = tc6->spi_ctrl_tx_buf;
	u8 *rx_buf = tc6->spi_ctrl_rx_buf;

	rx_buf += OA_TC6_CTRL_IGNORED_SIZE;

	/* The echoed control write must match with the one that was
	 * transmitted.
	 */
	if (memcmp(tx_buf, rx_buf, size - OA_TC6_CTRL_IGNORED_SIZE))
		return -EPROTO;

	return 0;
}

static int oa_tc6_check_ctrl_read_reply(struct oa_tc6 *tc6, u8 size)
{
	u32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE;
	u32 *tx_buf = tc6->spi_ctrl_tx_buf;

	/* The echoed control read header must match with the one that was
	 * transmitted.
	 */
	if (*tx_buf != *rx_buf)
		return -EPROTO;

	return 0;
}
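
/* Why the reply checks skip the first 4 bytes: in a TC6 control transaction
 * the MAC-PHY starts echoing the transmitted stream one 4-byte word late, so
 * the first OA_TC6_CTRL_IGNORED_SIZE bytes on the receive side carry no
 * information. For a write, everything after them must equal what was sent;
 * for a read, the echoed header is followed by the register values copied out
 * in oa_tc6_copy_ctrl_read_data() below.
 */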

static void oa_tc6_copy_ctrl_read_data(struct oa_tc6 *tc6, u32 value[],
				       u8 length)
{
	__be32 *rx_buf = tc6->spi_ctrl_rx_buf + OA_TC6_CTRL_IGNORED_SIZE +
			 OA_TC6_CTRL_HEADER_SIZE;

	for (int i = 0; i < length; i++)
		value[i] = be32_to_cpu(*rx_buf++);
}

static int oa_tc6_perform_ctrl(struct oa_tc6 *tc6, u32 address, u32 value[],
			       u8 length, enum oa_tc6_register_op reg_op)
{
	u16 size;
	int ret;

	/* Prepare control command and copy to SPI control buffer */
	oa_tc6_prepare_ctrl_spi_buf(tc6, address, value, length, reg_op);

	size = oa_tc6_calculate_ctrl_buf_size(length);

	/* Perform SPI transfer */
	ret = oa_tc6_spi_transfer(tc6, OA_TC6_CTRL_HEADER, size);
	if (ret) {
		dev_err(&tc6->spi->dev, "SPI transfer failed for control: %d\n",
			ret);
		return ret;
	}

	/* Check echoed/received control write command reply for errors */
	if (reg_op == OA_TC6_CTRL_REG_WRITE)
		return oa_tc6_check_ctrl_write_reply(tc6, size);

	/* Check echoed/received control read command reply for errors */
	ret = oa_tc6_check_ctrl_read_reply(tc6, size);
	if (ret)
		return ret;

	oa_tc6_copy_ctrl_read_data(tc6, value, length);

	return 0;
}

/**
 * oa_tc6_read_registers - function for reading multiple consecutive registers.
 * @tc6: oa_tc6 struct.
 * @address: address of the first register to be read in the MAC-PHY.
 * @value: values to be read from the starting register address @address.
 * @length: number of consecutive registers to be read from @address.
 *
 * Maximum of 128 consecutive registers can be read starting at @address.
 *
 * Return: 0 on success otherwise failed.
 */
int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
			  u8 length)
{
	int ret;

	if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
		dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
		return -EINVAL;
	}

	mutex_lock(&tc6->spi_ctrl_lock);
	ret = oa_tc6_perform_ctrl(tc6, address, value, length,
				  OA_TC6_CTRL_REG_READ);
	mutex_unlock(&tc6->spi_ctrl_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(oa_tc6_read_registers);
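
/* A minimal usage sketch (illustrative only; callers pass their own register
 * address and count). On success values[0]/values[1] hold registers
 * 0x0002/0x0003:
 *
 *	u32 values[2];
 *	int ret;
 *
 *	ret = oa_tc6_read_registers(tc6, 0x0002, values, 2);
 *	if (ret)
 *		return ret;
 */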

/**
 * oa_tc6_read_register - function for reading a MAC-PHY register.
 * @tc6: oa_tc6 struct.
 * @address: register address of the MAC-PHY to be read.
 * @value: value read from the @address register address of the MAC-PHY.
 *
 * Return: 0 on success otherwise failed.
 */
int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value)
{
	return oa_tc6_read_registers(tc6, address, value, 1);
}
EXPORT_SYMBOL_GPL(oa_tc6_read_register);

/**
 * oa_tc6_write_registers - function for writing multiple consecutive registers.
 * @tc6: oa_tc6 struct.
 * @address: address of the first register to be written in the MAC-PHY.
 * @value: values to be written from the starting register address @address.
 * @length: number of consecutive registers to be written from @address.
 *
 * Maximum of 128 consecutive registers can be written starting at @address.
 *
 * Return: 0 on success otherwise failed.
 */
int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
			   u8 length)
{
	int ret;

	if (!length || length > OA_TC6_CTRL_MAX_REGISTERS) {
		dev_err(&tc6->spi->dev, "Invalid register length parameter\n");
		return -EINVAL;
	}

	mutex_lock(&tc6->spi_ctrl_lock);
	ret = oa_tc6_perform_ctrl(tc6, address, value, length,
				  OA_TC6_CTRL_REG_WRITE);
	mutex_unlock(&tc6->spi_ctrl_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(oa_tc6_write_registers);

/**
 * oa_tc6_write_register - function for writing a MAC-PHY register.
 * @tc6: oa_tc6 struct.
 * @address: register address of the MAC-PHY to be written.
 * @value: value to be written in the @address register address of the MAC-PHY.
 *
 * Return: 0 on success otherwise failed.
 */
int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value)
{
	return oa_tc6_write_registers(tc6, address, &value, 1);
}
EXPORT_SYMBOL_GPL(oa_tc6_write_register);

static int oa_tc6_check_phy_reg_direct_access_capability(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STDCAP, &regval);
	if (ret)
		return ret;

	if (!(regval & STDCAP_DIRECT_PHY_REG_ACCESS))
		return -ENODEV;

	return 0;
}

static void oa_tc6_handle_link_change(struct net_device *netdev)
{
	phy_print_status(netdev->phydev);
}

static int oa_tc6_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct oa_tc6 *tc6 = bus->priv;
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
				   (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
				   &regval);
	if (ret)
		return ret;

	return regval;
}

static int oa_tc6_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
				u16 val)
{
	struct oa_tc6 *tc6 = bus->priv;

	return oa_tc6_write_register(tc6, OA_TC6_PHY_STD_REG_ADDR_BASE |
				     (regnum & OA_TC6_PHY_STD_REG_ADDR_MASK),
				     val);
}

static int oa_tc6_get_phy_c45_mms(int devnum)
{
	switch (devnum) {
	case MDIO_MMD_PCS:
		return OA_TC6_PHY_C45_PCS_MMS2;
	case MDIO_MMD_PMAPMD:
		return OA_TC6_PHY_C45_PMA_PMD_MMS3;
	case MDIO_MMD_VEND2:
		return OA_TC6_PHY_C45_VS_PLCA_MMS4;
	case MDIO_MMD_AN:
		return OA_TC6_PHY_C45_AUTO_NEG_MMS5;
	case MDIO_MMD_POWER_UNIT:
		return OA_TC6_PHY_C45_POWER_UNIT_MMS6;
	default:
		return -EOPNOTSUPP;
	}
}

static int oa_tc6_mdiobus_read_c45(struct mii_bus *bus, int addr, int devnum,
				   int regnum)
{
	struct oa_tc6 *tc6 = bus->priv;
	u32 regval;
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devnum);
	if (ret < 0)
		return ret;

	ret = oa_tc6_read_register(tc6, (ret << 16) | regnum, &regval);
	if (ret)
		return ret;

	return regval;
}

static int oa_tc6_mdiobus_write_c45(struct mii_bus *bus, int addr, int devnum,
				    int regnum, u16 val)
{
	struct oa_tc6 *tc6 = bus->priv;
	int ret;

	ret = oa_tc6_get_phy_c45_mms(devnum);
	if (ret < 0)
		return ret;

	return oa_tc6_write_register(tc6, (ret << 16) | regnum, val);
}

static int oa_tc6_mdiobus_register(struct oa_tc6 *tc6)
{
	int ret;

	tc6->mdiobus = mdiobus_alloc();
	if (!tc6->mdiobus) {
		netdev_err(tc6->netdev, "MDIO bus alloc failed\n");
		return -ENOMEM;
	}

	tc6->mdiobus->priv = tc6;
	tc6->mdiobus->read = oa_tc6_mdiobus_read;
	tc6->mdiobus->write = oa_tc6_mdiobus_write;
	/* OPEN Alliance 10BASE-T1x compliant MAC-PHYs have both C22 and C45
	 * register spaces. If the PHY is discovered via the C22 bus protocol,
	 * phylib assumes C22 and always accesses C45 registers through C22
	 * indirect access, because there is no clean separation between the
	 * C22/C45 register spaces and the C22/C45 MDIO bus protocols. As a
	 * result, direct access to the PHY's C45 registers, which would save
	 * multiple SPI bus accesses, is not used. To support it, PHY drivers
	 * can set .read_mmd/.write_mmd in the PHY driver to call
	 * .read_c45/.write_c45. Ex: drivers/net/phy/microchip_t1s.c
	 */
	tc6->mdiobus->read_c45 = oa_tc6_mdiobus_read_c45;
	tc6->mdiobus->write_c45 = oa_tc6_mdiobus_write_c45;
	tc6->mdiobus->name = "oa-tc6-mdiobus";
	tc6->mdiobus->parent = tc6->dev;

	snprintf(tc6->mdiobus->id, ARRAY_SIZE(tc6->mdiobus->id), "%s",
		 dev_name(&tc6->spi->dev));

	ret = mdiobus_register(tc6->mdiobus);
	if (ret) {
		netdev_err(tc6->netdev, "Could not register MDIO bus\n");
		mdiobus_free(tc6->mdiobus);
		return ret;
	}

	return 0;
}

static void oa_tc6_mdiobus_unregister(struct oa_tc6 *tc6)
{
	mdiobus_unregister(tc6->mdiobus);
	mdiobus_free(tc6->mdiobus);
}

static int oa_tc6_phy_init(struct oa_tc6 *tc6)
{
	int ret;

	ret = oa_tc6_check_phy_reg_direct_access_capability(tc6);
	if (ret) {
		netdev_err(tc6->netdev,
			   "Direct PHY register access is not supported by the MAC-PHY\n");
		return ret;
	}

	ret = oa_tc6_mdiobus_register(tc6);
	if (ret)
		return ret;

	tc6->phydev = phy_find_first(tc6->mdiobus);
	if (!tc6->phydev) {
		netdev_err(tc6->netdev, "No PHY found\n");
		oa_tc6_mdiobus_unregister(tc6);
		return -ENODEV;
	}

	tc6->phydev->is_internal = true;
	ret = phy_connect_direct(tc6->netdev, tc6->phydev,
				 &oa_tc6_handle_link_change,
				 PHY_INTERFACE_MODE_INTERNAL);
	if (ret) {
		netdev_err(tc6->netdev, "Can't attach PHY to %s\n",
			   tc6->mdiobus->id);
		oa_tc6_mdiobus_unregister(tc6);
		return ret;
	}

	phy_attached_info(tc6->netdev->phydev);

	return 0;
}

static void oa_tc6_phy_exit(struct oa_tc6 *tc6)
{
	phy_disconnect(tc6->phydev);
	oa_tc6_mdiobus_unregister(tc6);
}

static int oa_tc6_read_status0(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &regval);
	if (ret) {
		dev_err(&tc6->spi->dev, "STATUS0 register read failed: %d\n",
			ret);
		return 0;
	}

	return regval;
}

static int oa_tc6_sw_reset_macphy(struct oa_tc6 *tc6)
{
	u32 regval = RESET_SWRESET;
	int ret;

	ret = oa_tc6_write_register(tc6, OA_TC6_REG_RESET, regval);
	if (ret)
		return ret;

	/* Poll for soft reset complete for every 1ms until 1s timeout */
	ret = readx_poll_timeout(oa_tc6_read_status0, tc6, regval,
				 regval & STATUS0_RESETC,
				 STATUS0_RESETC_POLL_DELAY,
				 STATUS0_RESETC_POLL_TIMEOUT);
	if (ret)
		return -ENODEV;

	/* Clear the reset complete status */
	return oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, regval);
}

static int oa_tc6_unmask_macphy_error_interrupts(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_INT_MASK0, &regval);
	if (ret)
		return ret;

	regval &= ~(INT_MASK0_TX_PROTOCOL_ERR_MASK |
		    INT_MASK0_RX_BUFFER_OVERFLOW_ERR_MASK |
		    INT_MASK0_LOSS_OF_FRAME_ERR_MASK |
		    INT_MASK0_HEADER_ERR_MASK);

	return oa_tc6_write_register(tc6, OA_TC6_REG_INT_MASK0, regval);
}

static int oa_tc6_enable_data_transfer(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &value);
	if (ret)
		return ret;

	/* Enable configuration synchronization for data transfer */
	value |= CONFIG0_SYNC;

	return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, value);
}

static void oa_tc6_cleanup_ongoing_rx_skb(struct oa_tc6 *tc6)
{
	if (tc6->rx_skb) {
		tc6->netdev->stats.rx_dropped++;
		kfree_skb(tc6->rx_skb);
		tc6->rx_skb = NULL;
	}
}

static void oa_tc6_cleanup_ongoing_tx_skb(struct oa_tc6 *tc6)
{
	if (tc6->ongoing_tx_skb) {
		tc6->netdev->stats.tx_dropped++;
		kfree_skb(tc6->ongoing_tx_skb);
		tc6->ongoing_tx_skb = NULL;
	}
}

static int oa_tc6_process_extended_status(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_STATUS0, &value);
	if (ret) {
		netdev_err(tc6->netdev, "STATUS0 register read failed: %d\n",
			   ret);
		return ret;
	}

	/* Clear the error interrupts status */
	ret = oa_tc6_write_register(tc6, OA_TC6_REG_STATUS0, value);
	if (ret) {
		netdev_err(tc6->netdev, "STATUS0 register write failed: %d\n",
			   ret);
		return ret;
	}

	if (FIELD_GET(STATUS0_RX_BUFFER_OVERFLOW_ERROR, value)) {
		tc6->rx_buf_overflow = true;
		oa_tc6_cleanup_ongoing_rx_skb(tc6);
		net_err_ratelimited("%s: Receive buffer overflow error\n",
				    tc6->netdev->name);
		return -EAGAIN;
	}
	if (FIELD_GET(STATUS0_TX_PROTOCOL_ERROR, value)) {
		netdev_err(tc6->netdev, "Transmit protocol error\n");
		return -ENODEV;
	}
	/* TODO: Currently loss of frame and header errors are treated as
	 * non-recoverable errors. They will be handled in the next version.
	 */
	if (FIELD_GET(STATUS0_LOSS_OF_FRAME_ERROR, value)) {
		netdev_err(tc6->netdev, "Loss of frame error\n");
		return -ENODEV;
	}
	if (FIELD_GET(STATUS0_HEADER_ERROR, value)) {
		netdev_err(tc6->netdev, "Header error\n");
		return -ENODEV;
	}

	return 0;
}

static int oa_tc6_process_rx_chunk_footer(struct oa_tc6 *tc6, u32 footer)
{
	/* Process rx chunk footer for the following,
	 * 1. tx credits
	 * 2. errors if any from MAC-PHY
	 * 3. receive chunks available
	 */
	tc6->tx_credits = FIELD_GET(OA_TC6_DATA_FOOTER_TX_CREDITS, footer);
	tc6->rx_chunks_available = FIELD_GET(OA_TC6_DATA_FOOTER_RX_CHUNKS,
					     footer);

	if (FIELD_GET(OA_TC6_DATA_FOOTER_EXTENDED_STS, footer)) {
		int ret = oa_tc6_process_extended_status(tc6);

		if (ret)
			return ret;
	}

	/* TODO: Currently received header bad and configuration unsync errors
	 * are treated as non-recoverable errors. They will be handled in the
	 * next version.
	 */
	if (FIELD_GET(OA_TC6_DATA_FOOTER_RXD_HEADER_BAD, footer)) {
		netdev_err(tc6->netdev, "Rxd header bad error\n");
		return -ENODEV;
	}

	if (!FIELD_GET(OA_TC6_DATA_FOOTER_CONFIG_SYNC, footer)) {
		netdev_err(tc6->netdev, "Config unsync error\n");
		return -ENODEV;
	}

	return 0;
}

static void oa_tc6_submit_rx_skb(struct oa_tc6 *tc6)
{
	tc6->rx_skb->protocol = eth_type_trans(tc6->rx_skb, tc6->netdev);
	tc6->netdev->stats.rx_packets++;
	tc6->netdev->stats.rx_bytes += tc6->rx_skb->len;

	netif_rx(tc6->rx_skb);

	tc6->rx_skb = NULL;
}

static void oa_tc6_update_rx_skb(struct oa_tc6 *tc6, u8 *payload, u8 length)
{
	memcpy(skb_put(tc6->rx_skb, length), payload, length);
}

static int oa_tc6_allocate_rx_skb(struct oa_tc6 *tc6)
{
	tc6->rx_skb = netdev_alloc_skb_ip_align(tc6->netdev, tc6->netdev->mtu +
						ETH_HLEN + ETH_FCS_LEN);
	if (!tc6->rx_skb) {
		tc6->netdev->stats.rx_dropped++;
		return -ENOMEM;
	}

	return 0;
}

static int oa_tc6_prcs_complete_rx_frame(struct oa_tc6 *tc6, u8 *payload,
					 u16 size)
{
	int ret;

	ret = oa_tc6_allocate_rx_skb(tc6);
	if (ret)
		return ret;

	oa_tc6_update_rx_skb(tc6, payload, size);

	oa_tc6_submit_rx_skb(tc6);

	return 0;
}

static int oa_tc6_prcs_rx_frame_start(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	int ret;

	ret = oa_tc6_allocate_rx_skb(tc6);
	if (ret)
		return ret;

	oa_tc6_update_rx_skb(tc6, payload, size);

	return 0;
}

static void oa_tc6_prcs_rx_frame_end(struct oa_tc6 *tc6, u8 *payload, u16 size)
{
	oa_tc6_update_rx_skb(tc6, payload, size);

	oa_tc6_submit_rx_skb(tc6);
}

static void oa_tc6_prcs_ongoing_rx_frame(struct oa_tc6 *tc6, u8 *payload,
					 u32 footer)
{
	oa_tc6_update_rx_skb(tc6, payload, OA_TC6_CHUNK_PAYLOAD_SIZE);
}

static int oa_tc6_prcs_rx_chunk_payload(struct oa_tc6 *tc6, u8 *data,
					u32 footer)
{
	u8 start_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_START_WORD_OFFSET,
					 footer) * sizeof(u32);
	u8 end_byte_offset = FIELD_GET(OA_TC6_DATA_FOOTER_END_BYTE_OFFSET,
				       footer);
	bool start_valid = FIELD_GET(OA_TC6_DATA_FOOTER_START_VALID, footer);
	bool end_valid = FIELD_GET(OA_TC6_DATA_FOOTER_END_VALID, footer);
	u16 size;

	/* Restart the new rx frame after receiving rx buffer overflow error */
	if (start_valid && tc6->rx_buf_overflow)
		tc6->rx_buf_overflow = false;

	if (tc6->rx_buf_overflow)
		return 0;

	/* Process the chunk with complete rx frame */
	if (start_valid && end_valid && start_byte_offset < end_byte_offset) {
		size = end_byte_offset + 1 - start_byte_offset;
		return oa_tc6_prcs_complete_rx_frame(tc6,
						     &data[start_byte_offset],
						     size);
	}

	/* Process the chunk with only rx frame start */
	if (start_valid && !end_valid) {
		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process the chunk with only rx frame end */
	if (end_valid && !start_valid) {
		size = end_byte_offset + 1;
		oa_tc6_prcs_rx_frame_end(tc6, data, size);
		return 0;
	}

	/* Process the chunk with previous rx frame end and next rx frame
	 * start.
	 */
	if (start_valid && end_valid && start_byte_offset > end_byte_offset) {
		/* After rx buffer overflow error received, there might be a
		 * possibility of getting an end valid of a previously
		 * incomplete rx frame along with the new rx frame start valid.
		 */
		if (tc6->rx_skb) {
			size = end_byte_offset + 1;
			oa_tc6_prcs_rx_frame_end(tc6, data, size);
		}

		size = OA_TC6_CHUNK_PAYLOAD_SIZE - start_byte_offset;
		return oa_tc6_prcs_rx_frame_start(tc6,
						  &data[start_byte_offset],
						  size);
	}

	/* Process the chunk with ongoing rx frame data */
	oa_tc6_prcs_ongoing_rx_frame(tc6, data, footer);

	return 0;
}

static u32 oa_tc6_get_rx_chunk_footer(struct oa_tc6 *tc6, u16 footer_offset)
{
	u8 *rx_buf = tc6->spi_data_rx_buf;
	__be32 footer;

	footer = *((__be32 *)&rx_buf[footer_offset]);

	return be32_to_cpu(footer);
}

static int oa_tc6_process_spi_data_rx_buf(struct oa_tc6 *tc6, u16 length)
{
	u16 no_of_rx_chunks = length / OA_TC6_CHUNK_SIZE;
	u32 footer;
	int ret;

	/* All the rx chunks in the receive SPI data buffer are examined here */
	for (int i = 0; i < no_of_rx_chunks; i++) {
		/* Last 4 bytes in each received chunk consist of the footer
		 * info.
		 */
		footer = oa_tc6_get_rx_chunk_footer(tc6, i * OA_TC6_CHUNK_SIZE +
						    OA_TC6_CHUNK_PAYLOAD_SIZE);

		ret = oa_tc6_process_rx_chunk_footer(tc6, footer);
		if (ret)
			return ret;

		/* If the chunk is marked data valid then process it for the
		 * information needed to determine the validity and the
		 * location of the receive frame data.
		 */
		if (FIELD_GET(OA_TC6_DATA_FOOTER_DATA_VALID, footer)) {
			u8 *payload = tc6->spi_data_rx_buf + i *
				      OA_TC6_CHUNK_SIZE;

			ret = oa_tc6_prcs_rx_chunk_payload(tc6, payload,
							   footer);
			if (ret)
				return ret;
		}
	}

	return 0;
}
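
/* On-wire layout handled above, for reference: each 68-byte chunk on MOSI is
 * a 4-byte data header followed by a 64-byte payload, while on MISO it is a
 * 64-byte payload followed by a 4-byte footer, which is why the footer is
 * fetched at chunk offset + OA_TC6_CHUNK_PAYLOAD_SIZE.
 */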

static __be32 oa_tc6_prepare_data_header(bool data_valid, bool start_valid,
					 bool end_valid, u8 end_byte_offset)
{
	u32 header = FIELD_PREP(OA_TC6_DATA_HEADER_DATA_NOT_CTRL,
				OA_TC6_DATA_HEADER) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_DATA_VALID, data_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_START_VALID, start_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_END_VALID, end_valid) |
		     FIELD_PREP(OA_TC6_DATA_HEADER_END_BYTE_OFFSET,
				end_byte_offset);

	header |= FIELD_PREP(OA_TC6_DATA_HEADER_PARITY,
			     oa_tc6_get_parity(header));

	return cpu_to_be32(header);
}

static void oa_tc6_add_tx_skb_to_spi_buf(struct oa_tc6 *tc6)
{
	enum oa_tc6_data_end_valid_info end_valid = OA_TC6_DATA_END_INVALID;
	__be32 *tx_buf = tc6->spi_data_tx_buf + tc6->spi_data_tx_buf_offset;
	u16 remaining_len = tc6->ongoing_tx_skb->len - tc6->tx_skb_offset;
	u8 *tx_skb_data = tc6->ongoing_tx_skb->data + tc6->tx_skb_offset;
	enum oa_tc6_data_start_valid_info start_valid;
	u8 end_byte_offset = 0;
	u16 length_to_copy;

	/* Initial value is assigned here to avoid more than 80 characters in
	 * the declaration place.
	 */
	start_valid = OA_TC6_DATA_START_INVALID;

	/* Set start valid if the current tx chunk contains the start of the tx
	 * ethernet frame.
	 */
	if (!tc6->tx_skb_offset)
		start_valid = OA_TC6_DATA_START_VALID;

	/* If the remaining tx skb length is more than the chunk payload size of
	 * 64 bytes then copy only 64 bytes and leave the ongoing tx skb for
	 * the next tx chunk.
	 */
	length_to_copy = min_t(u16, remaining_len, OA_TC6_CHUNK_PAYLOAD_SIZE);

	/* Copy the tx skb data to the tx chunk payload buffer */
	memcpy(tx_buf + 1, tx_skb_data, length_to_copy);
	tc6->tx_skb_offset += length_to_copy;

	/* Set end valid if the current tx chunk contains the end of the tx
	 * ethernet frame.
	 */
	if (tc6->ongoing_tx_skb->len == tc6->tx_skb_offset) {
		end_valid = OA_TC6_DATA_END_VALID;
		end_byte_offset = length_to_copy - 1;
		tc6->tx_skb_offset = 0;
		tc6->netdev->stats.tx_bytes += tc6->ongoing_tx_skb->len;
		tc6->netdev->stats.tx_packets++;
		kfree_skb(tc6->ongoing_tx_skb);
		tc6->ongoing_tx_skb = NULL;
	}

	*tx_buf = oa_tc6_prepare_data_header(OA_TC6_DATA_VALID, start_valid,
					     end_valid, end_byte_offset);
	tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
}

static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
{
	u16 used_tx_credits;

	/* Get tx skbs and convert them into tx chunks based on the tx credits
	 * available.
	 */
	for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
	     used_tx_credits++) {
		if (!tc6->ongoing_tx_skb) {
			spin_lock_bh(&tc6->tx_skb_lock);
			tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
			tc6->waiting_tx_skb = NULL;
			spin_unlock_bh(&tc6->tx_skb_lock);
		}
		if (!tc6->ongoing_tx_skb)
			break;

		oa_tc6_add_tx_skb_to_spi_buf(tc6);
	}

	return used_tx_credits * OA_TC6_CHUNK_SIZE;
}
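
/* Worked example (illustrative): transmitting a 100-byte frame with enough tx
 * credits consumes two chunks, the first carrying bytes 0-63 with start_valid
 * set and the second carrying bytes 64-99 with end_valid set and
 * end_byte_offset 35, so the function above returns 2 * 68 = 136 bytes to
 * transfer.
 */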

static void oa_tc6_add_empty_chunks_to_spi_buf(struct oa_tc6 *tc6,
					       u16 needed_empty_chunks)
{
	__be32 header;

	header = oa_tc6_prepare_data_header(OA_TC6_DATA_INVALID,
					    OA_TC6_DATA_START_INVALID,
					    OA_TC6_DATA_END_INVALID, 0);

	while (needed_empty_chunks--) {
		__be32 *tx_buf = tc6->spi_data_tx_buf +
				 tc6->spi_data_tx_buf_offset;

		*tx_buf = header;
		tc6->spi_data_tx_buf_offset += OA_TC6_CHUNK_SIZE;
	}
}

static u16 oa_tc6_prepare_spi_tx_buf_for_rx_chunks(struct oa_tc6 *tc6, u16 len)
{
	u16 tx_chunks = len / OA_TC6_CHUNK_SIZE;
	u16 needed_empty_chunks;

	/* If there are more chunks to receive than to transmit, we need to add
	 * enough empty tx chunks to allow the reception of the excess rx
	 * chunks.
	 */
	if (tx_chunks >= tc6->rx_chunks_available)
		return len;

	needed_empty_chunks = tc6->rx_chunks_available - tx_chunks;

	oa_tc6_add_empty_chunks_to_spi_buf(tc6, needed_empty_chunks);

	return needed_empty_chunks * OA_TC6_CHUNK_SIZE + len;
}

static int oa_tc6_try_spi_transfer(struct oa_tc6 *tc6)
{
	int ret;

	while (true) {
		u16 spi_len = 0;

		tc6->spi_data_tx_buf_offset = 0;

		if (tc6->ongoing_tx_skb || tc6->waiting_tx_skb)
			spi_len = oa_tc6_prepare_spi_tx_buf_for_tx_skbs(tc6);

		spi_len = oa_tc6_prepare_spi_tx_buf_for_rx_chunks(tc6, spi_len);

		if (tc6->int_flag) {
			tc6->int_flag = false;
			if (spi_len == 0) {
				oa_tc6_add_empty_chunks_to_spi_buf(tc6, 1);
				spi_len = OA_TC6_CHUNK_SIZE;
			}
		}

		if (spi_len == 0)
			break;

		ret = oa_tc6_spi_transfer(tc6, OA_TC6_DATA_HEADER, spi_len);
		if (ret) {
			netdev_err(tc6->netdev, "SPI data transfer failed: %d\n",
				   ret);
			return ret;
		}

		ret = oa_tc6_process_spi_data_rx_buf(tc6, spi_len);
		if (ret) {
			if (ret == -EAGAIN)
				continue;

			oa_tc6_cleanup_ongoing_tx_skb(tc6);
			oa_tc6_cleanup_ongoing_rx_skb(tc6);
			netdev_err(tc6->netdev, "Device error: %d\n", ret);
			return ret;
		}

		if (!tc6->waiting_tx_skb && netif_queue_stopped(tc6->netdev))
			netif_wake_queue(tc6->netdev);
	}

	return 0;
}

static int oa_tc6_spi_thread_handler(void *data)
{
	struct oa_tc6 *tc6 = data;
	int ret;

	while (likely(!kthread_should_stop())) {
		/* This kthread will be woken up if there is a tx skb or mac-phy
		 * interrupt to perform spi transfer with tx chunks.
		 */
		wait_event_interruptible(tc6->spi_wq, tc6->int_flag ||
					 (tc6->waiting_tx_skb &&
					  tc6->tx_credits) ||
					 kthread_should_stop());

		if (kthread_should_stop())
			break;

		ret = oa_tc6_try_spi_transfer(tc6);
		if (ret)
			return ret;
	}

	return 0;
}

static int oa_tc6_update_buffer_status_from_register(struct oa_tc6 *tc6)
{
	u32 value;
	int ret;

	/* Initially, the tx credits and rx chunks available are updated from
	 * the register as no data transfer has been performed yet. Later they
	 * will be updated from the rx footer.
	 */
	ret = oa_tc6_read_register(tc6, OA_TC6_REG_BUFFER_STATUS, &value);
	if (ret)
		return ret;

	tc6->tx_credits = FIELD_GET(BUFFER_STATUS_TX_CREDITS_AVAILABLE, value);
	tc6->rx_chunks_available = FIELD_GET(BUFFER_STATUS_RX_CHUNKS_AVAILABLE,
					     value);

	return 0;
}

static irqreturn_t oa_tc6_macphy_isr(int irq, void *data)
{
	struct oa_tc6 *tc6 = data;

	/* MAC-PHY interrupt can occur for the following reasons.
	 * - availability of tx credits if it was 0 before and not reported in
	 *   the previous rx footer.
	 * - availability of rx chunks if it was 0 before and not reported in
	 *   the previous rx footer.
	 * - extended status event not reported in the previous rx footer.
	 */
	tc6->int_flag = true;
	/* Wake spi kthread to perform spi transfer */
	wake_up_interruptible(&tc6->spi_wq);

	return IRQ_HANDLED;
}

/**
 * oa_tc6_zero_align_receive_frame_enable - function to enable zero align
 * receive frame feature.
 * @tc6: oa_tc6 struct.
 *
 * Return: 0 on success otherwise failed.
 */
int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6)
{
	u32 regval;
	int ret;

	ret = oa_tc6_read_register(tc6, OA_TC6_REG_CONFIG0, &regval);
	if (ret)
		return ret;

	/* Set Zero-Align Receive Frame Enable */
	regval |= CONFIG0_ZARFE_ENABLE;

	return oa_tc6_write_register(tc6, OA_TC6_REG_CONFIG0, regval);
}
EXPORT_SYMBOL_GPL(oa_tc6_zero_align_receive_frame_enable);
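
/* A MAC-PHY driver would typically call the helper above once after
 * oa_tc6_init() succeeds, e.g. (illustrative only):
 *
 *	ret = oa_tc6_zero_align_receive_frame_enable(priv->tc6);
 *	if (ret)
 *		return ret;
 */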

/**
 * oa_tc6_start_xmit - function for sending the tx skb which consists of an
 *		       ethernet frame.
 * @tc6: oa_tc6 struct.
 * @skb: socket buffer in which the ethernet frame is stored.
 *
 * Return: NETDEV_TX_OK if the transmit ethernet frame skb is accepted for
 * transmission otherwise returns NETDEV_TX_BUSY.
 */
netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
{
	if (tc6->waiting_tx_skb) {
		netif_stop_queue(tc6->netdev);
		return NETDEV_TX_BUSY;
	}

	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		tc6->netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	spin_lock_bh(&tc6->tx_skb_lock);
	tc6->waiting_tx_skb = skb;
	spin_unlock_bh(&tc6->tx_skb_lock);

	/* Wake spi kthread to perform spi transfer */
	wake_up_interruptible(&tc6->spi_wq);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(oa_tc6_start_xmit);
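
/* Typical wiring from a MAC driver's .ndo_start_xmit (illustrative only; the
 * "priv" layout is made up):
 *
 *	static netdev_tx_t lan_start_xmit(struct sk_buff *skb,
 *					  struct net_device *netdev)
 *	{
 *		struct lan_priv *priv = netdev_priv(netdev);
 *
 *		return oa_tc6_start_xmit(priv->tc6, skb);
 *	}
 */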

/**
 * oa_tc6_init - allocates and initializes oa_tc6 structure.
 * @spi: device with which data will be exchanged.
 * @netdev: network device interface structure.
 *
 * Return: pointer reference to the oa_tc6 structure if the MAC-PHY
 * initialization is successful otherwise NULL.
 */
struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
{
	struct oa_tc6 *tc6;
	int ret;

	tc6 = devm_kzalloc(&spi->dev, sizeof(*tc6), GFP_KERNEL);
	if (!tc6)
		return NULL;

	tc6->spi = spi;
	tc6->netdev = netdev;
	tc6->dev = &spi->dev;
	SET_NETDEV_DEV(netdev, &spi->dev);
	mutex_init(&tc6->spi_ctrl_lock);
	spin_lock_init(&tc6->tx_skb_lock);

	/* Set the SPI controller to pump at realtime priority */
	tc6->spi->rt = true;
	spi_setup(tc6->spi);

	tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_CTRL_SPI_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_ctrl_tx_buf)
		return NULL;

	tc6->spi_ctrl_rx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_CTRL_SPI_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_ctrl_rx_buf)
		return NULL;

	tc6->spi_data_tx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_SPI_DATA_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_data_tx_buf)
		return NULL;

	tc6->spi_data_rx_buf = devm_kzalloc(&tc6->spi->dev,
					    OA_TC6_SPI_DATA_BUF_SIZE,
					    GFP_KERNEL);
	if (!tc6->spi_data_rx_buf)
		return NULL;

	ret = oa_tc6_sw_reset_macphy(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC-PHY software reset failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_unmask_macphy_error_interrupts(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC-PHY error interrupts unmask failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_phy_init(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"MAC internal PHY initialization failed: %d\n", ret);
		return NULL;
	}

	ret = oa_tc6_enable_data_transfer(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev, "Failed to enable data transfer: %d\n",
			ret);
		goto phy_exit;
	}

	ret = oa_tc6_update_buffer_status_from_register(tc6);
	if (ret) {
		dev_err(&tc6->spi->dev,
			"Failed to update buffer status: %d\n", ret);
		goto phy_exit;
	}

	init_waitqueue_head(&tc6->spi_wq);

	tc6->spi_thread = kthread_run(oa_tc6_spi_thread_handler, tc6,
				      "oa-tc6-spi-thread");
	if (IS_ERR(tc6->spi_thread)) {
		dev_err(&tc6->spi->dev, "Failed to create SPI thread\n");
		goto phy_exit;
	}

	sched_set_fifo(tc6->spi_thread);

	ret = devm_request_irq(&tc6->spi->dev, tc6->spi->irq, oa_tc6_macphy_isr,
			       IRQF_TRIGGER_FALLING, dev_name(&tc6->spi->dev),
			       tc6);
	if (ret) {
		dev_err(&tc6->spi->dev, "Failed to request macphy isr %d\n",
			ret);
		goto kthread_stop;
	}

	/* oa_tc6_sw_reset_macphy() function resets and clears the MAC-PHY reset
	 * complete status. IRQ is also asserted on reset completion and it
	 * remains asserted until the MAC-PHY receives a data chunk. So
	 * performing an empty data chunk transmission will deassert the IRQ.
	 * Refer sections 7.7 and 9.2.8.8 in the OPEN Alliance specification
	 * for more details.
	 */
	tc6->int_flag = true;
	wake_up_interruptible(&tc6->spi_wq);

	return tc6;

kthread_stop:
	kthread_stop(tc6->spi_thread);
phy_exit:
	oa_tc6_phy_exit(tc6);
	return NULL;
}
EXPORT_SYMBOL_GPL(oa_tc6_init);

/**
 * oa_tc6_exit - exit function.
 * @tc6: oa_tc6 struct.
 */
void oa_tc6_exit(struct oa_tc6 *tc6)
{
	oa_tc6_phy_exit(tc6);
	kthread_stop(tc6->spi_thread);
	dev_kfree_skb_any(tc6->ongoing_tx_skb);
	dev_kfree_skb_any(tc6->waiting_tx_skb);
	dev_kfree_skb_any(tc6->rx_skb);
}
EXPORT_SYMBOL_GPL(oa_tc6_exit);

MODULE_DESCRIPTION("OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface Lib");
MODULE_AUTHOR("Parthiban Veerasooran <parthiban.veerasooran@microchip.com>");
MODULE_LICENSE("GPL");