// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfserl.h>

#define container_obj(layr) ((struct cfserl *) layr)

#define CFSERL_STX 0x02
#define SERIAL_MINIUM_PACKET_SIZE 4
#define SERIAL_MAX_FRAMESIZE 4096
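
/*
 * Wire format implied by the receive path below, when STX framing is
 * enabled: a frame is an STX byte, a little-endian 16-bit length field,
 * then "length" payload bytes. expectlen = length + 2 therefore covers
 * everything that follows the STX byte.
 *
 *	+------+----------------+------------------------+
 *	| STX  | length (le16)  | payload (length bytes) |
 *	+------+----------------+------------------------+
 */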

struct cfserl {
	struct cflayer layer;
	struct cfpkt *incomplete_frm;
	/* Protects parallel processing of incoming packets */
	spinlock_t sync;
	bool usestx;
};

static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);

void cfserl_release(struct cflayer *layer)
{
	kfree(layer);
}
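
/*
 * Allocate and initialise a serial framing layer. The caller is
 * responsible for linking the returned layer into the CAIF stack;
 * the instance argument is not referenced in the body below.
 */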
struct cflayer *cfserl_create(int instance, bool use_stx)
{
	struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);

	if (!this)
		return NULL;
	caif_assert(offsetof(struct cfserl, layer) == 0);
	this->layer.receive = cfserl_receive;
	this->layer.transmit = cfserl_transmit;
	this->layer.ctrlcmd = cfserl_ctrlcmd;
	this->usestx = use_stx;
	spin_lock_init(&this->sync);
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
	return &this->layer;
}
static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(l);
	u16 pkt_len;
	struct cfpkt *pkt = NULL;
	struct cfpkt *tail_pkt = NULL;
	u8 tmp8;
	u16 tmp;
	u8 stx = CFSERL_STX;
	int ret;
	u16 expectlen = 0;

	caif_assert(newpkt != NULL);
	spin_lock(&layr->sync);

	if (layr->incomplete_frm != NULL) {
		layr->incomplete_frm =
		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
		pkt = layr->incomplete_frm;
		if (pkt == NULL) {
			spin_unlock(&layr->sync);
			return -ENOMEM;
		}
	} else {
		pkt = newpkt;
	}
	layr->incomplete_frm = NULL;
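
	/* Process the (possibly merged) data one frame at a time. */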
	do {
		/* Search for STX at start of pkt if STX is used */
		if (layr->usestx) {
			cfpkt_extr_head(pkt, &tmp8, 1);
			if (tmp8 != CFSERL_STX) {
				while (cfpkt_more(pkt)
				       && tmp8 != CFSERL_STX) {
					cfpkt_extr_head(pkt, &tmp8, 1);
				}
				if (!cfpkt_more(pkt)) {
					cfpkt_destroy(pkt);
					layr->incomplete_frm = NULL;
					spin_unlock(&layr->sync);
					return -EPROTO;
				}
			}
		}

		pkt_len = cfpkt_getlen(pkt);

		/*
		 *  pkt_len is the accumulated length of the packet data
		 *  we have received so far.
		 *  Exit if frame doesn't hold length.
		 */
		if (pkt_len < 2) {
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 *  Find length of frame.
		 *  expectlen is the length we need for a full frame.
		 */
		cfpkt_peek_head(pkt, &tmp, 2);
		expectlen = le16_to_cpu(tmp) + 2;

		/*
		 * Frame error handling
		 */
		if (expectlen < SERIAL_MINIUM_PACKET_SIZE
		    || expectlen > SERIAL_MAX_FRAMESIZE) {
			if (!layr->usestx) {
				if (pkt != NULL)
					cfpkt_destroy(pkt);
				layr->incomplete_frm = NULL;
				spin_unlock(&layr->sync);
				return -EPROTO;
			}
			continue;
		}

		if (pkt_len < expectlen) {
			/* Too little received data */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 *  Enough data for at least one frame.
		 *  Split the frame, if too long.
		 */
		if (pkt_len > expectlen)
			tail_pkt = cfpkt_split(pkt, expectlen);
		else
			tail_pkt = NULL;

		/* Send the first part of packet upwards. */
		spin_unlock(&layr->sync);
		ret = layr->layer.up->receive(layr->layer.up, pkt);
		spin_lock(&layr->sync);
		if (ret == -EILSEQ) {
			if (layr->usestx) {
				if (tail_pkt != NULL)
					pkt = cfpkt_append(pkt, tail_pkt, 0);
				/* Start search for next STX if frame failed */
				continue;
			} else {
				cfpkt_destroy(pkt);
				pkt = NULL;
			}
		}

		pkt = tail_pkt;

	} while (pkt != NULL);

	spin_unlock(&layr->sync);
	return 0;
}
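
/*
 * Transmit: prepend the STX marker when STX framing is in use, then
 * pass the packet to the layer below.
 */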
static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(layer);
	u8 tmp8 = CFSERL_STX;

	if (layr->usestx)
		cfpkt_add_head(newpkt, &tmp8, 1);
	return layer->dn->transmit(layer->dn, newpkt);
}
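
/* Control commands are forwarded transparently to the layer above. */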
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	layr->up->ctrlcmd(layr->up, ctrl, phyid);
}