treewide: remove redundant IS_ERR() before error code check
drivers/net/dsa/microchip/ksz_common.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz_common.h"

void ksz_update_port_member(struct ksz_device *dev, int port)
{
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->port_cnt; i++) {
		if (i == port || i == dev->cpu_port)
			continue;
		p = &dev->ports[i];
		if (!(dev->member & (1 << i)))
			continue;

		/* Port is a member of the bridge and is forwarding. */
		if (p->stp_state == BR_STATE_FORWARDING &&
		    p->member != dev->member)
			dev->dev_ops->cfg_port_member(dev, i, dev->member);
	}
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);
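
/* port_r_cnt() refreshes one port's MIB counters in two passes: the
 * per-register counters up to reg_mib_cnt, then the packet counters, which
 * also update the extra "dropped" slot at the end of the counter array.
 * Callers preset mib->cnt_ptr to reg_mib_cnt when only the dropped counters
 * are wanted (e.g. while the link is down).
 */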
static void port_r_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;
	u64 *dropped;

	/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->reg_mib_cnt) {
		dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
					&mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}

	/* last one in storage */
	dropped = &mib->counters[dev->mib_cnt];

	/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->mib_cnt) {
		dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
					dropped, &mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}
	mib->cnt_ptr = 0;
}
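
/* ksz_mib_read_work() is the deferred work queued by the MIB timer (and when
 * a link goes down): it walks every used port and refreshes its counters
 * under the per-port counter mutex, skipping straight to the dropped
 * counters when the port was not explicitly asked to read and its link is
 * down.
 */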
static void ksz_mib_read_work(struct work_struct *work)
{
	struct ksz_device *dev = container_of(work, struct ksz_device,
					      mib_read);
	struct ksz_port_mib *mib;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->mib_port_cnt; i++) {
		if (dsa_is_unused_port(dev->ds, i))
			continue;

		p = &dev->ports[i];
		mib = &p->mib;
		mutex_lock(&mib->cnt_mutex);

		/* Only read MIB counters when the port is told to do.
		 * If not, read only dropped counters when link is not up.
		 */
		if (!p->read) {
			const struct dsa_port *dp = dsa_to_port(dev->ds, i);

			if (!netif_carrier_ok(dp->slave))
				mib->cnt_ptr = dev->reg_mib_cnt;
		}
		port_r_cnt(dev, i);
		p->read = false;
		mutex_unlock(&mib->cnt_mutex);
	}
}

static void mib_monitor(struct timer_list *t)
{
	struct ksz_device *dev = from_timer(dev, t, mib_read_timer);

	mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval);
	schedule_work(&dev->mib_read);
}

void ksz_init_mib_timer(struct ksz_device *dev)
{
	int i;

	/* Read MIB counters every 30 seconds to avoid overflow. */
	dev->mib_read_interval = msecs_to_jiffies(30000);

	INIT_WORK(&dev->mib_read, ksz_mib_read_work);
	timer_setup(&dev->mib_read_timer, mib_monitor, 0);

	for (i = 0; i < dev->mib_port_cnt; i++)
		dev->dev_ops->port_init_cnt(dev, i);

	/* Start the timer 2 seconds later. */
	dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000);
	add_timer(&dev->mib_read_timer);
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);

int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
	struct ksz_device *dev = ds->priv;
	u16 val = 0xffff;

	dev->dev_ops->r_phy(dev, addr, reg, &val);

	return val;
}
EXPORT_SYMBOL_GPL(ksz_phy_read16);

int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->w_phy(dev, addr, reg, val);

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_phy_write16);

void ksz_adjust_link(struct dsa_switch *ds, int port,
		     struct phy_device *phydev)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p = &dev->ports[port];

	/* Read all MIB counters when the link is going down. */
	if (!phydev->link) {
		p->read = true;
		schedule_work(&dev->mib_read);
	}
	mutex_lock(&dev->dev_mutex);
	if (!phydev->link)
		dev->live_ports &= ~(1 << port);
	else
		/* Remember which port is connected and active. */
		dev->live_ports |= (1 << port) & dev->on_ports;
	mutex_unlock(&dev->dev_mutex);
}
EXPORT_SYMBOL_GPL(ksz_adjust_link);

int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ksz_device *dev = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	return dev->mib_cnt;
}
EXPORT_SYMBOL_GPL(ksz_sset_count);

void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	struct ksz_device *dev = ds->priv;
	struct ksz_port_mib *mib;

	mib = &dev->ports[port].mib;
	mutex_lock(&mib->cnt_mutex);

	/* Only read dropped counters if no link. */
	if (!netif_carrier_ok(dp->slave))
		mib->cnt_ptr = dev->reg_mib_cnt;
	port_r_cnt(dev, port);
	memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
	mutex_unlock(&mib->cnt_mutex);
}
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);

int ksz_port_bridge_join(struct dsa_switch *ds, int port,
			 struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member |= (1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called after to put the port in
	 * appropriate state so there is no need to do anything.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_join);

void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
			   struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member &= ~(1 << port);
	dev->member &= ~(1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called after to put the port in
	 * forwarding state so there is no need to do anything.
	 */
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);

void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->flush_dyn_mac_table(dev, port);
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);

int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	/* nothing needed */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);
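
/* ksz_port_fdb_dump() walks the hardware dynamic MAC table one entry at a
 * time; the read callback also reports how many valid entries exist, which
 * bounds the loop, and only entries whose member mask includes this port
 * are passed to the DSA dump callback.
 */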
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
		      void *data)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;
	u16 i = 0;
	u16 entries = 0;
	u8 timestamp = 0;
	u8 fid;
	u8 member;
	struct alu_struct alu;

	do {
		alu.is_static = false;
		ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
						    &member, &timestamp,
						    &entries);
		if (!ret && (member & BIT(port))) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				break;
		}
		i++;
	} while (i < entries);
	if (i >= entries)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);

int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	/* nothing to do */
	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);
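
/* ksz_port_mdb_add() searches the static MAC table for an entry matching
 * the group address and FID; if none exists it reuses the first empty slot
 * ("empty" is stored as index + 1 so that 0 means no free slot was found),
 * then sets this port's bit in the forwarding mask and writes the entry
 * back.
 */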
void ksz_port_mdb_add(struct dsa_switch *ds, int port,
		      const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int empty = 0;

	alu.port_forward = 0;
	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		/* Remember the first empty entry. */
		} else if (!empty) {
			empty = index + 1;
		}
	}

	/* no available entry */
	if (index == dev->num_statics && !empty)
		return;

	/* add entry */
	if (index == dev->num_statics) {
		index = empty - 1;
		memset(&alu, 0, sizeof(alu));
		memcpy(alu.mac, mdb->addr, ETH_ALEN);
		alu.is_static = true;
	}
	alu.port_forward |= BIT(port);
	if (mdb->vid) {
		alu.is_use_fid = true;

		/* Need a way to map VID to FID. */
		alu.fid = mdb->vid;
	}
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);

int ksz_port_mdb_del(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int ret = 0;

	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		}
	}

	/* no available entry */
	if (index == dev->num_statics)
		goto exit;

	/* clear port */
	alu.port_forward &= ~BIT(port);
	if (!alu.port_forward)
		alu.is_static = false;
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);

exit:
	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_del);

int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct ksz_device *dev = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return 0;

	/* setup slave port */
	dev->dev_ops->port_setup(dev, port, false);
	if (dev->dev_ops->phy_setup)
		dev->dev_ops->phy_setup(dev, port, phy);

	/* port_stp_state_set() will be called after to enable the port so
	 * there is no need to do anything.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_enable_port);

void ksz_disable_port(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return;

	dev->on_ports &= ~(1 << port);
	dev->live_ports &= ~(1 << port);

	/* port_stp_state_set() will be called after to disable the port so
	 * there is no need to do anything.
	 */
}
EXPORT_SYMBOL_GPL(ksz_disable_port);

struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
	struct dsa_switch *ds;
	struct ksz_device *swdev;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;
	ds->num_ports = DSA_MAX_PORTS;

	swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
	if (!swdev)
		return NULL;

	ds->priv = swdev;
	swdev->dev = base;

	swdev->ds = ds;
	swdev->priv = priv;

	return swdev;
}
EXPORT_SYMBOL(ksz_switch_alloc);
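
/* ksz_switch_register() sequence: take the chip ID from platform data if
 * present, pulse the optional "reset" GPIO, initialize the locks, detect
 * and init the chip through the per-chip ops, apply device tree overrides
 * (host port phy-mode and "microchip,synclko-125"), and finally hand the
 * switch to the DSA core.
 */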
int ksz_switch_register(struct ksz_device *dev,
			const struct ksz_dev_ops *ops)
{
	phy_interface_t interface;
	int ret;

	if (dev->pdata)
		dev->chip_id = dev->pdata->chip_id;

	dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
						  GPIOD_OUT_LOW);
	if (IS_ERR(dev->reset_gpio))
		return PTR_ERR(dev->reset_gpio);

	if (dev->reset_gpio) {
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
		mdelay(10);
		gpiod_set_value_cansleep(dev->reset_gpio, 0);
	}

	mutex_init(&dev->dev_mutex);
	mutex_init(&dev->regmap_mutex);
	mutex_init(&dev->alu_mutex);
	mutex_init(&dev->vlan_mutex);

	dev->dev_ops = ops;

	if (dev->dev_ops->detect(dev))
		return -EINVAL;

	ret = dev->dev_ops->init(dev);
	if (ret)
		return ret;

	/* Host port interface will be self detected, or specifically set in
	 * device tree.
	 */
	if (dev->dev->of_node) {
		ret = of_get_phy_mode(dev->dev->of_node, &interface);
		if (ret == 0)
			dev->interface = interface;
		dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
							 "microchip,synclko-125");
	}

	ret = dsa_register_switch(dev->ds);
	if (ret) {
		dev->dev_ops->exit(dev);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ksz_switch_register);

void ksz_switch_remove(struct ksz_device *dev)
{
	/* timer started */
	if (dev->mib_read_timer.expires) {
		del_timer_sync(&dev->mib_read_timer);
		flush_work(&dev->mib_read);
	}

	dev->dev_ops->exit(dev);
	dsa_unregister_switch(dev->ds);

	if (dev->reset_gpio)
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");