xref: /kernel/linux/linux-6.6/net/hsr/hsr_slave.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Frame handler and other utility functions for HSR and PRP.
 */

#include "hsr_slave.h"
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_forward.h"
#include "hsr_framereg.h"

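/* Used through hsr->proto_ops->invalid_dan_ingress_frame() in
 * hsr_handle_frame(): returns true when the frame carries neither the PRP
 * nor the HSR Ethertype and therefore should not be treated as HSR/PRP
 * ingress traffic.
 */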
bool hsr_invalid_dan_ingress_frame(__be16 protocol)
{
	return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR));
}

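/* rx_handler registered on the slave devices in hsr_portdev_setup(). Frames
 * sent by this node itself are dropped, frames the protocol does not want to
 * handle are passed back to the stack, and everything else is handed to
 * hsr_forward_skb(). Returns RX_HANDLER_CONSUMED when the frame was taken
 * over (forwarded or freed) and RX_HANDLER_PASS otherwise.
 */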
static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct hsr_port *port;
	struct hsr_priv *hsr;
	__be16 protocol;

	/* Packets from dev_loopback_xmit() do not have an L2 header, bail out */
	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: skb invalid", __func__);
		return RX_HANDLER_PASS;
	}

	port = hsr_port_get_rcu(skb->dev);
	if (!port)
		goto finish_pass;
	hsr = port->hsr;

	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
		/* Directly kill frames sent by ourselves */
		kfree_skb(skb);
		goto finish_consume;
	}

	/* For HSR, only tagged frames are expected (unless the device offloads
	 * HSR tag removal), but for PRP there could also be untagged frames
	 * from singly attached nodes (SANs).
	 */
	protocol = eth_hdr(skb)->h_proto;

	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    hsr->proto_ops->invalid_dan_ingress_frame &&
	    hsr->proto_ops->invalid_dan_ingress_frame(protocol))
		goto finish_pass;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
	    protocol == htons(ETH_P_HSR))
		skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
	skb_reset_mac_len(skb);

	hsr_forward_skb(skb, port);

finish_consume:
	return RX_HANDLER_CONSUMED;

finish_pass:
	return RX_HANDLER_PASS;
}

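/* True if dev is already used as an HSR/PRP slave, i.e. its rx_handler is
 * hsr_handle_frame().
 */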
bool hsr_port_exists(const struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
}

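/* Check that dev is usable as an HSR/PRP slave: an Ethernet device that is
 * not a loopback, VLAN or HSR device, is not already an HSR slave and does
 * not refuse bridging.
 */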
static int hsr_check_dev_ok(struct net_device *dev,
			    struct netlink_ext_ack *extack)
{
	/* Don't allow HSR on non-Ethernet-like devices */
	if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
	    dev->addr_len != ETH_ALEN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave.");
		return -EINVAL;
	}

	/* Don't allow enslaving hsr devices */
	if (is_hsr_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot create trees of HSR devices.");
		return -EINVAL;
	}

	if (hsr_port_exists(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This device is already an HSR slave.");
		return -EINVAL;
	}

	if (is_vlan_dev(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver.");
		return -EINVAL;
	}

	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "This device does not support bridging.");
		return -EOPNOTSUPP;
	}

	/* HSR over bonded devices has not been tested; it may work, but
	 * remains unverified.
	 */

	return 0;
}

/* Set up the device to be added to the HSR bridge. */
static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev,
			     struct hsr_port *port,
			     struct netlink_ext_ack *extack)
{
	struct net_device *hsr_dev;
	struct hsr_port *master;
	int res;

	/* Don't use promiscuous mode for offload since L2 frame forwarding
	 * is done by the offloading hardware.
	 */
	if (!port->hsr->fwd_offloaded) {
		res = dev_set_promiscuity(dev, 1);
		if (res)
			return res;
	}

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr_dev = master->dev;

	res = netdev_upper_dev_link(dev, hsr_dev, extack);
	if (res)
		goto fail_upper_dev_link;

	res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
	if (res)
		goto fail_rx_handler;
	dev_disable_lro(dev);

	return 0;

fail_rx_handler:
	netdev_upper_dev_unlink(dev, hsr_dev);
fail_upper_dev_link:
	if (!port->hsr->fwd_offloaded)
		dev_set_promiscuity(dev, -1);

	return res;
}

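/* Allocate a port of the given type for dev and add it to hsr's port list.
 * Non-master ports are validated with hsr_check_dev_ok() and wired up via
 * hsr_portdev_setup(); the master's features and MTU are then re-evaluated.
 */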
int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
		 enum hsr_port_type type, struct netlink_ext_ack *extack)
{
	struct hsr_port *port, *master;
	int res;

	if (type != HSR_PT_MASTER) {
		res = hsr_check_dev_ok(dev, extack);
		if (res)
			return res;
	}

	port = hsr_port_get_hsr(hsr, type);
	if (port)
		return -EBUSY;	/* This port already exists */

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->hsr = hsr;
	port->dev = dev;
	port->type = type;

	if (type != HSR_PT_MASTER) {
		res = hsr_portdev_setup(hsr, dev, port, extack);
		if (res)
			goto fail_dev_setup;
	}

	list_add_tail_rcu(&port->port_list, &hsr->ports);
	synchronize_rcu();

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_update_features(master->dev);
	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));

	return 0;

fail_dev_setup:
	kfree(port);
	return res;
}

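/* Remove a port from its hsr device and free it. For slave ports the
 * rx_handler is unregistered, the promiscuity bump from setup is undone and
 * the upper-dev link to the master is removed; the master's features and MTU
 * are re-evaluated.
 */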
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	list_del_rcu(&port->port_list);

	if (port != master) {
		netdev_update_features(master->dev);
		dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		netdev_rx_handler_unregister(port->dev);
		/* Promiscuity was only bumped in hsr_portdev_setup() when
		 * forwarding is not offloaded, so only drop it in that case.
		 */
		if (!hsr->fwd_offloaded)
			dev_set_promiscuity(port->dev, -1);
		netdev_upper_dev_unlink(port->dev, master->dev);
	}

	synchronize_rcu();

	kfree(port);
}