1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * net/psample/psample.c - Netlink channel for packet sampling
4 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
5 */
6
7 #include <linux/types.h>
8 #include <linux/kernel.h>
9 #include <linux/skbuff.h>
10 #include <linux/module.h>
11 #include <net/net_namespace.h>
12 #include <net/sock.h>
13 #include <net/netlink.h>
14 #include <net/genetlink.h>
15 #include <net/psample.h>
16 #include <linux/spinlock.h>
17 #include <net/ip_tunnels.h>
18 #include <net/dst_metadata.h>
19
/* Upper bound on a complete sample netlink message (metadata + payload). */
#define PSAMPLE_MAX_PACKET_SIZE 0xffff

/* All psample groups across all net namespaces; both the list and each
 * group's refcount are protected by psample_groups_lock (taken _bh).
 */
static LIST_HEAD(psample_groups_list);
static DEFINE_SPINLOCK(psample_groups_lock);
24
/* Multicast group indices into psample_nl_mcgrps[] */
enum psample_nl_multicast_groups {
	PSAMPLE_NL_MCGRP_CONFIG,	/* group add/delete notifications */
	PSAMPLE_NL_MCGRP_SAMPLE,	/* sampled packet records */
};
30
/* Netlink multicast groups, indexed by enum psample_nl_multicast_groups.
 * Joining the SAMPLE group requires per-netns admin permission
 * (GENL_UNS_ADMIN_PERM) because it carries raw packet payloads.
 */
static const struct genl_multicast_group psample_nl_mcgrps[] = {
	[PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
	[PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME,
				      .flags = GENL_UNS_ADMIN_PERM },
};
36
/* Family definition lives below; forward-declared for the fill helpers. */
static struct genl_family psample_nl_family __ro_after_init;
38
/* Fill @msg with a description of @group (group number, current refcount
 * and sequence counter) under a genetlink header for @cmd.
 * Returns 0 on success, -EMSGSIZE if the message buffer is too small; on
 * failure the partially-built header is cancelled.
 */
static int psample_group_nl_fill(struct sk_buff *msg,
				 struct psample_group *group,
				 enum psample_command cmd, u32 portid, u32 seq,
				 int flags)
{
	void *hdr;

	hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
	if (!hdr)
		return -EMSGSIZE;

	/* nla_put_u32() only ever fails with -EMSGSIZE, so the individual
	 * return values can be folded into one check.
	 */
	if (nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num) ||
	    nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount) ||
	    nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq)) {
		genlmsg_cancel(msg, hdr);
		return -EMSGSIZE;
	}

	genlmsg_end(msg, hdr);
	return 0;
}
70
psample_nl_cmd_get_group_dumpit(struct sk_buff *msg, struct netlink_callback *cb)71 static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
72 struct netlink_callback *cb)
73 {
74 struct psample_group *group;
75 int start = cb->args[0];
76 int idx = 0;
77 int err;
78
79 spin_lock_bh(&psample_groups_lock);
80 list_for_each_entry(group, &psample_groups_list, list) {
81 if (!net_eq(group->net, sock_net(msg->sk)))
82 continue;
83 if (idx < start) {
84 idx++;
85 continue;
86 }
87 err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
88 NETLINK_CB(cb->skb).portid,
89 cb->nlh->nlmsg_seq, NLM_F_MULTI);
90 if (err)
91 break;
92 idx++;
93 }
94
95 spin_unlock_bh(&psample_groups_lock);
96 cb->args[0] = idx;
97 return msg->len;
98 }
99
/* Generic netlink operations.  GET_GROUP is dump-only and opts out of
 * strict attribute validation (GENL_DONT_VALIDATE_*).
 */
static const struct genl_small_ops psample_nl_ops[] = {
	{
		.cmd = PSAMPLE_CMD_GET_GROUP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.dumpit = psample_nl_cmd_get_group_dumpit,
		/* can be retrieved by unprivileged users */
	}
};
108
/* psample generic netlink family.  netnsok: groups and notifications are
 * scoped per network namespace.
 */
static struct genl_family psample_nl_family __ro_after_init = {
	.name		= PSAMPLE_GENL_NAME,
	.version	= PSAMPLE_GENL_VERSION,
	.maxattr	= PSAMPLE_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.mcgrps		= psample_nl_mcgrps,
	.small_ops	= psample_nl_ops,
	.n_small_ops	= ARRAY_SIZE(psample_nl_ops),
	.n_mcgrps	= ARRAY_SIZE(psample_nl_mcgrps),
};
120
psample_group_notify(struct psample_group *group, enum psample_command cmd)121 static void psample_group_notify(struct psample_group *group,
122 enum psample_command cmd)
123 {
124 struct sk_buff *msg;
125 int err;
126
127 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
128 if (!msg)
129 return;
130
131 err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
132 if (!err)
133 genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
134 PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
135 else
136 nlmsg_free(msg);
137 }
138
/* Allocate a new group for (@net, @group_num), link it into
 * psample_groups_list and announce it on the CONFIG multicast group.
 * Caller must hold psample_groups_lock (see psample_group_get()).
 * The refcount starts at zero; the caller takes the first reference.
 * Returns NULL on allocation failure.
 */
static struct psample_group *psample_group_create(struct net *net,
						  u32 group_num)
{
	struct psample_group *group;

	group = kzalloc(sizeof(*group), GFP_ATOMIC);
	if (!group)
		return NULL;

	group->net = net;
	group->group_num = group_num;
	list_add_tail(&group->list, &psample_groups_list);

	psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
	return group;
}
155
/* Announce deletion, unlink @group and free it after an RCU grace period.
 * Caller must hold psample_groups_lock and the refcount must have reached
 * zero (see psample_group_put()).
 */
static void psample_group_destroy(struct psample_group *group)
{
	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
	list_del(&group->list);
	kfree_rcu(group, rcu);
}
162
163 static struct psample_group *
psample_group_lookup(struct net *net, u32 group_num)164 psample_group_lookup(struct net *net, u32 group_num)
165 {
166 struct psample_group *group;
167
168 list_for_each_entry(group, &psample_groups_list, list)
169 if ((group->group_num == group_num) && (group->net == net))
170 return group;
171 return NULL;
172 }
173
/* Look up the group for (@net, @group_num), creating it on first use, and
 * take a reference on it.  Returns NULL if creation fails.
 */
struct psample_group *psample_group_get(struct net *net, u32 group_num)
{
	struct psample_group *group;

	spin_lock_bh(&psample_groups_lock);

	group = psample_group_lookup(net, group_num);
	if (!group)
		group = psample_group_create(net, group_num);
	if (group)
		group->refcount++;

	spin_unlock_bh(&psample_groups_lock);
	return group;
}
EXPORT_SYMBOL_GPL(psample_group_get);
193
/* Take an additional reference on an already-referenced @group. */
void psample_group_take(struct psample_group *group)
{
	spin_lock_bh(&psample_groups_lock);
	group->refcount++;
	spin_unlock_bh(&psample_groups_lock);
}
EXPORT_SYMBOL_GPL(psample_group_take);
201
psample_group_put(struct psample_group *group)202 void psample_group_put(struct psample_group *group)
203 {
204 spin_lock_bh(&psample_groups_lock);
205
206 if (--group->refcount == 0)
207 psample_group_destroy(group);
208
209 spin_unlock_bh(&psample_groups_lock);
210 }
211 EXPORT_SYMBOL_GPL(psample_group_put);
212
213 #ifdef CONFIG_INET
/* Emit @tun_info as a series of PSAMPLE_TUNNEL_KEY_ATTR_* attributes.
 * Attributes are only emitted for fields that are actually set; this must
 * stay in sync with the size accounting in psample_tunnel_meta_len().
 * Returns 0 on success or -EMSGSIZE if the skb lacks tailroom.
 */
static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
				      struct ip_tunnel_info *tun_info)
{
	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
	const void *tun_opts = ip_tunnel_info_opts(tun_info);
	const struct ip_tunnel_key *tun_key = &tun_info->key;
	int tun_opts_len = tun_info->options_len;

	if (tun_key->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
			 PSAMPLE_TUNNEL_KEY_ATTR_PAD))
		return -EMSGSIZE;

	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE))
		return -EMSGSIZE;

	/* Addresses: zero / any addresses are treated as unset. */
	switch (tun_proto) {
	case AF_INET:
		if (tun_key->u.ipv4.src &&
		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC,
				    tun_key->u.ipv4.src))
			return -EMSGSIZE;
		if (tun_key->u.ipv4.dst &&
		    nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST,
				    tun_key->u.ipv4.dst))
			return -EMSGSIZE;
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&tun_key->u.ipv6.src) &&
		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC,
				     &tun_key->u.ipv6.src))
			return -EMSGSIZE;
		if (!ipv6_addr_any(&tun_key->u.ipv6.dst) &&
		    nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST,
				     &tun_key->u.ipv6.dst))
			return -EMSGSIZE;
		break;
	}
	if (tun_key->tos &&
	    nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TOS, tun_key->tos))
		return -EMSGSIZE;
	/* TTL is always emitted, even when zero. */
	if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (tun_key->tp_src &&
	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src))
		return -EMSGSIZE;
	if (tun_key->tp_dst &&
	    nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
		return -EMSGSIZE;
	if ((tun_key->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	/* Option blob type is selected by the tunnel flag that is set. */
	if (tun_opts_len) {
		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
			 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
				 tun_opts_len, tun_opts))
			return -EMSGSIZE;
	}

	return 0;
}
286
psample_ip_tun_to_nlattr(struct sk_buff *skb, struct ip_tunnel_info *tun_info)287 static int psample_ip_tun_to_nlattr(struct sk_buff *skb,
288 struct ip_tunnel_info *tun_info)
289 {
290 struct nlattr *nla;
291 int err;
292
293 nla = nla_nest_start_noflag(skb, PSAMPLE_ATTR_TUNNEL);
294 if (!nla)
295 return -EMSGSIZE;
296
297 err = __psample_ip_tun_to_nlattr(skb, tun_info);
298 if (err) {
299 nla_nest_cancel(skb, nla);
300 return err;
301 }
302
303 nla_nest_end(skb, nla);
304
305 return 0;
306 }
307
/* Compute the netlink size of the PSAMPLE_ATTR_TUNNEL nest that
 * __psample_ip_tun_to_nlattr() would emit for @tun_info.  The per-field
 * conditions here must mirror that function exactly, otherwise the
 * message allocation in psample_sample_packet() will be wrong.
 */
static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
{
	unsigned short tun_proto = ip_tunnel_info_af(tun_info);
	const struct ip_tunnel_key *tun_key = &tun_info->key;
	int tun_opts_len = tun_info->options_len;
	int sum = nla_total_size(0);	/* PSAMPLE_ATTR_TUNNEL */

	if (tun_key->tun_flags & TUNNEL_KEY)
		sum += nla_total_size_64bit(sizeof(u64));

	if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
		sum += nla_total_size(0);

	switch (tun_proto) {
	case AF_INET:
		if (tun_key->u.ipv4.src)
			sum += nla_total_size(sizeof(u32));
		if (tun_key->u.ipv4.dst)
			sum += nla_total_size(sizeof(u32));
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&tun_key->u.ipv6.src))
			sum += nla_total_size(sizeof(struct in6_addr));
		if (!ipv6_addr_any(&tun_key->u.ipv6.dst))
			sum += nla_total_size(sizeof(struct in6_addr));
		break;
	}
	if (tun_key->tos)
		sum += nla_total_size(sizeof(u8));
	sum += nla_total_size(sizeof(u8));	/* TTL */
	if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
		sum += nla_total_size(0);
	if (tun_key->tun_flags & TUNNEL_CSUM)
		sum += nla_total_size(0);
	if (tun_key->tp_src)
		sum += nla_total_size(sizeof(u16));
	if (tun_key->tp_dst)
		sum += nla_total_size(sizeof(u16));
	if (tun_key->tun_flags & TUNNEL_OAM)
		sum += nla_total_size(0);
	if (tun_opts_len) {
		if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
			sum += nla_total_size(tun_opts_len);
		else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
			sum += nla_total_size(tun_opts_len);
	}

	return sum;
}
357 #endif
358
/* Send one sampled packet to userspace on the SAMPLE multicast group.
 *
 * @group:       sampling group (determines netns, reported group number
 *               and sequence counter)
 * @skb:         the sampled packet
 * @trunc_size:  maximum number of payload bytes to copy into the message
 * @in_ifindex:  ingress ifindex, 0 to omit the attribute
 * @out_ifindex: egress ifindex, 0 to omit the attribute
 * @sample_rate: 1-in-N rate at which this packet was sampled
 *
 * Best-effort: on any failure the message is dropped with only a
 * ratelimited error printed.
 */
void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
			   u32 trunc_size, int in_ifindex, int out_ifindex,
			   u32 sample_rate)
{
#ifdef CONFIG_INET
	struct ip_tunnel_info *tun_info;
#endif
	struct sk_buff *nl_skb;
	int data_len;
	int meta_len;
	void *data;
	int ret;

	/* Size of every metadata attribute emitted below. */
	meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
		   (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
		   nla_total_size(sizeof(u32)) +	/* sample_rate */
		   nla_total_size(sizeof(u32)) +	/* orig_size */
		   nla_total_size(sizeof(u32)) +	/* group_num */
		   nla_total_size(sizeof(u32));		/* seq */

#ifdef CONFIG_INET
	tun_info = skb_tunnel_info(skb);
	if (tun_info)
		meta_len += psample_tunnel_meta_len(tun_info);
#endif

	/* Truncate the payload so metadata + data attribute fit within
	 * PSAMPLE_MAX_PACKET_SIZE, reserving the attribute header and
	 * alignment padding.
	 */
	data_len = min(skb->len, trunc_size);
	if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
		data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
			    - NLA_ALIGNTO;

	nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
	if (unlikely(!nl_skb))
		return;

	data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
			   PSAMPLE_CMD_SAMPLE);
	if (unlikely(!data))
		goto error;

	if (in_ifindex) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
		if (unlikely(ret < 0))
			goto error;
	}

	if (out_ifindex) {
		ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
		if (unlikely(ret < 0))
			goto error;
	}

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
	if (unlikely(ret < 0))
		goto error;

	/* Original (pre-truncation) packet length. */
	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
	if (unlikely(ret < 0))
		goto error;

	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
	if (unlikely(ret < 0))
		goto error;

	/* NOTE(review): group->seq is incremented here without holding
	 * psample_groups_lock; concurrent samplers may race on it.
	 */
	ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
	if (unlikely(ret < 0))
		goto error;

	if (data_len) {
		int nla_len = nla_total_size(data_len);
		struct nlattr *nla;

		/* Build the data attribute header by hand and copy the
		 * (possibly truncated) payload straight into the skb.
		 */
		nla = skb_put(nl_skb, nla_len);
		nla->nla_type = PSAMPLE_ATTR_DATA;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			goto error;
	}

#ifdef CONFIG_INET
	if (tun_info) {
		ret = psample_ip_tun_to_nlattr(nl_skb, tun_info);
		if (unlikely(ret < 0))
			goto error;
	}
#endif

	genlmsg_end(nl_skb, data);
	genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
				PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);

	return;
error:
	pr_err_ratelimited("Could not create psample log message\n");
	nlmsg_free(nl_skb);
}
EXPORT_SYMBOL_GPL(psample_sample_packet);
457
/* Register the psample generic netlink family on module load. */
static int __init psample_module_init(void)
{
	return genl_register_family(&psample_nl_family);
}
462
/* Unregister the family on module unload. */
static void __exit psample_module_exit(void)
{
	genl_unregister_family(&psample_nl_family);
}
467
468 module_init(psample_module_init);
469 module_exit(psample_module_exit);
470
471 MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
472 MODULE_DESCRIPTION("netlink channel for packet sampling");
473 MODULE_LICENSE("GPL v2");
474