// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"

static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };

static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
				struct net_bridge_port *s_port,
				struct net_bridge_port *port)
{
	if (port == p_port ||
	    port == s_port)
		return true;

	return false;
}

static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
			      struct net_bridge_port *port)
{
	if (port == i_port)
		return true;

	return false;
}

static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *res = NULL;
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list) {
		if (port->dev->ifindex == ifindex) {
			res = port;
			break;
		}
	}

	return res;
}

static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->ring_id == ring_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (mrp->in_id == in_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		struct net_bridge_port *p;

		p = rtnl_dereference(mrp->p_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->s_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->i_port);
		if (p && p->dev->ifindex == ifindex)
			return false;
	}

	return true;
}

static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
				       struct net_bridge_port *p)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	list_for_each_entry_rcu(mrp, &br->mrp_list, list,
				lockdep_rtnl_is_held()) {
		if (rcu_access_pointer(mrp->p_port) == p ||
		    rcu_access_pointer(mrp->s_port) == p ||
		    rcu_access_pointer(mrp->i_port) == p) {
			res = mrp;
			break;
		}
	}

	return res;
}
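/* br->mrp_list, walked by the lookup helpers above, is an RCU-protected
 * list whose writers all hold rtnl_lock. Passing lockdep_rtnl_is_held()
 * to list_for_each_entry_rcu() makes the walks valid both from RCU
 * read-side critical sections and from RTNL-holding control-path callers.
 */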
static int br_mrp_next_seq(struct br_mrp *mrp)
{
	mrp->seq_id++;
	return mrp->seq_id;
}

static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
					const u8 *src, const u8 *dst)
{
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be16 *version;

	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_MRP);
	skb->priority = MRP_FRAME_PRIO;
	skb_reserve(skb, sizeof(*eth_hdr));

	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, dst);
	ether_addr_copy(eth_hdr->h_source, src);
	eth_hdr->h_proto = htons(ETH_P_MRP);

	version = skb_put(skb, sizeof(*version));
	*version = cpu_to_be16(MRP_VERSION);

	return skb;
}

static void br_mrp_skb_tlv(struct sk_buff *skb,
			   enum br_mrp_tlv_header_type type,
			   u8 length)
{
	struct br_mrp_tlv_hdr *hdr;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->type = type;
	hdr->length = length;
}

static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
	struct br_mrp_common_hdr *hdr;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}

static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
					     struct net_bridge_port *p,
					     enum br_mrp_port_role_type port_role)
{
	struct br_mrp_ring_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->prio = cpu_to_be16(mrp->prio);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->ring_state);
	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
						struct net_bridge_port *p,
						enum br_mrp_port_role_type port_role)
{
	struct br_mrp_in_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->id = cpu_to_be16(mrp->in_id);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->in_state);
	hdr->transitions = cpu_to_be16(mrp->in_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}
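/* The on-wire layout of the MRP_Test frame built by the helpers above:
 *
 *   | eth hdr (dmac 01:15:4e:00:00:01) | 16-bit version | RING_TEST TLV |
 *   | COMMON TLV (seq_id, domain) | END TLV |
 *
 * MRP_InTest frames have the same shape, with dmac 01:15:4e:00:00:03 and
 * an IN_TEST TLV instead of the RING_TEST one.
 */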
/* This function is continuously called in the following cases:
 * - when node role is MRM, in this case test_monitor is always set to false
 *   because it needs to notify the userspace that the ring is open and needs
 *   to send MRP_Test frames
 * - when node role is MRA, there are 2 subcases:
 *     - when MRA behaves as MRM, in which case it behaves the same as in the
 *       MRM role
 *     - when MRA behaves as MRC, in which case test_monitor is set to true,
 *       because it needs to detect when it stops seeing MRP_Test frames
 *       from the MRM node but it doesn't need to send MRP_Test frames.
 */
static void br_mrp_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->test_end, jiffies))
		return;

	if (mrp->test_count_miss < mrp->test_max_miss) {
		mrp->test_count_miss++;
	} else {
		/* Notify that the ring is open only if the ring state is
		 * closed, otherwise it would continue to notify at every
		 * interval.
		 * Also notify that the ring is open when the node has the
		 * role MRA and behaves as MRC. The reason is that the
		 * userspace needs to know when the MRM stopped sending
		 * MRP_Test frames so that the current node can try to take
		 * over the MRM role.
		 */
		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
		    mrp->test_monitor)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_PRIMARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_SECONDARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(mrp->test_interval));
}

/* This function is continuously called when the node has the interconnect
 * role MIM. It generates MRP_InTest frames and sends them on all 3 ports,
 * but it also checks whether the node stopped receiving MRP_InTest frames.
 */
static void br_mrp_in_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->in_test_end, jiffies))
		return;

	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
		mrp->in_test_count_miss++;
	} else {
		/* Notify that the interconnect ring is open only if the
		 * interconnect ring state is closed, otherwise it would
		 * continue to notify at every interval.
		 */
		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_PRIMARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_SECONDARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->i_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_INTER);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(mrp->in_test_interval));
}
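/* The loss-of-test detection above is a simple miss counter: every expiry
 * of the delayed work increments test_count_miss (or in_test_count_miss),
 * while receiving an own MRP_Test/MRP_InTest frame resets it to 0 (see
 * br_mrp_mrm_process() and br_mrp_mim_process() below). Once test_max_miss
 * consecutive intervals pass without a reset, the ring is reported open to
 * userspace.
 */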
/* Deletes the MRP instance.
 * note: called under rtnl_lock
 */
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
	struct net_bridge_port *p;
	u8 state;

	/* Stop sending MRP_Test frames */
	cancel_delayed_work_sync(&mrp->test_work);
	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);

	/* Stop sending MRP_InTest frames if the node has an interconnect
	 * role
	 */
	cancel_delayed_work_sync(&mrp->in_test_work);
	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

	br_mrp_switchdev_del(br, mrp);

	/* Reset the ports */
	p = rtnl_dereference(mrp->p_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->p_port, NULL);
	}

	p = rtnl_dereference(mrp->s_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->s_port, NULL);
	}

	p = rtnl_dereference(mrp->i_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);
	}

	list_del_rcu(&mrp->list);
	kfree_rcu(mrp, rcu);
}

/* Adds a new MRP instance.
 * note: called under rtnl_lock
 */
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct net_bridge_port *p;
	struct br_mrp *mrp;
	int err;

	/* If the ring exists, it is not possible to create another one with
	 * the same ring_id
	 */
	mrp = br_mrp_find_id(br, instance->ring_id);
	if (mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, instance->p_ifindex) ||
	    !br_mrp_get_port(br, instance->s_ifindex))
		return -EINVAL;

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
		return -EINVAL;

	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
	if (!mrp)
		return -ENOMEM;

	mrp->ring_id = instance->ring_id;
	mrp->prio = instance->prio;

	p = br_mrp_get_port(br, instance->p_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->p_port, p);

	p = br_mrp_get_port(br, instance->s_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->s_port, p);

	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
	list_add_tail_rcu(&mrp->list, &br->mrp_list);

	err = br_mrp_switchdev_add(br, mrp);
	if (err)
		goto delete_mrp;

	return 0;

delete_mrp:
	br_mrp_del_impl(br, mrp);

	return err;
}
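/* A typical bring-up, as driven by a userspace MRP agent through the
 * bridge netlink interface (the glue lives outside this file), maps onto
 * the entry points in this file roughly as follows - illustrative ordering
 * only:
 *
 *	br_mrp_add()            - create the instance, claim both ring ports
 *	br_mrp_set_ring_role()  - become MRM, MRC or MRA
 *	br_mrp_start_test()     - start generating/monitoring MRP_Test frames
 *	br_mrp_set_ring_state() - updated as the ring opens and closes
 */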
/* Deletes the MRP instance that the port is part of
 * note: called under rtnl_lock
 */
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of an MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}

/* Deletes an existing MRP instance based on ring_id
 * note: called under rtnl_lock
 */
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);

	if (!mrp)
		return -EINVAL;

	br_mrp_del_impl(br, mrp);

	return 0;
}

/* Set port state; the port state can be forwarding, blocked or disabled
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_state(struct net_bridge_port *p,
			  enum br_mrp_port_state_type state)
{
	u32 port_state;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	spin_lock_bh(&p->br->lock);

	if (state == BR_MRP_PORT_STATE_FORWARDING)
		port_state = BR_STATE_FORWARDING;
	else
		port_state = BR_STATE_BLOCKING;

	p->state = port_state;
	spin_unlock_bh(&p->br->lock);

	br_mrp_port_switchdev_set_state(p, port_state);

	return 0;
}

/* Set port role; the port role can be primary or secondary
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_role(struct net_bridge_port *p,
			 enum br_mrp_port_role_type role)
{
	struct br_mrp *mrp;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	mrp = br_mrp_find_port(p->br, p);

	if (!mrp)
		return -EINVAL;

	switch (role) {
	case BR_MRP_PORT_ROLE_PRIMARY:
		rcu_assign_pointer(mrp->p_port, p);
		break;
	case BR_MRP_PORT_ROLE_SECONDARY:
		rcu_assign_pointer(mrp->s_port, p);
		break;
	default:
		return -EINVAL;
	}

	br_mrp_port_switchdev_set_role(p, role);

	return 0;
}

/* Set ring state; the ring state can only be Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_state(struct net_bridge *br,
			  struct br_mrp_ring_state *state)
{
	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->ring_state != state->ring_state)
		mrp->ring_transitions++;

	mrp->ring_state = state->ring_state;

	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);

	return 0;
}
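/* Note that ring_transitions above increments only on an actual state
 * change; its value is advertised on the ring in the 'transitions' field
 * of every MRP_Test frame (see br_mrp_alloc_test_skb()), so other nodes
 * can observe how often this ring has flapped.
 */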
/* Set ring role; the ring role can only be MRM (Media Redundancy Manager) or
 * MRC (Media Redundancy Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_role(struct net_bridge *br,
			 struct br_mrp_ring_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	int err;

	if (!mrp)
		return -EINVAL;

	mrp->ring_role = role->ring_role;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MRM then the HW
	 * will notify the SW when the ring is open, but if the role is not
	 * pushed to the HW the SW will need to detect when the ring is open.
	 */
	mrp->ring_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start to generate or monitor MRP test frames; the frames are generated by
 * the HW and if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_test(struct net_bridge *br,
		      struct br_mrp_start_test *test)
{
	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);

	if (!mrp)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with the SW
	 * implementation and if that also fails then return an error.
	 */
	if (!br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
					     test->max_miss, test->period,
					     test->monitor))
		return 0;

	mrp->test_interval = test->interval;
	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
	mrp->test_max_miss = test->max_miss;
	mrp->test_monitor = test->monitor;
	mrp->test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(test->interval));

	return 0;
}
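/* Worked example for the SW fallback above (illustrative numbers): with
 * test->interval = 500000 (500ms, the interval is in microseconds) and
 * test->max_miss = 3, test_count_miss is incremented on the first three
 * expiries of test_work and the "ring open" notification fires on the
 * fourth, i.e. after roughly 2s without receiving an own MRP_Test frame
 * to reset the counter.
 */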
/* Set in state; the in state can only be Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_state != state->in_state)
		mrp->in_transitions++;

	mrp->in_state = state->in_state;

	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);

	return 0;
}

/* Set in role; the in role can only be MIM (Media Interconnection Manager) or
 * MIC (Media Interconnection Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	struct net_bridge_port *p;
	int err;

	if (!mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, role->i_ifindex))
		return -EINVAL;

	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
		u8 state;

		/* It is not allowed to disable a port that doesn't exist */
		p = rtnl_dereference(mrp->i_port);
		if (!p)
			return -EINVAL;

		/* Stop generating MRP_InTest frames */
		cancel_delayed_work_sync(&mrp->in_test_work);
		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

		/* Remove the port */
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);

		mrp->in_role = role->in_role;
		mrp->in_id = 0;

		return 0;
	}

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
		return -EINVAL;

	/* It is not allowed to set a different interconnect port if the mrp
	 * instance already has one. First it needs to be disabled and only
	 * after that can the new port be set.
	 */
	if (rcu_access_pointer(mrp->i_port))
		return -EINVAL;

	p = br_mrp_get_port(br, role->i_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->i_port, p);

	mrp->in_role = role->in_role;
	mrp->in_id = role->in_id;

	/* If there is an error just bail out */
	err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
					   role->ring_id, role->in_role);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MIM then the HW
	 * will notify the SW when the interconnect ring is open, but if the
	 * role is not pushed to the HW the SW will need to detect when the
	 * interconnect ring is open.
	 */
	mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;

	return 0;
}

/* Start to generate MRP_InTest frames; the frames are generated by the HW
 * and if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_in_test(struct net_bridge *br,
			 struct br_mrp_start_in_test *in_test)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with the SW
	 * implementation and if that also fails then return an error.
	 */
	if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
					   in_test->max_miss, in_test->period))
		return 0;

	mrp->in_test_interval = in_test->interval;
	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
	mrp->in_test_max_miss = in_test->max_miss;
	mrp->in_test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(in_test->interval));

	return 0;
}
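/* All the RX-side parsing below uses skb_header_pointer() so that it is
 * safe for non-linear skbs: the requested bytes are copied into the
 * on-stack header when they are not contiguous in the skb. The
 * sizeof(uint16_t) offset in every call skips the 16-bit MRP version
 * field that precedes the first TLV.
 */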
/* Determine if the frame type is a ring frame */
static bool br_mrp_ring_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
	    hdr->type == BR_MRP_TLV_HEADER_OPTION)
		return true;

	return false;
}

/* Determine if the frame type is an interconnect frame */
static bool br_mrp_in_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP)
		return true;

	return false;
}

/* Process only MRP Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	mrp->test_count_miss = 0;

	/* Notify the userspace that the ring is closed only when the ring is
	 * not already closed
	 */
	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
		br_mrp_ring_port_open(port->dev, false);
}

/* Determine if the test hdr has a better priority than the node */
static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
					struct net_bridge *br,
					const struct br_mrp_ring_test_hdr *hdr)
{
	u16 prio = be16_to_cpu(hdr->prio);

	if (prio < mrp->prio ||
	    (prio == mrp->prio &&
	     ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
		return true;

	return false;
}

/* Process only MRP Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
			       struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_ring_test_hdr *test_hdr;
	struct br_mrp_ring_test_hdr _test_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				      sizeof(_test_hdr), &_test_hdr);
	if (!test_hdr)
		return;

	/* Only frames that have a better priority than the node will
	 * clear the miss counter because otherwise the node will need to
	 * behave as MRM.
	 */
	if (br_mrp_test_better_than_own(mrp, br, test_hdr))
		mrp->test_count_miss = 0;
}
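/* Example of the comparison in br_mrp_test_better_than_own(), with
 * illustrative values: a received MRP_Test carrying prio 0x4000 beats a
 * local prio of 0x8000 (lower value wins), so an MRA node keeps behaving
 * as MRC; on equal priorities the tie is broken by the lower bridge MAC
 * address.
 */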
/* Process only MRP InTest frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_in_test_hdr *in_hdr;
	struct br_mrp_in_test_hdr _in_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	/* The check for the InTest frame type was already done */
	in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				    sizeof(_in_hdr), &_in_hdr);
	if (!in_hdr)
		return false;

	/* It needs to process only its own InTest frames. */
	if (mrp->in_id != ntohs(in_hdr->id))
		return false;

	mrp->in_test_count_miss = 0;

	/* Notify the userspace that the ring is closed only when the ring is
	 * not already closed
	 */
	if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
		br_mrp_in_port_open(port->dev, false);

	return true;
}

/* Get the MRP frame type
 * note: already called with rcu_read_lock
 */
static u8 br_mrp_get_frame_type(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return 0xff;

	return hdr->type;
}

static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
		return true;

	return false;
}

static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
		return true;

	return false;
}
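/* Summary of the RX decisions implemented by br_mrp_rcv() below:
 *
 *   ring frames: MRM (or MRA acting as MRM) - process, never forward;
 *                MRC (or MRA acting as MRC) - forward between ring ports.
 *   in frames:   MIM - consume own InTest frames; forward other in frames
 *                only between ring ports and drop those received on the
 *                interconnect port;
 *                MIC - forward InTest on all ports, IntLinkChange only
 *                when received on ring ports, IntTopoChange only between
 *                ring ports.
 */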
/* This will just forward the frame to the other mrp ring ports, depending on
 * the frame type, ring role and interconnect role
 * note: already called with rcu_read_lock
 */
static int br_mrp_rcv(struct net_bridge_port *p,
		      struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
	struct net_bridge *br;
	struct br_mrp *mrp;

	/* If port is disabled don't accept any frames */
	if (p->state == BR_STATE_DISABLED)
		return 0;

	br = p->br;
	mrp = br_mrp_find_port(br, p);
	if (unlikely(!mrp))
		return 0;

	p_port = rcu_dereference(mrp->p_port);
	if (!p_port)
		return 0;
	p_dst = p_port;

	s_port = rcu_dereference(mrp->s_port);
	if (!s_port)
		return 0;
	s_dst = s_port;

	/* If the frame is a ring frame then it is not required to check the
	 * interconnect role and ports to process or forward the frame
	 */
	if (br_mrp_ring_frame(skb)) {
		/* If the role is MRM then don't forward the frames */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
			br_mrp_mrm_process(mrp, p, skb);
			goto no_forward;
		}

		/* If the role is MRA then don't forward the frames if it
		 * behaves as an MRM node
		 */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
			if (!mrp->test_monitor) {
				br_mrp_mrm_process(mrp, p, skb);
				goto no_forward;
			}

			br_mrp_mra_process(mrp, br, p, skb);
		}

		goto forward;
	}

	if (br_mrp_in_frame(skb)) {
		u8 in_type = br_mrp_get_frame_type(skb);

		i_port = rcu_dereference(mrp->i_port);
		i_dst = i_port;

		/* If the ring port is in blocking state it should not
		 * forward MRP_InTest frames
		 */
		if (br_mrp_is_ring_port(p_port, s_port, p) &&
		    p->state == BR_STATE_BLOCKING &&
		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
			goto no_forward;

		/* A node that behaves as MRM needs to stop forwarding the
		 * frames when the ring is closed, otherwise there will be a
		 * loop. In this case the frames are not forwarded between
		 * the ring ports.
		 */
		if (br_mrp_mrm_behaviour(mrp) &&
		    br_mrp_is_ring_port(p_port, s_port, p) &&
		    (s_port->state != BR_STATE_FORWARDING ||
		     p_port->state != BR_STATE_FORWARDING)) {
			p_dst = NULL;
			s_dst = NULL;
		}

		/* A node that behaves as MRC and doesn't have an interconnect
		 * role should forward all frames between the ring ports
		 * because it doesn't have an interconnect port
		 */
		if (br_mrp_mrc_behaviour(mrp) &&
		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
			goto forward;

		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
				/* MIM should not forward its own InTest
				 * frames
				 */
				if (br_mrp_mim_process(mrp, p, skb)) {
					goto no_forward;
				} else {
					if (br_mrp_is_ring_port(p_port, s_port,
								p))
						i_dst = NULL;

					if (br_mrp_is_in_port(i_port, p))
						goto no_forward;
				}
			} else {
				/* MIM should forward IntLinkChange and
				 * IntTopoChange between ring ports but MIM
				 * should not forward IntLinkChange and
				 * IntTopoChange if the frame was received at
				 * the interconnect port
				 */
				if (br_mrp_is_ring_port(p_port, s_port, p))
					i_dst = NULL;

				if (br_mrp_is_in_port(i_port, p))
					goto no_forward;
			}
		}

		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
			/* MIC should forward InTest frames on all ports
			 * regardless of the received port
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
				goto forward;

			/* MIC should forward IntLinkChange frames to all
			 * ports only if they were received on ring ports
			 */
			if (br_mrp_is_ring_port(p_port, s_port, p) &&
			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
				goto forward;

			/* InTopo frames should be forwarded only between the
			 * ring ports
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
				i_dst = NULL;
				goto forward;
			}

			/* In all the other cases don't forward the frames */
			goto no_forward;
		}
	}

forward:
	if (p_dst)
		br_forward(p_dst, skb, true, false);
	if (s_dst)
		br_forward(s_dst, skb, true, false);
	if (i_dst)
		br_forward(i_dst, skb, true, false);

no_forward:
	return 1;
}

/* Check if the frame was received on a port that is part of an MRP ring
 * and if the frame has the MRP ethertype. In that case process the frame,
 * otherwise do normal forwarding.
 * note: already called with rcu_read_lock
 */
int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* If there is no MRP instance do normal forwarding */
	if (likely(!(p->flags & BR_MRP_AWARE)))
		goto out;

	if (unlikely(skb->protocol == htons(ETH_P_MRP)))
		return br_mrp_rcv(p, skb, p->dev);

out:
	return 0;
}

bool br_mrp_enabled(struct net_bridge *br)
{
	return !list_empty(&br->mrp_list);
}