// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:  Daniel Martensson
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@lockless.no
 */

#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))

static const struct cfhsi_config  hsi_default_config = {

	/* Inactivity timeout on HSI, in jiffies (default: one second). */
	.inactivity_timeout = HZ,

	/* Aggregation timeout of zero means no aggregation is done. */
	.aggregation_timeout = 1,

	/*
	 * HSI link layer flow-control thresholds.
	 * Threshold values for the HSI packet queue. Flow-control will be
	 * asserted when the number of packets exceeds q_high_mark. It will
	 * not be de-asserted before the number of packets drops below
	 * q_low_mark.
	 * Warning: A high threshold value might increase throughput but it
	 * will at the same time prevent channel prioritization and increase
	 * the risk of flooding the modem. The high threshold should be above
	 * the low.
	 */
	.q_high_mark = 100,
	.q_low_mark = 50,

	/*
	 * HSI padding options.
	 * Warning: must be a power of 2 (the & operation is used) and cannot
	 * be zero!
	 */
	.head_align = 4,
	.tail_align = 4,
};

#define ON 1
#define OFF 0

static LIST_HEAD(cfhsi_list);

static void cfhsi_inactivity_tout(struct timer_list *t)
{
	struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
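	/* The timer fires in atomic context while the wake-down sequence
	 * sleeps, so the actual power-down is deferred to the workqueue.
	 */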
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
					   const struct sk_buff *skb,
					   int direction)
{
	struct caif_payload_info *info;
	int hpad, tpad, len;

	info = (struct caif_payload_info *)&skb->cb;
	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
	len = skb->len + hpad + tpad;

	if (direction > 0)
		cfhsi->aggregation_len += len;
	else if (direction < 0)
		cfhsi->aggregation_len -= len;
}

static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
	int i;

	if (cfhsi->cfg.aggregation_timeout == 0)
		return true;

	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
		if (cfhsi->qhead[i].qlen)
			return true;
	}

	/* TODO: Use aggregation_len instead */
	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
		return true;

	return false;
}

static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
		skb = skb_dequeue(&cfhsi->qhead[i]);
		if (skb)
			break;
	}

	return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
	int i, len = 0;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		len += skb_queue_len(&cfhsi->qhead[i]);
	return len;
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			  jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
				&fifo_occupancy);
		if (ret) {
			netdev_warn(cfhsi->ndev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->ops);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			netdev_warn(cfhsi->ndev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			netdev_warn(cfhsi->ndev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			netdev_warn(cfhsi->ndev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
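	/* Payload CAIF frames are written just past the embedded-frame area,
	 * i.e. directly after the full descriptor.
	 */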

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}

static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0))
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/*
	 * Send flow on if flow off has been previously signalled
	 * and number of packets is below low water mark.
	 */
	spin_lock_bh(&cfhsi->lock);
	if (cfhsi->flow_off_sent &&
			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
			cfhsi->cfdev.flowctrl) {

		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			jiffies + cfhsi->cfg.aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}

	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		skb_put_data(skb, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		netif_rx_any_context(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
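	/* A piggy-backed descriptor follows the payload, so the next read
	 * must cover it as well.
	 */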
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		netdev_err(cfhsi->ndev,
				"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

		pr_err("Invalid descriptor. %x %x\n", desc->header,
				desc->offset);
		return -EPROTO;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	if (xfer_sz % 4) {
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *pcffrm = NULL;
		int len;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		skb_put_data(skb, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		netif_rx_any_context(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	netdev_dbg(cfhsi->ndev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
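	/* mod_timer_pending() only rearms a timer that is already pending;
	 * it will not restart a timer that has been deleted.
	 */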
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->inactivity_timer,
			jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0) {
				rx_len = desc_pld_len;
				if (piggy_desc->header & CFHSI_PIGGY_DESC)
					rx_len += CFHSI_DESC_SZ;
			}

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
				__func__);

		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
			/* Mark no embedded frame after extracting it */
			piggy_desc->offset = 0;
		}
	}

	/* Update state info */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(struct timer_list *t)
{
	struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* This happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout */
		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
			__func__);

		/* Check FIFO occupancy to see if the modem has sent anything. */
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
					&fifo_occupancy));

		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
			__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));

		if (ca_wake) {
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	}
wake_ack:
	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
		__func__);

	/* Mark the link as awake and clear the wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);

	if (WARN_ON(res < 0))
		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queues are not empty. */
	if (!cfhsi_tx_queue_len(cfhsi)) {
		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		netdev_err(cfhsi->ndev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout */
		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
					&ca_wake));
		if (!ca_wake)
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

static void cfhsi_aggregation_tout(struct timer_list *t)
{
	struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_start_tx(cfhsi);
}

static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;
	int prio;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	switch (skb->priority) {
	case TC_PRIO_BESTEFFORT:
	case TC_PRIO_FILLER:
	case TC_PRIO_BULK:
		prio = CFHSI_PRIO_BEBK;
		break;
	case TC_PRIO_INTERACTIVE_BULK:
		prio = CFHSI_PRIO_VI;
		break;
	case TC_PRIO_INTERACTIVE:
		prio = CFHSI_PRIO_VO;
		break;
	case TC_PRIO_CONTROL:
	default:
		prio = CFHSI_PRIO_CTL;
		break;
	}

	spin_lock_bh(&cfhsi->lock);

	/* Update aggregation statistics */
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);

	/* Queue the SKB */
	skb_queue_tail(&cfhsi->qhead[prio], skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		/* Send aggregate if it is possible */
		bool aggregate_ready =
			cfhsi_can_send_aggregate(cfhsi) &&
			del_timer(&cfhsi->aggregation_timer) > 0;
		spin_unlock_bh(&cfhsi->lock);
		if (aggregate_ready)
			cfhsi_start_tx(cfhsi);
		return NETDEV_TX_OK;
	}

	/* Delete inactivity timer if started. */
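	/* If the timer was still pending the link is awake and the transfer
	 * can be started directly; otherwise a wake-up has to be requested
	 * first (see below).
	 */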
	timer_active = del_timer_sync(&cfhsi->inactivity_timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		WARN_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return NETDEV_TX_OK;
}

static const struct net_device_ops cfhsi_netdevops;

static void cfhsi_setup(struct net_device *dev)
{
	int i;
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->needs_free_netdev = true;
	dev->netdev_ops = &cfhsi_netdevops;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		skb_queue_head_init(&cfhsi->qhead[i]);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
	cfhsi->cfg = hsi_default_config;
}

static int cfhsi_open(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	int res;

	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info */
	cfhsi->flow_off_sent = 0;

	/*
	 * Allocate a TX buffer with the size of a HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_flip_buf) {
		res = -ENODEV;
		goto err_alloc_rx_flip;
	}

	/* Initialize aggregation timeout */
	cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
	if (!cfhsi->wq) {
		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
	/* Setup the slowpath RX timer. */
	timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
	/* Setup the aggregation timer. */
	timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);

	/* Activate HSI interface. */
	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
	if (res) {
		netdev_err(cfhsi->ndev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}
	return res;

 err_net_reg:
	cfhsi->ops->cfhsi_down(cfhsi->ops);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_flip_buf);
 err_alloc_rx_flip:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	return res;
}

static int cfhsi_close(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	u8 *tx_buf, *rx_buf, *flip_buf;

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->inactivity_timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);
	del_timer_sync(&cfhsi->aggregation_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;
	flip_buf = cfhsi->rx_flip_buf;
	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->ops->cfhsi_down(cfhsi->ops);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
	kfree(flip_buf);
	return 0;
}

static void cfhsi_uninit(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	ASSERT_RTNL();
	symbol_put(cfhsi_get_device);
	list_del(&cfhsi->list);
}

static const struct net_device_ops cfhsi_netdevops = {
	.ndo_uninit = cfhsi_uninit,
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit
};

static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
	int i;

	if (!data) {
		pr_debug("no params data found\n");
		return;
	}

	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
	/*
	 * Inactivity timeout in millisecs. Lowest possible value is 1,
	 * and highest possible is NEXT_TIMER_MAX_DELTA.
	 */
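	/* The attribute is given in ms and converted to jiffies below, e.g.
	 * 500 ms becomes 500 * HZ / 1000 jiffies (50 ticks at HZ == 100).
	 */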
	if (data[i]) {
		u32 inactivity_timeout = nla_get_u32(data[i]);
		/* Pre-calculate inactivity timeout. */
		cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
		if (cfhsi->cfg.inactivity_timeout == 0)
			cfhsi->cfg.inactivity_timeout = 1;
		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}

	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
	if (data[i])
		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
	if (data[i])
		cfhsi->cfg.head_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
	if (data[i])
		cfhsi->cfg.tail_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
	if (data[i])
		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
	if (data[i])
		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
}

static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
			       struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	cfhsi_netlink_parms(data, netdev_priv(dev));
	netdev_state_change(dev);
	return 0;
}

static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
	[__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
	[__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};

static size_t caif_hsi_get_size(const struct net_device *dev)
{
	int i;
	size_t s = 0;
	for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
		s += nla_total_size(caif_hsi_policy[i].len);
	return s;
}

static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);

	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
			cfhsi->cfg.inactivity_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
			cfhsi->cfg.aggregation_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
			cfhsi->cfg.head_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
			cfhsi->cfg.tail_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
			cfhsi->cfg.q_high_mark) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
			cfhsi->cfg.q_low_mark))
		return -EMSGSIZE;

	return 0;
}

static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
			    struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_ops *(*get_ops)(void);

	ASSERT_RTNL();

	cfhsi = netdev_priv(dev);
	cfhsi_netlink_parms(data, cfhsi);

	get_ops = symbol_get(cfhsi_get_ops);
	if (!get_ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		return -ENODEV;
	}

	/* Assign the HSI device. */
	cfhsi->ops = (*get_ops)();
	if (!cfhsi->ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		goto err;
	}

	/* Assign the driver to this HSI device. */
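	/* Hand our callback table to the lower HSI layer so it can signal
	 * TX/RX completion and wake events back to this driver.
	 */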
	cfhsi->ops->cb_ops = &cfhsi->cb_ops;
	if (register_netdevice(dev)) {
		pr_warn("%s: caif_hsi device registration failed\n", __func__);
		goto err;
	}
	/* Add CAIF HSI device to list. */
	list_add_tail(&cfhsi->list, &cfhsi_list);

	return 0;
err:
	symbol_put(cfhsi_get_ops);
	return -ENODEV;
}

static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
	.kind		= "cfhsi",
	.priv_size	= sizeof(struct cfhsi),
	.setup		= cfhsi_setup,
	.maxtype	= __IFLA_CAIF_HSI_MAX,
	.policy		= caif_hsi_policy,
	.newlink	= caif_hsi_newlink,
	.changelink	= caif_hsi_changelink,
	.get_size	= caif_hsi_get_size,
	.fill_info	= caif_hsi_fill_info,
};

static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi;

	rtnl_link_unregister(&caif_hsi_link_ops);

	rtnl_lock();
	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		unregister_netdevice(cfhsi->ndev);
	}
	rtnl_unlock();
}

static int __init cfhsi_init_module(void)
{
	return rtnl_link_register(&caif_hsi_link_ops);
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);