// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014 Broadcom Corporation
 */

/*******************************************************************************
 * Communicates with the dongle by using dcmd codes.
 * For certain dcmd codes, the dongle interprets string data from the host.
 ******************************************************************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
#include "bus.h"
#include "tracepoint.h"


#define MSGBUF_IOCTL_RESP_TIMEOUT		msecs_to_jiffies(2000)

#define MSGBUF_TYPE_GEN_STATUS			0x1
#define MSGBUF_TYPE_RING_STATUS			0x2
#define MSGBUF_TYPE_FLOW_RING_CREATE		0x3
#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT	0x4
#define MSGBUF_TYPE_FLOW_RING_DELETE		0x5
#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT	0x6
#define MSGBUF_TYPE_FLOW_RING_FLUSH		0x7
#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT	0x8
#define MSGBUF_TYPE_IOCTLPTR_REQ		0x9
#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK		0xA
#define MSGBUF_TYPE_IOCTLRESP_BUF_POST		0xB
#define MSGBUF_TYPE_IOCTL_CMPLT			0xC
#define MSGBUF_TYPE_EVENT_BUF_POST		0xD
#define MSGBUF_TYPE_WL_EVENT			0xE
#define MSGBUF_TYPE_TX_POST			0xF
#define MSGBUF_TYPE_TX_STATUS			0x10
#define MSGBUF_TYPE_RXBUF_POST			0x11
#define MSGBUF_TYPE_RX_CMPLT			0x12
#define MSGBUF_TYPE_LPBK_DMAXFER		0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14

#define NR_TX_PKTIDS				2048
#define NR_RX_PKTIDS				1024

#define BRCMF_IOCTL_REQ_PKTID			0xFFFE

#define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
#define BRCMF_MSGBUF_MAX_CTL_PKT_SIZE		8192
#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8

#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3	0x01
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11	0x02
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK	0x07
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT	5

#define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2		96

#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS		48


struct msgbuf_common_hdr {
	u8 msgtype;
	u8 ifidx;
	u8 flags;
	u8 rsvd0;
	__le32 request_id;
};

struct msgbuf_ioctl_req_hdr {
	struct msgbuf_common_hdr msg;
	__le32 cmd;
	__le16 trans_id;
	__le16 input_buf_len;
	__le16 output_buf_len;
	__le16 rsvd0[3];
	struct msgbuf_buf_addr req_buf_addr;
	__le32 rsvd1[2];
};

struct msgbuf_tx_msghdr {
	struct msgbuf_common_hdr msg;
	u8 txhdr[ETH_HLEN];
	u8 flags;
	u8 seg_cnt;
	struct msgbuf_buf_addr metadata_buf_addr;
	struct msgbuf_buf_addr data_buf_addr;
	__le16 metadata_buf_len;
	__le16 data_len;
	__le32 rsvd0;
};

struct msgbuf_rx_bufpost {
	struct msgbuf_common_hdr msg;
	__le16 metadata_buf_len;
	__le16 data_buf_len;
	__le32 rsvd0;
	struct msgbuf_buf_addr metadata_buf_addr;
	struct msgbuf_buf_addr data_buf_addr;
};

struct msgbuf_rx_ioctl_resp_or_event {
	struct msgbuf_common_hdr msg;
	__le16 host_buf_len;
	__le16 rsvd0[3];
	struct msgbuf_buf_addr host_buf_addr;
	__le32 rsvd1[4];
};

struct msgbuf_completion_hdr {
	__le16 status;
	__le16 flow_ring_id;
};
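
/*
 * All device-to-host completion messages below embed
 * msgbuf_completion_hdr right after the common header: status carries
 * the firmware result code and flow_ring_id names the ring the
 * completion refers to. Every multi-byte field in these structures is
 * little endian, matching the layout the dongle reads from and writes
 * to host memory.
 */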
/* Data struct for the MSGBUF_TYPE_GEN_STATUS */
struct msgbuf_gen_status {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 write_idx;
	__le32 rsvd0[3];
};

/* Data struct for the MSGBUF_TYPE_RING_STATUS */
struct msgbuf_ring_status {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 write_idx;
	__le16 rsvd0[5];
};

struct msgbuf_rx_event {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 event_data_len;
	__le16 seqnum;
	__le16 rsvd0[4];
};

struct msgbuf_ioctl_resp_hdr {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 resp_len;
	__le16 trans_id;
	__le32 cmd;
	__le32 rsvd0;
};

struct msgbuf_tx_status {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 metadata_len;
	__le16 tx_status;
};

struct msgbuf_rx_complete {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le16 metadata_len;
	__le16 data_len;
	__le16 data_offset;
	__le16 flags;
	__le32 rx_status_0;
	__le32 rx_status_1;
	__le32 rsvd0;
};

struct msgbuf_tx_flowring_create_req {
	struct msgbuf_common_hdr msg;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	u8 tid;
	u8 if_flags;
	__le16 flow_ring_id;
	u8 tc;
	u8 priority;
	__le16 int_vector;
	__le16 max_items;
	__le16 len_item;
	struct msgbuf_buf_addr flow_ring_addr;
};

struct msgbuf_tx_flowring_delete_req {
	struct msgbuf_common_hdr msg;
	__le16 flow_ring_id;
	__le16 reason;
	__le32 rsvd0[7];
};

struct msgbuf_flowring_create_resp {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le32 rsvd0[3];
};

struct msgbuf_flowring_delete_resp {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le32 rsvd0[3];
};

struct msgbuf_flowring_flush_resp {
	struct msgbuf_common_hdr msg;
	struct msgbuf_completion_hdr compl_hdr;
	__le32 rsvd0[3];
};

struct brcmf_msgbuf_work_item {
	struct list_head queue;
	u32 flowid;
	int ifidx;
	u8 sa[ETH_ALEN];
	u8 da[ETH_ALEN];
};

struct brcmf_msgbuf {
	struct brcmf_pub *drvr;

	struct brcmf_commonring **commonrings;
	struct brcmf_commonring **flowrings;
	dma_addr_t *flowring_dma_handle;

	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;

	u16 rx_dataoffset;
	u32 max_rxbufpost;
	u16 rx_metadata_offset;
	u32 rxbufpost;

	u32 max_ioctlrespbuf;
	u32 cur_ioctlrespbuf;
	u32 max_eventbuf;
	u32 cur_eventbuf;

	void *ioctbuf;
	dma_addr_t ioctbuf_handle;
	u32 ioctbuf_phys_hi;
	u32 ioctbuf_phys_lo;
	int ioctl_resp_status;
	u32 ioctl_resp_ret_len;
	u32 ioctl_resp_pktid;

	u16 data_seq_no;
	u16 ioctl_seq_no;
	u32 reqid;
	wait_queue_head_t ioctl_resp_wait;
	bool ctl_completed;

	struct brcmf_msgbuf_pktids *tx_pktids;
	struct brcmf_msgbuf_pktids *rx_pktids;
	struct brcmf_flowring *flow;

	struct workqueue_struct *txflow_wq;
	struct work_struct txflow_work;
	unsigned long *flow_map;
	unsigned long *txstatus_done_map;

	struct work_struct flowring_work;
	spinlock_t flowring_work_lock;
	struct list_head work_queue;
};

struct brcmf_msgbuf_pktid {
	atomic_t allocated;
	u16 data_offset;
	struct sk_buff *skb;
	dma_addr_t physaddr;
};
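
/*
 * Packet ID bookkeeping. Every skb handed to the device is DMA-mapped
 * and parked in a slot of a fixed-size array; the slot index travels to
 * the firmware as msg.request_id and comes back in the matching
 * completion, where it is used to unmap and look up the skb again.
 */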
struct brcmf_msgbuf_pktids {
	u32 array_size;
	u32 last_allocated_idx;
	enum dma_data_direction direction;
	struct brcmf_msgbuf_pktid *array;
};

static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);


static struct brcmf_msgbuf_pktids *
brcmf_msgbuf_init_pktids(u32 nr_array_entries,
			 enum dma_data_direction direction)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktids *pktids;

	array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
	if (!pktids) {
		kfree(array);
		return NULL;
	}
	pktids->array = array;
	pktids->array_size = nr_array_entries;
	/* store the DMA direction; map/unmap below rely on it */
	pktids->direction = direction;

	return pktids;
}


static int
brcmf_msgbuf_alloc_pktid(struct device *dev,
			 struct brcmf_msgbuf_pktids *pktids,
			 struct sk_buff *skb, u16 data_offset,
			 dma_addr_t *physaddr, u32 *idx)
{
	struct brcmf_msgbuf_pktid *array;
	u32 count;

	array = pktids->array;

	*physaddr = dma_map_single(dev, skb->data + data_offset,
				   skb->len - data_offset, pktids->direction);

	if (dma_mapping_error(dev, *physaddr)) {
		brcmf_err("dma_map_single failed !!\n");
		return -ENOMEM;
	}

	*idx = pktids->last_allocated_idx;

	count = 0;
	do {
		(*idx)++;
		if (*idx == pktids->array_size)
			*idx = 0;
		if (array[*idx].allocated.counter == 0)
			if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
				break;
		count++;
	} while (count < pktids->array_size);

	if (count == pktids->array_size) {
		dma_unmap_single(dev, *physaddr, skb->len - data_offset,
				 pktids->direction);
		return -ENOMEM;
	}

	array[*idx].data_offset = data_offset;
	array[*idx].physaddr = *physaddr;
	array[*idx].skb = skb;

	pktids->last_allocated_idx = *idx;

	return 0;
}


static struct sk_buff *
brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
		       u32 idx)
{
	struct brcmf_msgbuf_pktid *pktid;
	struct sk_buff *skb;

	if (idx >= pktids->array_size) {
		brcmf_err("Invalid packet id %d (max %d)\n", idx,
			  pktids->array_size);
		return NULL;
	}
	if (pktids->array[idx].allocated.counter) {
		pktid = &pktids->array[idx];
		dma_unmap_single(dev, pktid->physaddr,
				 pktid->skb->len - pktid->data_offset,
				 pktids->direction);
		skb = pktid->skb;
		pktid->allocated.counter = 0;
		return skb;
	} else {
		brcmf_err("Invalid packet id %d (not in use)\n", idx);
	}

	return NULL;
}


static void
brcmf_msgbuf_release_array(struct device *dev,
			   struct brcmf_msgbuf_pktids *pktids)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktid *pktid;
	u32 count;

	array = pktids->array;
	count = 0;
	do {
		if (array[count].allocated.counter) {
			pktid = &array[count];
			dma_unmap_single(dev, pktid->physaddr,
					 pktid->skb->len - pktid->data_offset,
					 pktids->direction);
			brcmu_pkt_buf_free_skb(pktid->skb);
		}
		count++;
	} while (count < pktids->array_size);

	kfree(array);
	kfree(pktids);
}


static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
{
	if (msgbuf->rx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->rx_pktids);
	if (msgbuf->tx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->tx_pktids);
}
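
/*
 * Control path. An ioctl/iovar request is written into the H2D control
 * submission ring as a MSGBUF_TYPE_IOCTLPTR_REQ message. The payload is
 * not inlined: it is copied into the single preallocated ioctbuf DMA
 * buffer and only that buffer's address is handed to the firmware.
 */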
static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	struct msgbuf_ioctl_req_hdr *request;
	u16 buf_len;
	void *ret_ptr;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return -ENOMEM;
	}

	msgbuf->reqid++;

	request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
	request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	request->msg.ifidx = (u8)ifidx;
	request->msg.flags = 0;
	request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
	request->cmd = cpu_to_le32(cmd);
	request->output_buf_len = cpu_to_le16(len);
	request->trans_id = cpu_to_le16(msgbuf->reqid);

	buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
	request->input_buf_len = cpu_to_le16(buf_len);
	request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
	request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
	if (buf)
		memcpy(msgbuf->ioctbuf, buf, buf_len);
	else
		memset(msgbuf->ioctbuf, 0, buf_len);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);

	return err;
}


static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
{
	return wait_event_timeout(msgbuf->ioctl_resp_wait,
				  msgbuf->ctl_completed,
				  MSGBUF_IOCTL_RESP_TIMEOUT);
}


static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
{
	msgbuf->ctl_completed = true;
	wake_up(&msgbuf->ioctl_resp_wait);
}
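
/*
 * Send a dcmd and block (up to MSGBUF_IOCTL_RESP_TIMEOUT) until
 * brcmf_msgbuf_process_ioctl_complete() wakes us. The response payload
 * lives in the posted ioctl response buffer identified by
 * msgbuf->ioctl_resp_pktid; the firmware error code is returned
 * separately through @fwerr.
 */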
static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
				   uint cmd, void *buf, uint len, int *fwerr)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct sk_buff *skb = NULL;
	int timeout;
	int err;

	brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
	*fwerr = 0;
	msgbuf->ctl_completed = false;
	err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
	if (err)
		return err;

	timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
	if (!timeout) {
		bphy_err(drvr, "Timeout on response for query command\n");
		return -EIO;
	}

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids,
				     msgbuf->ioctl_resp_pktid);
	if (msgbuf->ioctl_resp_ret_len != 0) {
		if (!skb)
			return -EBADF;

		memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
				       len : msgbuf->ioctl_resp_ret_len);
	}
	brcmu_pkt_buf_free_skb(skb);

	*fwerr = msgbuf->ioctl_resp_status;
	return 0;
}


static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len, int *fwerr)
{
	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}


static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
				struct sk_buff *skb, struct brcmf_if **ifp)
{
	return -ENODEV;
}

static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
				   bool inirq)
{
}

static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	u32 dma_sz;
	void *dma_buf;

	brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);

	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = msgbuf->flowrings[flowid]->buf_addr;
	dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
			  msgbuf->flowring_dma_handle[flowid]);

	brcmf_flowring_delete(msgbuf->flow, flowid);
}


static struct brcmf_msgbuf_work_item *
brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
{
	struct brcmf_msgbuf_work_item *work = NULL;
	ulong flags;

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	if (!list_empty(&msgbuf->work_queue)) {
		work = list_first_entry(&msgbuf->work_queue,
					struct brcmf_msgbuf_work_item, queue);
		list_del(&work->queue);
	}
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);

	return work;
}
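
/*
 * Flowring creation runs from a work item so the DMA ring memory can be
 * allocated in process context. The ring is configured first, then a
 * MSGBUF_TYPE_FLOW_RING_CREATE request is submitted; queued tx packets
 * are held back until the create completion opens the ring.
 */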
static u32
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_msgbuf_work_item *work)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_tx_flowring_create_req *create;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 flowid;
	void *dma_buf;
	u32 dma_sz;
	u64 address;
	int err;

	flowid = work->flowid;
	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
				     &msgbuf->flowring_dma_handle[flowid],
				     GFP_KERNEL);
	if (!dma_buf) {
		bphy_err(drvr, "dma_alloc_coherent failed\n");
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	brcmf_commonring_config(msgbuf->flowrings[flowid],
				BRCMF_H2D_TXFLOWRING_MAX_ITEM,
				BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
	create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	create->msg.ifidx = work->ifidx;
	create->msg.request_id = 0;
	create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
	create->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	memcpy(create->sa, work->sa, ETH_ALEN);
	memcpy(create->da, work->da, ETH_ALEN);
	address = (u64)msgbuf->flowring_dma_handle[flowid];
	create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
	create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
	create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);

	brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
		  flowid, work->da, create->tid, work->ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		bphy_err(drvr, "Failed to write commonring\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	return flowid;
}


static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *create;

	msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);

	while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
		brcmf_msgbuf_flowring_create_worker(msgbuf, create);
		kfree(create);
	}
}


static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
					struct sk_buff *skb)
{
	struct brcmf_msgbuf_work_item *create;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	ulong flags;

	create = kzalloc(sizeof(*create), GFP_ATOMIC);
	if (create == NULL)
		return BRCMF_FLOWRING_INVALID_ID;

	flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
				       skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		kfree(create);
		return flowid;
	}

	create->flowid = flowid;
	create->ifidx = ifidx;
	memcpy(create->sa, eh->h_source, ETH_ALEN);
	memcpy(create->da, eh->h_dest, ETH_ALEN);

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	list_add_tail(&create->queue, &msgbuf->work_queue);
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
	schedule_work(&msgbuf->flowring_work);

	return flowid;
}
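
/*
 * Drain one flowring: dequeue skbs, map them through the tx pktid table
 * and post MSGBUF_TYPE_TX_POST descriptors. The write pointer is
 * published in batches (at most every BRCMF_MSGBUF_TX_FLUSH_CNT2 items)
 * to limit how often the device is signalled.
 */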
static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	struct brcmf_flowring *flow = msgbuf->flow;
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 count;
	struct sk_buff *skb;
	dma_addr_t physaddr;
	u32 pktid;
	struct msgbuf_tx_msghdr *tx_msghdr;
	u64 address;

	commonring = msgbuf->flowrings[flowid];
	if (!brcmf_commonring_write_available(commonring))
		return;

	brcmf_commonring_lock(commonring);

	count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
	while (brcmf_flowring_qlen(flow, flowid)) {
		skb = brcmf_flowring_dequeue(flow, flowid);
		if (skb == NULL) {
			bphy_err(drvr, "No SKB, but qlen %d\n",
				 brcmf_flowring_qlen(flow, flowid));
			break;
		}
		skb_orphan(skb);
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->tx_pktids, skb, ETH_HLEN,
					     &physaddr, &pktid)) {
			brcmf_flowring_reinsert(flow, flowid, skb);
			bphy_err(drvr, "No PKTID available !!\n");
			break;
		}
		ret_ptr = brcmf_commonring_reserve_for_write(commonring);
		if (!ret_ptr) {
			brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
					       msgbuf->tx_pktids, pktid);
			brcmf_flowring_reinsert(flow, flowid, skb);
			break;
		}
		count++;

		tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;

		tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
		tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
		tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
		tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
		tx_msghdr->flags |= (skb->priority & 0x07) <<
				    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
		tx_msghdr->seg_cnt = 1;
		memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
		tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
		address = (u64)physaddr;
		tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
		tx_msghdr->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		tx_msghdr->metadata_buf_len = 0;
		tx_msghdr->metadata_buf_addr.high_addr = 0;
		tx_msghdr->metadata_buf_addr.low_addr = 0;
		atomic_inc(&commonring->outstanding_tx);
		if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
			brcmf_commonring_write_complete(commonring);
			count = 0;
		}
	}
	if (count)
		brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
}


static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
{
	struct brcmf_msgbuf *msgbuf;
	u32 flowid;

	msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
	for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
		clear_bit(flowid, msgbuf->flow_map);
		brcmf_msgbuf_txflow(msgbuf, flowid);
	}
}


static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
					bool force)
{
	struct brcmf_commonring *commonring;

	set_bit(flowid, msgbuf->flow_map);
	commonring = msgbuf->flowrings[flowid];
	if ((force) || (atomic_read(&commonring->outstanding_tx) <
			BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
		queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);

	return 0;
}


static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
				      struct sk_buff *skb)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_flowring *flow = msgbuf->flow;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	u32 queue_count;
	bool force;

	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
		if (flowid == BRCMF_FLOWRING_INVALID_ID)
			return -ENOMEM;
	}
	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
	force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);

	return 0;
}


static void
brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
				 enum proto_addr_mode addr_mode)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
}


static void
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
}


static void
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
}
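
/*
 * Handlers below decode device-to-host completion messages. Wherever a
 * completion consumes a host buffer (ioctl response, event), a
 * replacement buffer is posted immediately so the firmware does not run
 * out of them.
 */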
static void
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_ioctl_resp_hdr *ioctl_resp;

	ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;

	msgbuf->ioctl_resp_status =
		(s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
	msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
	msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);

	brcmf_msgbuf_ioctl_resp_wake(msgbuf);

	if (msgbuf->cur_ioctlrespbuf)
		msgbuf->cur_ioctlrespbuf--;
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
}


static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_commonring *commonring;
	struct msgbuf_tx_status *tx_status;
	u32 idx;
	struct sk_buff *skb;
	u16 flowid;

	tx_status = (struct msgbuf_tx_status *)buf;
	idx = le32_to_cpu(tx_status->msg.request_id) - 1;
	flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->tx_pktids, idx);
	if (!skb)
		return;

	set_bit(flowid, msgbuf->txstatus_done_map);
	commonring = msgbuf->flowrings[flowid];
	atomic_dec(&commonring->outstanding_tx);

	brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
			 skb, true);
}


static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_bufpost *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			bphy_err(drvr, "Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			bphy_err(drvr, "No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		if (msgbuf->rx_metadata_offset) {
			address = (u64)physaddr;
			rx_bufpost->metadata_buf_len =
				cpu_to_le16(msgbuf->rx_metadata_offset);
			rx_bufpost->metadata_buf_addr.high_addr =
				cpu_to_le32(address >> 32);
			rx_bufpost->metadata_buf_addr.low_addr =
				cpu_to_le32(address & 0xffffffff);

			skb_pull(skb, msgbuf->rx_metadata_offset);
			pktlen = skb->len;
			physaddr += msgbuf->rx_metadata_offset;
		}
		rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->data_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	return i;
}
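
/*
 * Keep the number of posted rx buffers at max_rxbufpost. The refill is
 * triggered again from brcmf_msgbuf_update_rxbufpost_count() once the
 * posted count drops by BRCMF_MSGBUF_RXBUFPOST_THRESHOLD.
 */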
static void
brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
{
	u32 fillbufs;
	u32 retcount;

	fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;

	while (fillbufs) {
		retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
		if (!retcount)
			break;
		msgbuf->rxbufpost += retcount;
		fillbufs -= retcount;
	}
}


static void
brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
{
	msgbuf->rxbufpost -= rxcnt;
	if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
				  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
}


static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
			     u32 count)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_CTL_PKT_SIZE);

		if (skb == NULL) {
			bphy_err(drvr, "Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			bphy_err(drvr, "No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}
		if (event_buf)
			rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
		else
			rx_bufpost->msg.msgtype =
				MSGBUF_TYPE_IOCTLRESP_BUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->host_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->host_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	brcmf_commonring_unlock(commonring);

	return i;
}


static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
	msgbuf->cur_ioctlrespbuf += count;
}


static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
	msgbuf->cur_eventbuf += count;
}
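
/*
 * Firmware event path: repost an event buffer first, then trim the skb
 * to the reported event length and hand it to the common firmware event
 * handler.
 */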
static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_rx_event *event;
	u32 idx;
	u16 buflen;
	struct sk_buff *skb;
	struct brcmf_if *ifp;

	event = (struct msgbuf_rx_event *)buf;
	idx = le32_to_cpu(event->msg.request_id);
	buflen = le16_to_cpu(event->event_data_len);

	if (msgbuf->cur_eventbuf)
		msgbuf->cur_eventbuf--;
	brcmf_msgbuf_rxbuf_event_post(msgbuf);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
			 event->msg.ifidx);
		goto exit;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);

	brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);

exit:
	brcmu_pkt_buf_free_skb(skb);
}


static void
brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_rx_complete *rx_complete;
	struct sk_buff *skb;
	u16 data_offset;
	u16 buflen;
	u16 flags;
	u32 idx;
	struct brcmf_if *ifp;

	brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);

	rx_complete = (struct msgbuf_rx_complete *)buf;
	data_offset = le16_to_cpu(rx_complete->data_offset);
	buflen = le16_to_cpu(rx_complete->data_len);
	idx = le32_to_cpu(rx_complete->msg.request_id);
	flags = le16_to_cpu(rx_complete->flags);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (data_offset)
		skb_pull(skb, data_offset);
	else if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
	    BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
		ifp = msgbuf->drvr->mon_if;

		if (!ifp) {
			bphy_err(drvr, "Received unexpected monitor pkt\n");
			brcmu_pkt_buf_free_skb(skb);
			return;
		}

		brcmf_netif_mon_rx(ifp, skb);
		return;
	}

	ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
			 rx_complete->msg.ifidx);
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);
	brcmf_netif_rx(ifp, skb, false);
}

static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
					    void *buf)
{
	struct msgbuf_gen_status *gen_status = buf;
	struct brcmf_pub *drvr = msgbuf->drvr;
	int err;

	err = le16_to_cpu(gen_status->compl_hdr.status);
	if (err)
		bphy_err(drvr, "Firmware reported general error: %d\n", err);
}

static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf,
					     void *buf)
{
	struct msgbuf_ring_status *ring_status = buf;
	struct brcmf_pub *drvr = msgbuf->drvr;
	int err;

	err = le16_to_cpu(ring_status->compl_hdr.status);
	if (err) {
		int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id);

		bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring,
			 err);
	}
}
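
/*
 * Flowring create/delete completions. Ring IDs on the wire are offset
 * by BRCMF_H2D_MSGRING_FLOWRING_IDSTART and are converted back to host
 * flowring indices before use.
 */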
static void
brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_flowring_create_resp *flowring_create_resp;
	u16 status;
	u16 flowid;

	flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;

	flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	status = le16_to_cpu(flowring_create_resp->compl_hdr.status);

	if (status) {
		bphy_err(drvr, "Flowring creation failed, code %d\n", status);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
		  status);

	brcmf_flowring_open(msgbuf->flow, flowid);

	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
}


static void
brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_flowring_delete_resp *flowring_delete_resp;
	u16 status;
	u16 flowid;

	flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;

	flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);

	if (status) {
		bphy_err(drvr, "Flowring deletion failed, code %d\n", status);
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
		  status);

	brcmf_msgbuf_remove_flowring(msgbuf, flowid);
}


static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_common_hdr *msg;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype) {
	case MSGBUF_TYPE_GEN_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n");
		brcmf_msgbuf_process_gen_status(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RING_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n");
		brcmf_msgbuf_process_ring_status(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
		brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
		brcmf_msgbuf_process_event(msgbuf, buf);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
		brcmf_msgbuf_process_txstatus(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
		break;
	default:
		bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
		break;
	}
}
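
/*
 * Walk a D2H completion ring. The read pointer is published back every
 * BRCMF_MSGBUF_UPDATE_RX_PTR_THRS messages so the firmware can reuse
 * ring space while a large batch is still being processed.
 */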
static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_commonring *commonring)
{
	void *buf;
	u16 count;
	u16 processed;

again:
	buf = brcmf_commonring_get_read_ptr(commonring, &count);
	if (buf == NULL)
		return;

	processed = 0;
	while (count) {
		brcmf_msgbuf_process_msgtype(msgbuf,
					     buf + msgbuf->rx_dataoffset);
		buf += brcmf_commonring_len_item(commonring);
		processed++;
		if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
			brcmf_commonring_read_complete(commonring, processed);
			processed = 0;
		}
		count--;
	}
	if (processed)
		brcmf_commonring_read_complete(commonring, processed);

	if (commonring->r_ptr == 0)
		goto again;
}


int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	void *buf;
	u32 flowid;
	int qlen;

	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);

	for_each_set_bit(flowid, msgbuf->txstatus_done_map,
			 msgbuf->max_flowrings) {
		clear_bit(flowid, msgbuf->txstatus_done_map);
		commonring = msgbuf->flowrings[flowid];
		qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
		if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
		    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
				BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
			brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
	}

	return 0;
}


void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct msgbuf_tx_flowring_delete_req *delete;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u8 ifidx;
	int err;

	/* no need to submit if firmware can not be reached */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		brcmf_dbg(MSGBUF, "bus down, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "FW unaware, flowring will be removed !!\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;

	ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);

	delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	delete->msg.ifidx = ifidx;
	delete->msg.request_id = 0;

	delete->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	delete->reason = 0;

	brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
		  flowid, ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
	}
}
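
/* debugfs dump of commonring state and all open flowrings (DEBUG only) */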
#ifdef DEBUG
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	u16 i;
	struct brcmf_flowring_ring *ring;
	struct brcmf_flowring_hash *hash;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);

	seq_printf(seq, "\nh2d_flowrings: depth %u\n",
		   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	seq_puts(seq, "Active flowrings:\n");
	for (i = 0; i < msgbuf->flow->nrofrings; i++) {
		if (!msgbuf->flow->rings[i])
			continue;
		ring = msgbuf->flow->rings[i];
		if (ring->status != RING_OPEN)
			continue;
		commonring = msgbuf->flowrings[i];
		hash = &msgbuf->flow->hash[ring->hash_id];
		seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
				"        ifidx %u, fifo %u, da %pM\n",
				i, commonring->r_ptr, commonring->w_ptr,
				skb_queue_len(&ring->skblist), ring->blocked,
				hash->ifidx, hash->fifo, hash->mac);
	}

	return 0;
}
#else
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	return 0;
}
#endif

static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr)
{
	brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
}
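
/*
 * Protocol attach: allocate the pktid tables, the shared ioctl DMA
 * buffer and the flowring bookkeeping, hook the msgbuf operations into
 * drvr->proto, then pre-post rx data, event and ioctl response buffers.
 */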
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
	struct brcmf_bus_msgbuf *if_msgbuf;
	struct brcmf_msgbuf *msgbuf;
	u64 address;
	u32 count;

	if_msgbuf = drvr->bus_if->msgbuf;

	if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
		bphy_err(drvr, "driver not configured for this many flowrings %d\n",
			 if_msgbuf->max_flowrings);
		if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
	}

	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
	if (!msgbuf)
		goto fail;

	msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
	if (msgbuf->txflow_wq == NULL) {
		bphy_err(drvr, "workqueue creation failed\n");
		goto fail;
	}
	INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
	count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
	count = count * sizeof(unsigned long);
	msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->flow_map)
		goto fail;

	msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->txstatus_done_map)
		goto fail;

	msgbuf->drvr = drvr;
	msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
					     BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					     &msgbuf->ioctbuf_handle,
					     GFP_KERNEL);
	if (!msgbuf->ioctbuf)
		goto fail;
	address = (u64)msgbuf->ioctbuf_handle;
	msgbuf->ioctbuf_phys_hi = address >> 32;
	msgbuf->ioctbuf_phys_lo = address & 0xffffffff;

	drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
	drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
	drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
	drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data;
	drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
	drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
	drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
	drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
	drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create;
	drvr->proto->pd = msgbuf;

	init_waitqueue_head(&msgbuf->ioctl_resp_wait);

	msgbuf->commonrings =
		(struct brcmf_commonring **)if_msgbuf->commonrings;
	msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
	msgbuf->max_flowrings = if_msgbuf->max_flowrings;
	msgbuf->flowring_dma_handle =
		kcalloc(msgbuf->max_flowrings,
			sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
	if (!msgbuf->flowring_dma_handle)
		goto fail;

	msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
	msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;

	msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
	msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;

	msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
						     DMA_TO_DEVICE);
	if (!msgbuf->tx_pktids)
		goto fail;
	msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
						     DMA_FROM_DEVICE);
	if (!msgbuf->rx_pktids)
		goto fail;

	msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
					     if_msgbuf->max_flowrings);
	if (!msgbuf->flow)
		goto fail;


	brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
		  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
		  msgbuf->max_ioctlrespbuf);
	count = 0;
	do {
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
		if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
			msleep(10);
		else
			break;
		count++;
	} while (count < 10);
	brcmf_msgbuf_rxbuf_event_post(msgbuf);
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);

	INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
	spin_lock_init(&msgbuf->flowring_work_lock);
	INIT_LIST_HEAD(&msgbuf->work_queue);

	return 0;

fail:
	if (msgbuf) {
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		if (msgbuf->ioctbuf)
			dma_free_coherent(drvr->bus_if->dev,
					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					  msgbuf->ioctbuf,
					  msgbuf->ioctbuf_handle);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);
		kfree(msgbuf);
	}
	return -ENOMEM;
}
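
/*
 * Tear-down mirrors attach: cancel pending flowring work, destroy the
 * tx flow workqueue and release every DMA mapping still held in the
 * pktid tables.
 */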
void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *work;

	brcmf_dbg(TRACE, "Enter\n");
	if (drvr->proto->pd) {
		msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
		cancel_work_sync(&msgbuf->flowring_work);
		while (!list_empty(&msgbuf->work_queue)) {
			work = list_first_entry(&msgbuf->work_queue,
						struct brcmf_msgbuf_work_item,
						queue);
			list_del(&work->queue);
			kfree(work);
		}
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);

		brcmf_flowring_detach(msgbuf->flow);
		dma_free_coherent(drvr->bus_if->dev,
				  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
				  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		kfree(msgbuf);
		drvr->proto->pd = NULL;
	}
}