// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver - gadget side.
 *
 * Copyright (C) 2018-2019 Cadence Design Systems.
 * Copyright (C) 2017-2018 NXP
 *
 * Authors: Pawel Jez <pjez@cadence.com>,
 *          Pawel Laszczak <pawell@cadence.com>
 *          Peter Chen <peter.chen@nxp.com>
 */

/*
 * Workaround 1:
 * In some situations the controller may fetch a stale data address from a
 * TRB, in the following sequence:
 * 1. Controller reads a TRB that includes the data address.
 * 2. Software updates TRBs, including the data address and the Cycle bit.
 * 3. Controller reads the TRB again, which now includes the new Cycle bit.
 * 4. DMA runs with the stale data address.
 *
 * To fix this problem, the driver must make the first TRB in a TD invalid.
 * After preparing all TRBs the driver must check the DMA position, and if
 * DMA points to the first just-added TRB while the doorbell is set, the
 * driver must defer making this TRB valid. The TRB is then made valid while
 * adding the next TRB, but only once DMA has stopped or on a TRBERR
 * interrupt.
 *
 * The issue has been fixed in the DEV_VER_V3 version of the controller.
 *
 * Workaround 2:
 * For OUT endpoints the controller uses a single on-chip buffer shared by
 * all incoming packets, including ep0out. It is a FIFO buffer, so packets
 * must be handled by DMA in the correct order. If the first packet in the
 * buffer is not handled, the following packets directed to other endpoints
 * and functions are blocked.
 * Additionally, packets directed to one endpoint can block the entire
 * on-chip buffer. In this case transfers to other endpoints are also
 * blocked.
 *
 * To resolve this issue, after raising the descriptor missing interrupt the
 * driver prepares an internal usb_request object and uses it to arm the DMA
 * transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set by
 * the macro CDNS3_WA2_NUM_BUFFERS.
 *
 * Such a blocking situation was observed with the ACM gadget. For this
 * function the host sends an OUT data packet but the ACM function is not
 * prepared for it. This causes the buffer placed in on-chip memory to block
 * transfers to other endpoints.
 *
 * The issue has been fixed in the DEV_VER_V2 version of the controller.
 */

#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/module.h>
#include <linux/iopoll.h>

#include "core.h"
#include "gadget-export.h"
#include "gadget.h"
#include "trace.h"
#include "drd.h"

static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags);

static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request);

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request);

/**
 * cdns3_clear_register_bit - clear bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to clear
 */
static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}

/**
 * cdns3_set_register_bit - set bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to set
 */
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}

/**
 * cdns3_ep_addr_to_index - Converts endpoint address to
 * index of endpoint object in cdns3_device.eps[] container
 * @ep_addr: endpoint address for which endpoint object is required
 *
 */
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
}

static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
			     struct cdns3_endpoint *priv_ep)
{
	int dma_index;

	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;

	return dma_index / TRB_SIZE;
}

/**
 * cdns3_next_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct usb_request *cdns3_next_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct usb_request, list);
}

/**
 * cdns3_next_align_buf - returns next buffer from list
 * @list: list containing buffers
 *
 * Returns buffer or NULL if no buffers in list
 */
static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}

/**
 * cdns3_next_priv_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_request, list);
}

/**
 * cdns3_select_ep - selects endpoint
 * @priv_dev: extended gadget object
 * @ep: endpoint address
 */
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
{
	if (priv_dev->selected_ep == ep)
		return;

	priv_dev->selected_ep = ep;
	writel(ep, &priv_dev->regs->ep_sel);
}

/**
 * cdns3_get_tdl - gets current tdl for selected endpoint.
 * @priv_dev: extended gadget object
 *
 * Before calling this function the appropriate endpoint must
 * be selected by means of cdns3_select_ep function.
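 *
 * Controller versions older than DEV_VER_V3 report the TDL through the
 * ep_cmd register; newer versions expose it in the dedicated ep_tdl
 * register instead.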
 */
static int cdns3_get_tdl(struct cdns3_device *priv_dev)
{
	if (priv_dev->dev_ver < DEV_VER_V3)
		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
	else
		return readl(&priv_dev->regs->ep_tdl);
}

dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
				 struct cdns3_trb *trb)
{
	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;

	return priv_ep->trb_pool_dma + offset;
}

static int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
{
	switch (priv_ep->type) {
	case USB_ENDPOINT_XFER_ISOC:
		return TRB_ISO_RING_SIZE;
	case USB_ENDPOINT_XFER_CONTROL:
		return TRB_CTRL_RING_SIZE;
	default:
		if (priv_ep->use_streams)
			return TRB_STREAM_RING_SIZE;
		else
			return TRB_RING_SIZE;
	}
}

static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_ep->trb_pool) {
		dma_free_coherent(priv_dev->sysdev,
				  cdns3_ring_size(priv_ep),
				  priv_ep->trb_pool, priv_ep->trb_pool_dma);
		priv_ep->trb_pool = NULL;
	}
}

/**
 * cdns3_allocate_trb_pool - Allocates TRB's pool for selected endpoint
 * @priv_ep: endpoint object
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = cdns3_ring_size(priv_ep);
	int num_trbs = ring_size / TRB_SIZE;
	struct cdns3_trb *link_trb;

	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
		cdns3_free_trb_pool(priv_ep);

	if (!priv_ep->trb_pool) {
		priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev,
						       ring_size,
						       &priv_ep->trb_pool_dma,
						       GFP_DMA32 | GFP_ATOMIC);
		if (!priv_ep->trb_pool)
			return -ENOMEM;

		priv_ep->alloc_ring_size = ring_size;
	}

	memset(priv_ep->trb_pool, 0, ring_size);

	priv_ep->num_trbs = num_trbs;

	if (!priv_ep->num)
		return 0;

	/* Initialize the last TRB as Link TRB */
	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));

	if (priv_ep->use_streams) {
		/*
		 * For stream capable endpoints the driver uses a single,
		 * correct TRB. The last TRB has a zeroed cycle bit.
		 */
		link_trb->control = 0;
	} else {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
	}
	return 0;
}

/**
 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
 * @priv_ep: endpoint object
 *
 * Endpoint must be selected before call to this function
 */
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int val;

	trace_cdns3_halt(priv_ep, 1, 1);

	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
	       &priv_dev->regs->ep_cmd);

	/* wait for DFLUSH cleared */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
				  !(val & EP_CMD_DFLUSH), 1, 1000);
	priv_ep->flags |= EP_STALLED;
	priv_ep->flags &= ~EP_STALL_PENDING;
}

/**
 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
 * @priv_dev: extended gadget object
 */
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
	int i;

	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);

	cdns3_allow_enable_l1(priv_dev, 0);
	priv_dev->hw_configured_flag = 0;
	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->wait_for_setup = 0;
	priv_dev->using_streams = 0;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i])
			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}

/**
 * cdns3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 * @cs: Cycle state
 * @trb_in_seg: number of TRBs in segment
 *
 * The index should never point to the link TRB. After incrementing,
 * if it points to the link TRB, wrap around to the beginning and toggle
 * the cycle state bit. The link TRB is always the last TRB entry.
 */
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		*index = 0;
		*cs ^= 1;
	}
}

/**
 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
 * @priv_ep: The endpoint whose enqueue pointer we're incrementing
 */
static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs--;
	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
}

/**
 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
 * @priv_ep: The endpoint whose dequeue pointer we're incrementing
 */
static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs++;
	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}

/**
 * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
 * @priv_dev: Extended gadget object
 * @enable: Enable/disable permit to transition to L1.
 *
 * If bit USB_CONF_L1EN is set and the device receives an Extended Token
 * packet, the controller answers with an ACK handshake.
 * If bit USB_CONF_L1DS is set and the device receives an Extended Token
 * packet, the controller answers with a NYET handshake.
 */
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{
	if (enable)
		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
	else
		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}

enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
{
	u32 reg;

	reg = readl(&priv_dev->regs->usb_sts);

	if (DEV_SUPERSPEED(reg))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(reg))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(reg))
		return USB_SPEED_FULL;
	else if (DEV_LOWSPEED(reg))
		return USB_SPEED_LOW;
	return USB_SPEED_UNKNOWN;
}

/**
 * cdns3_start_all_request - add to the ring all requests not yet started
 * @priv_dev: Extended gadget object
 * @priv_ep: The endpoint for which requests will be started.
 *
 * Returns -ENOMEM if the transfer ring does not have enough TRBs to start
 * all requests.
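 *
 * If the most recent pending request is internal (a WA2 DESCMISS buffer),
 * or TDL checking or streams are enabled for the endpoint, new deferred
 * requests are left queued until the pending one completes.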
 */
static int cdns3_start_all_request(struct cdns3_device *priv_dev,
				   struct cdns3_endpoint *priv_ep)
{
	struct usb_request *request;
	int ret = 0;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/*
	 * If the last pending transfer is INTERNAL
	 * OR streams are enabled for this endpoint
	 * do NOT start a new transfer while the last one is still pending
	 */
	if (!pending_empty) {
		struct cdns3_request *priv_req;

		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);
		if ((priv_req->flags & REQUEST_INTERNAL) ||
		    (priv_ep->flags & EP_TDLCHK_EN) ||
		    priv_ep->use_streams) {
			dev_dbg(priv_dev->dev, "Blocking external request\n");
			return ret;
		}
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		if (!priv_ep->use_streams) {
			ret = cdns3_ep_run_transfer(priv_ep, request);
		} else {
			priv_ep->stream_sg_idx = 0;
			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
		}
		if (ret)
			return ret;

		list_del(&request->list);
		list_add_tail(&request->list,
			      &priv_ep->pending_req_list);
		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
			break;
	}

	priv_ep->flags &= ~EP_RING_FULL;
	return ret;
}

/*
 * WA2: Set flag for all non-ISOC OUT endpoints. If this flag is set
 * the driver tries to detect whether the endpoint needs an additional
 * internal buffer for unblocking the on-chip FIFO buffer. This flag will
 * be cleared if the DMA is armed before the first DESCMISS interrupt.
 */
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
		(reg) |= EP_STS_EN_DESCMISEN; \
	} } while (0)

static void __cdns3_descmiss_copy_data(struct usb_request *request,
				       struct usb_request *descmiss_req)
{
	int length = request->actual + descmiss_req->actual;
	struct scatterlist *s = request->sg;

	if (!s) {
		if (length <= request->length) {
			memcpy(&((u8 *)request->buf)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			/* It should never occur */
			request->status = -ENOMEM;
		}
	} else {
		if (length <= sg_dma_len(s)) {
			void *p = phys_to_virt(sg_dma_address(s));

			memcpy(&((u8 *)p)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			request->status = -ENOMEM;
		}
	}
}

/**
 * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
 * request queued by class driver.
 * @priv_ep: extended endpoint object
 * @request: request object
 */
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
					 struct usb_request *request)
{
	struct usb_request *descmiss_req;
	struct cdns3_request *descmiss_priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		int chunk_end;

		descmiss_priv_req =
			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		descmiss_req = &descmiss_priv_req->request;

		/* driver can't touch pending request */
		if (descmiss_priv_req->flags & REQUEST_PENDING)
			break;

		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
		request->status = descmiss_req->status;
		__cdns3_descmiss_copy_data(request, descmiss_req);
		list_del_init(&descmiss_priv_req->list);
		kfree(descmiss_req->buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
		--priv_ep->wa2_counter;

		if (!chunk_end)
			break;
	}
}

static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
						     struct cdns3_endpoint *priv_ep,
						     struct cdns3_request *priv_req)
{
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
	    priv_req->flags & REQUEST_INTERNAL) {
		struct usb_request *req;

		req = cdns3_next_request(&priv_ep->deferred_req_list);

		priv_ep->descmis_req = NULL;

		if (!req)
			return NULL;

		/* unmap the gadget request before copying data */
		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
						priv_ep->dir);

		cdns3_wa2_descmiss_copy_data(priv_ep, req);
		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
		    req->length != req->actual) {
			/* wait for next part of transfer */
			/* re-map the gadget request buffer */
			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
						      usb_endpoint_dir_in(priv_ep->endpoint.desc));
			return NULL;
		}

		if (req->status == -EINPROGRESS)
			req->status = 0;

		list_del_init(&req->list);
		cdns3_start_all_request(priv_dev, priv_ep);
		return req;
	}

	return &priv_req->request;
}

static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep,
				     struct cdns3_request *priv_req)
{
	int deferred = 0;

	/*
	 * If the transfer was queued before DESCMISS appeared, the driver
	 * can disable handling of the DESCMISS interrupt. The driver assumes
	 * that it can disable the special treatment for this endpoint.
	 */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		u32 reg;

		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		reg = readl(&priv_dev->regs->ep_sts_en);
		reg &= ~EP_STS_EN_DESCMISEN;
		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
		writel(reg, &priv_dev->regs->ep_sts_en);
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);

		/*
		 * The DESCMISS transfer has been finished, so data will be
		 * directly copied from the internally allocated usb_request
		 * objects.
		 */
		if (pending_empty && !descmiss_empty &&
		    !(priv_req->flags & REQUEST_INTERNAL)) {
			cdns3_wa2_descmiss_copy_data(priv_ep,
						     &priv_req->request);

			trace_cdns3_wa2(priv_ep, "get internal stored data");

			list_add_tail(&priv_req->request.list,
				      &priv_ep->pending_req_list);
			cdns3_gadget_giveback(priv_ep, priv_req,
					      priv_req->request.status);

			/*
			 * Intentionally the driver returns a positive value
			 * as the correct value. It informs the caller that
			 * the transfer has already been finished.
			 */
			return EINPROGRESS;
		}

		/*
		 * The driver waits for the DESCMISS transfer to complete
		 * before starting a new, non-DESCMISS transfer.
		 */
		if (!pending_empty && !descmiss_empty) {
			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
			deferred = 1;
		}

		if (priv_req->flags & REQUEST_INTERNAL)
			list_add_tail(&priv_req->list,
				      &priv_ep->wa2_descmiss_req_list);
	}

	return deferred;
}

static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		u8 chain;

		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);

		trace_cdns3_wa2(priv_ep, "removes eldest request");

		kfree(priv_req->request.buf);
		list_del_init(&priv_req->list);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		--priv_ep->wa2_counter;

		if (!chain)
			break;
	}
}

/**
 * cdns3_wa2_descmissing_packet - handles descriptor missing event.
 * @priv_ep: extended gadget object
 *
 * This function is used only for WA2. For more information see the
 * Workaround 2 description at the top of this file.
 */
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/* check for pending transfer */
	if (!pending_empty) {
		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
		return;
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
	}

	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");

	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
		cdns3_wa2_remove_old_request(priv_ep);
	}

	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
						GFP_ATOMIC);
	if (!request)
		goto err;

	priv_req = to_cdns3_request(request);
	priv_req->flags |= REQUEST_INTERNAL;

	/* if this field is still assigned, it indicates that the transfer
	 * related to this request has not been finished yet. In this case
	 * the driver simply allocates the next request and sets the
	 * REQUEST_INTERNAL_CH flag on the previous one, indicating that the
	 * current request is part of the previous one.
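	 * Chained requests are later consumed together as one chunk by
	 * cdns3_wa2_descmiss_copy_data().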
	 */
	if (priv_ep->descmis_req)
		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;

	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
					GFP_ATOMIC);
	priv_ep->wa2_counter++;

	if (!priv_req->request.buf) {
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
		goto err;
	}

	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
	priv_ep->descmis_req = priv_req;

	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
				&priv_ep->descmis_req->request,
				GFP_ATOMIC);

	return;

err:
	dev_err(priv_ep->cdns3_dev->dev,
		"Failed: No sufficient memory for DESCMIS\n");
}

static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
{
	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

	if (tdl) {
		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;

		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
		       &priv_dev->regs->ep_cmd);
	}
}

static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
{
	u32 ep_sts_reg;

	/* select EP0-out */
	cdns3_select_ep(priv_dev, 0);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);

	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];

		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
			u8 pending_empty = list_empty(&outq_ep->pending_req_list);

			if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
			    (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
			    !pending_empty) {
			} else {
				u32 ep_sts_en_reg;
				u32 ep_cmd_reg;

				cdns3_select_ep(priv_dev, outq_ep->num |
						outq_ep->dir);
				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);

				outq_ep->flags |= EP_TDLCHK_EN;
				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
						       EP_CFG_TDL_CHK);

				cdns3_wa2_enable_detection(priv_dev, outq_ep,
							   ep_sts_en_reg);
				writel(ep_sts_en_reg,
				       &priv_dev->regs->ep_sts_en);
				/* reset tdl value to zero */
				cdns3_wa2_reset_tdl(priv_dev);
				/*
				 * Memory barrier - Reset tdl before ringing the
				 * doorbell.
				 */
				wmb();
				if (EP_CMD_DRDY & ep_cmd_reg) {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");

				} else {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
					/*
					 * ring doorbell to generate DESCMIS irq
					 */
					writel(EP_CMD_DRDY,
					       &priv_dev->regs->ep_cmd);
				}
			}
		}
	}
}

/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint to which the request belongs
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap @req and call its ->complete() callback to notify upper
 * layers that it has completed.
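 *
 * The controller lock is dropped around the ->complete() callback and
 * re-acquired before this function returns.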
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
					priv_ep->dir);

	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status)
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->length);

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	/* All TRBs have finished, clear the counter */
	priv_req->finished_trb = 0;
	trace_cdns3_gadget_giveback(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	if (request->complete) {
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}

static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
	/* Work around for stale data address in TRB */
	if (priv_ep->wa1_set) {
		trace_cdns3_wa1(priv_ep, "restore cycle bit");

		priv_ep->wa1_set = 0;
		priv_ep->wa1_trb_index = 0xFFFF;
		if (priv_ep->wa1_cycle_bit) {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
		} else {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
		}
	}
}

static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
						     aligned_buf_wq);
	struct cdns3_aligned_buf *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);

	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
		if (!buf->in_use) {
			list_del(&buf->list);

			/*
			 * Re-enable interrupts to free DMA capable memory.
			 * Driver can't free this memory with disabled
			 * interrupts.
			 */
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			dma_free_coherent(priv_dev->sysdev, buf->size,
					  buf->buf, buf->dma);
			kfree(buf);
			spin_lock_irqsave(&priv_dev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
}

static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_aligned_buf *buf;

	/* check if the buffer is aligned to 8 bytes */
	if (!((uintptr_t)priv_req->request.buf & 0x7))
		return 0;

	buf = priv_req->aligned_buf;

	if (!buf || priv_req->request.length > buf->size) {
		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		buf->size = priv_req->request.length;

		buf->buf = dma_alloc_coherent(priv_dev->sysdev,
					      buf->size,
					      &buf->dma,
					      GFP_ATOMIC);
		if (!buf->buf) {
			kfree(buf);
			return -ENOMEM;
		}

		if (priv_req->aligned_buf) {
			trace_cdns3_free_aligned_request(priv_req);
			priv_req->aligned_buf->in_use = 0;
			queue_work(system_freezable_wq,
				   &priv_dev->aligned_buf_wq);
		}

		buf->in_use = 1;
		priv_req->aligned_buf = buf;

		list_add_tail(&buf->list,
			      &priv_dev->aligned_buf_list);
	}

	if (priv_ep->dir == USB_DIR_IN) {
		memcpy(buf->buf, priv_req->request.buf,
		       priv_req->request.length);
	}

	priv_req->flags |= REQUEST_UNALIGNED;
	trace_cdns3_prepare_aligned_request(priv_req);

	return 0;
}

static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
				  struct cdns3_trb *trb)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (!priv_ep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

		if (doorbell) {
			priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
			priv_ep->wa1_set = 1;
			priv_ep->wa1_trb = trb;
			priv_ep->wa1_trb_index = priv_ep->enqueue;
			trace_cdns3_wa1(priv_ep, "set guard");
			return 0;
		}
	}
	return 1;
}

static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
					     struct cdns3_endpoint *priv_ep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
	dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

	if (!doorbell || dma_index != priv_ep->wa1_trb_index)
		cdns3_wa1_restore_cycle_bit(priv_ep);
}

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	dma_addr_t trb_dma;
	int address;
	u32 control;
	u32 length;
	u32 tdl;
	unsigned int sg_idx = priv_ep->stream_sg_idx;

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	/* For stream capable endpoints the driver uses only a single TD */
	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->end_trb = priv_req->start_trb;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;

	if (!request->num_sgs) {
		trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
		length = request->length;
	} else {
		trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
		length = request->sg[sg_idx].length;
	}

	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);

	trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));

	/*
	 * For the DEV_VER_V2 controller version we have enabled
	 * USB_CONF2_EN_TDL_TRB in the DMULT configuration.
	 * This enables TDL calculation based on the TRB, hence setting TDL
	 * in the TRB.
	 */
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
	}
	priv_req->flags |= REQUEST_PENDING;

	trb->control = cpu_to_le32(control);

	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/* always first element */
	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
	       &priv_dev->regs->ep_traddr);

	if (!(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);

		priv_ep->prime_flag = false;

		/*
		 * For controller version DEV_VER_V2 the tdl calculation
		 * is based on the TRB
		 */

		if (priv_dev->dev_ver < DEV_VER_V2)
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		else if (priv_dev->dev_ver > DEV_VER_V2)
			writel(tdl, &priv_dev->regs->ep_tdl);

		priv_ep->last_stream_id = priv_req->request.stream_id;
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_dev->dev_ver < DEV_VER_V3)
		return;

	if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
		writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
	}
}

/**
 * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
 * @priv_ep: endpoint object
 * @request: request object
 *
 * Returns zero on success or negative value on failure
 */
static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	struct cdns3_trb *link_trb = NULL;
	dma_addr_t trb_dma;
	u32 togle_pcs = 1;
	int sg_iter = 0;
	int num_trb_req;
	int trb_burst;
	int num_trb;
	int address;
	u32 control;
	int pcs;
	u16 total_tdl = 0;
	struct scatterlist *s = NULL;
	bool sg_supported = !!(request->num_mapped_sgs);

	num_trb_req = sg_supported ? request->num_mapped_sgs : 1;

	/* ISO transfers require a TD per SOF; each TD includes some TRBs */
	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
		num_trb = priv_ep->interval * num_trb_req;
	else
		num_trb = num_trb_req;

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	/* prepare ring */
	if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
		int doorbell, dma_index;
		u32 ch_bit = 0;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

		/* Driver can't update the LINK TRB while it is being processed */
		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
			priv_ep->flags |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* update Cycle bit in Link TRB before starting DMA */
		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
		/*
		 * For a TR size equal to 2, enabling TRB_CHAIN for epXin
		 * causes the DMA to get stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout causes the DMA to get stuck after handling the LINK
		 * TRB. To eliminate this strange behaviour the driver sets
		 * the TRB_CHAIN bit only for TR size > 2.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
		    TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
			/*
			 * ISO requires that the LINK TRB is the first TRB of
			 * a TD. Fill the remaining TRB space with LINK TRBs
			 * to simplify the software processing logic.
			 */
			while (priv_ep->enqueue) {
				*trb = *link_trb;
				trace_cdns3_prepare_trb(priv_ep, trb);

				cdns3_ep_inc_enq(priv_ep);
				trb = priv_ep->trb_pool + priv_ep->enqueue;
				priv_req->trb = trb;
			}
		}
	}

	if (num_trb > priv_ep->free_trbs) {
		priv_ep->flags |= EP_RING_FULL;
		return -ENOBUFS;
	}

	if (priv_dev->dev_ver <= DEV_VER_V2)
		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

	/* set incorrect Cycle Bit for first trb */
	control = priv_ep->pcs ? 0 : TRB_CYCLE;
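	/*
	 * The inverted cycle bit keeps DMA from consuming the first TRB
	 * until the whole TD is prepared; it is flipped back after all TRBs
	 * are filled in (see the togle_pcs handling below).
	 */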
	trb->length = 0;
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		u16 td_size;

		td_size = DIV_ROUND_UP(request->length,
				       priv_ep->endpoint.maxpacket);
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
		else
			control |= TRB_TDL_HS_SIZE(td_size);
	}

	do {
		u32 length;

		if (!(sg_iter % num_trb_req) && sg_supported)
			s = request->sg;

		/* fill TRB */
		control |= TRB_TYPE(TRB_NORMAL);
		if (sg_supported) {
			trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
			length = sg_dma_len(s);
		} else {
			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
			length = request->length;
		}

		if (priv_ep->flags & EP_TDLCHK_EN)
			total_tdl += DIV_ROUND_UP(length,
						  priv_ep->endpoint.maxpacket);

		trb_burst = priv_ep->trb_burst_size;

		/*
		 * The supposed DMA 4K boundary crossing problem should have
		 * been fixed in DEV_VER_V2, but a problem was still observed
		 * with ISO transfers when scatter-gather was enabled.
		 *
		 * The data pattern looks like this when sg is enabled, with
		 * a packet size of 1k and mult of 2:
		 * [UVC Header(8B) ] [data(3k - 8)] ...
		 *
		 * The data received at offset 0xd000 contained the 0xc000
		 * data, of length 0x70. The error happened with the pattern
		 * below:
		 * 0xd000: wrong
		 * 0xe000: wrong
		 * 0xf000: correct
		 * 0x10000: wrong
		 * 0x11000: wrong
		 * 0x12000: correct
		 * ...
		 *
		 * It is still unclear why the error did not happen below
		 * 0xd000, which should also cross a 4k boundary. In any
		 * case, the code below fixes the problem.
		 *
		 * To avoid DMA crossing a 4k boundary during ISO transfers,
		 * reduce the burst length to 16.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_dev->dev_ver <= DEV_VER_V2)
			if (ALIGN_DOWN(trb->buffer, SZ_4K) !=
			    ALIGN_DOWN(trb->buffer + length, SZ_4K))
				trb_burst = 16;

		trb->length |= cpu_to_le32(TRB_BURST_LEN(trb_burst) |
					   TRB_LEN(length));
		pcs = priv_ep->pcs ? TRB_CYCLE : 0;

		/*
		 * the first trb should be prepared as the last one, to avoid
		 * processing the transfer too early
		 */
		if (sg_iter != 0)
			control |= pcs;

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
			control |= TRB_IOC | TRB_ISP;
		} else {
			/* for last element in TD or in SG list */
			if (sg_iter == (num_trb - 1) && sg_iter != 0)
				control |= pcs | TRB_IOC | TRB_ISP;
		}

		if (sg_iter)
			trb->control = cpu_to_le32(control);
		else
			priv_req->trb->control = cpu_to_le32(control);

		if (sg_supported) {
			trb->control |= cpu_to_le32(TRB_ISP);
			/* Don't set chain bit for last TRB */
			if ((sg_iter % num_trb_req) < num_trb_req - 1)
				trb->control |= cpu_to_le32(TRB_CHAIN);

			s = sg_next(s);
		}

		control = 0;
		++sg_iter;
		priv_req->end_trb = priv_ep->enqueue;
		cdns3_ep_inc_enq(priv_ep);
		trb = priv_ep->trb_pool + priv_ep->enqueue;
		trb->length = 0;
	} while (sg_iter < num_trb);

	trb = priv_req->trb;

	priv_req->flags |= REQUEST_PENDING;
	priv_req->num_of_trb = num_trb;

	if (sg_iter == 1)
		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);

	if (priv_dev->dev_ver < DEV_VER_V2 &&
	    (priv_ep->flags & EP_TDLCHK_EN)) {
		u16 tdl = total_tdl;
		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

		if (tdl > EP_CMD_TDL_MAX) {
			tdl = EP_CMD_TDL_MAX;
			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
		}

		if (old_tdl < tdl) {
			tdl -= old_tdl;
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		}
	}

	/*
	 * Memory barrier - the cycle bit must be set before the other fields
	 * in the trb.
	 */
	wmb();

	/* give the TD to the consumer */
	if (togle_pcs)
		trb->control = trb->control ^ cpu_to_le32(1);

	if (priv_dev->dev_ver <= DEV_VER_V2)
		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);

	if (num_trb > 1) {
		int i = 0;

		while (i < num_trb) {
			trace_cdns3_prepare_trb(priv_ep, trb + i);
			if (trb + i == link_trb) {
				trb = priv_ep->trb_pool;
				num_trb = num_trb - i;
				i = 0;
			} else {
				i++;
			}
		}
	} else {
		trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
	}

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/*
	 * For DMULT mode we can set the address of the transfer ring only
	 * once after enabling the endpoint.
	 */
	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
		/*
		 * As long as SW is not ready to handle the OUT transfer,
		 * the ISO OUT endpoint should be disabled
		 * (EP_CFG.ENABLE = 0). EP_CFG_ENABLE must be set before
		 * updating ep_traddr.
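		 * The EP_QUIRK_ISO_OUT_EN flag tracks whether the endpoint
		 * has already been re-enabled; it is cleared again when the
		 * TRBERR handler disables the endpoint.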
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
					       EP_CFG_ENABLE);
		}

		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
					priv_req->start_trb * TRB_SIZE),
		       &priv_dev->regs->ep_traddr);

		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
	}

	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clear TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		cdns3_rearm_drdy_if_needed(priv_ep);
		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
	struct cdns3_endpoint *priv_ep;
	struct usb_ep *ep;

	if (priv_dev->hw_configured_flag)
		return;

	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);

	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
			       USB_CONF_U1EN | USB_CONF_U2EN);

	priv_dev->hw_configured_flag = 1;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		if (ep->enabled) {
			priv_ep = ep_to_cdns3_ep(ep);
			cdns3_start_all_request(priv_dev, priv_ep);
		}
	}

	cdns3_allow_enable_l1(priv_dev, 1);
}

/**
 * cdns3_trb_handled - check whether trb has been handled by DMA
 *
 * @priv_ep: extended endpoint object.
 * @priv_req: request object for checking
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = priv_ep->dequeue - dequeue position
 * EQ = priv_ep->enqueue - enqueue position
 * ST = priv_req->start_trb - index of first TRB in transfer ring
 * ET = priv_req->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of the TRB currently processed by DMA
 *
 * As a first step, we check if the TRB is between ST and ET.
 * Then, we check if the cycle bit at index priv_ep->dequeue
 * is correct.
 *
 * some rules:
 * 1. priv_ep->dequeue never equals current_index.
 * 2. priv_ep->enqueue never exceeds priv_ep->dequeue
 * 3. exception: priv_ep->enqueue == priv_ep->dequeue
 *    and priv_ep->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * In the two cases below, the request has been handled.
 * Case 1 - priv_ep->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - priv_ep->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end
 * of the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
			      struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	/* current trb doesn't belong to this request */
	if (priv_req->start_trb < priv_req->end_trb) {
		if (priv_ep->dequeue > priv_req->end_trb)
			goto finish;

		if (priv_ep->dequeue < priv_req->start_trb)
			goto finish;
	}

	if ((priv_req->start_trb > priv_req->end_trb) &&
	    (priv_ep->dequeue > priv_req->end_trb) &&
	    (priv_ep->dequeue < priv_req->start_trb))
		goto finish;

	if ((priv_req->start_trb == priv_req->end_trb) &&
	    (priv_ep->dequeue != priv_req->end_trb))
		goto finish;

	trb = &priv_ep->trb_pool[priv_ep->dequeue];

	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal to 2 */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		handled = 1;
	} else if (priv_ep->dequeue > current_index) {
		handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}

static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	struct cdns3_trb *trb;
	bool request_handled = false;
	bool transfer_end = false;

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);

		trb = priv_ep->trb_pool + priv_ep->dequeue;

		/* The TRB was changed to a link TRB, and the request was handled in ep_dequeue */
		while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {

			/* ISO ep_traddr may stop at LINK TRB */
			if (priv_ep->dequeue == cdns3_get_dma_pos(priv_dev, priv_ep) &&
			    priv_ep->type == USB_ENDPOINT_XFER_ISOC)
				break;

			trace_cdns3_complete_trb(priv_ep, trb);
			cdns3_ep_inc_deq(priv_ep);
			trb = priv_ep->trb_pool + priv_ep->dequeue;
		}

		if (!request->stream_id) {
			/* Re-select endpoint. It could be changed by other CPU
			 * during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			while (cdns3_trb_handled(priv_ep, priv_req)) {
				priv_req->finished_trb++;
				if (priv_req->finished_trb >= priv_req->num_of_trb)
					request_handled = true;

				trb = priv_ep->trb_pool + priv_ep->dequeue;
				trace_cdns3_complete_trb(priv_ep, trb);

				if (!transfer_end)
					request->actual +=
						TRB_LEN(le32_to_cpu(trb->length));

				if (priv_req->num_of_trb > 1 &&
				    le32_to_cpu(trb->control) & TRB_SMM &&
				    le32_to_cpu(trb->control) & TRB_CHAIN)
					transfer_end = true;

				cdns3_ep_inc_deq(priv_ep);
			}

			if (request_handled) {
				/* TRBs are duplicated priv_ep->interval times for ISO IN */
				if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_ep->dir)
					request->actual /= priv_ep->interval;

				cdns3_gadget_giveback(priv_ep, priv_req, 0);
				request_handled = false;
				transfer_end = false;
			} else {
				goto prepare_next_td;
			}

			if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
			    TRBS_PER_SEGMENT == 2)
				break;
		} else {
			/* Re-select endpoint. It could be changed by other CPU
			 * during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			trb = priv_ep->trb_pool;
			trace_cdns3_complete_trb(priv_ep, trb);

			if (trb != priv_req->trb)
				dev_warn(priv_dev->dev,
					 "request_trb=0x%p, queue_trb=0x%p\n",
					 priv_req->trb, trb);

			request->actual += TRB_LEN(le32_to_cpu(trb->length));

			if (!request->num_sgs ||
			    (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
				priv_ep->stream_sg_idx = 0;
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
			} else {
				priv_ep->stream_sg_idx++;
				cdns3_ep_run_stream_transfer(priv_ep, request);
			}
			break;
		}
	}
	priv_ep->flags &= ~EP_PENDING_REQUEST;

prepare_next_td:
	if (!(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);
}

void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	cdns3_wa1_restore_cycle_bit(priv_ep);

	if (rearm) {
		trace_cdns3_ring(priv_ep);

		/* Cycle Bit must be updated before arming DMA. */
		wmb();
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);

		__cdns3_gadget_wakeup(priv_dev);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}
}

static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
{
	u16 tdl = priv_ep->pending_tdl;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (tdl > EP_CMD_TDL_MAX) {
		tdl = EP_CMD_TDL_MAX;
		priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
	} else {
		priv_ep->pending_tdl = 0;
	}

	writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
}

/**
 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
 * @priv_ep: endpoint object
 *
 * Returns 0
 */
static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 ep_sts_reg;
	struct usb_request *deferred_request;
	struct usb_request *pending_request;
	u32 tdl = 0;

	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

	trace_cdns3_epx_irq(priv_dev, priv_ep);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
	writel(ep_sts_reg, &priv_dev->regs->ep_sts);

	if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
		bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);

		tdl = cdns3_get_tdl(priv_dev);

		/*
		 * Continue the previous transfer:
		 * There is some racing between ERDY and PRIME. The device
		 * sends ERDY and almost at the same time the host sends
		 * PRIME. This causes the host to ignore the ERDY packet, so
		 * the driver has to send it again.
		 */
		if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
		    EP_STS_HOSTPP(ep_sts_reg))) {
			writel(EP_CMD_ERDY |
			       EP_CMD_ERDY_SID(priv_ep->last_stream_id),
			       &priv_dev->regs->ep_cmd);
			ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
		} else {
			priv_ep->prime_flag = true;

			pending_request = cdns3_next_request(&priv_ep->pending_req_list);
			deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);

			if (deferred_request && !pending_request) {
				cdns3_start_all_request(priv_dev, priv_ep);
			}
		}
	}

	if (ep_sts_reg & EP_STS_TRBERR) {
		if (priv_ep->flags & EP_STALL_PENDING &&
		    !(ep_sts_reg & EP_STS_DESCMIS &&
		    priv_dev->dev_ver < DEV_VER_V2)) {
			cdns3_ep_stall_flush(priv_ep);
		}

		/*
		 * For isochronous transfers the driver completes the request
		 * on IOC or on TRBERR. IOC appears only when the device
		 * receives an OUT data packet. If the host disables the
		 * stream or loses some packets, the only way to finish all
		 * queued transfers is to do it on the TRBERR event.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
		    !priv_ep->wa1_set) {
			if (!priv_ep->dir) {
				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);

				ep_cfg &= ~EP_CFG_ENABLE;
				writel(ep_cfg, &priv_dev->regs->ep_cfg);
				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
				priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
			}
			cdns3_transfer_completed(priv_dev, priv_ep);
		} else if (!(priv_ep->flags & EP_STALLED) &&
			   !(priv_ep->flags & EP_STALL_PENDING)) {
			if (priv_ep->flags & EP_DEFERRED_DRDY) {
				priv_ep->flags &= ~EP_DEFERRED_DRDY;
				cdns3_start_all_request(priv_dev, priv_ep);
			} else {
				cdns3_rearm_transfer(priv_ep,
						     priv_ep->wa1_set);
			}
		}
	}

	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
	    (ep_sts_reg & EP_STS_IOT)) {
		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
			if (ep_sts_reg & EP_STS_ISP)
				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
			else
				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
		}

		if (!priv_ep->use_streams) {
			if ((ep_sts_reg & EP_STS_IOC) ||
			    (ep_sts_reg & EP_STS_ISP)) {
				cdns3_transfer_completed(priv_dev, priv_ep);
			} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
				   priv_ep->pending_tdl) {
				/* handle IOT with pending tdl */
				cdns3_reprogram_tdl(priv_ep);
			}
		} else if (priv_ep->dir == USB_DIR_OUT) {
			priv_ep->ep_sts_pending |= ep_sts_reg;
		} else if (ep_sts_reg & EP_STS_IOT) {
			cdns3_transfer_completed(priv_dev, priv_ep);
		}
	}

	/*
	 * The MD_EXIT interrupt is set when a stream capable endpoint exits
	 * the MOVE DATA state of the Bulk IN/OUT stream protocol state
	 * machine
	 */
	if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
	    (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
		priv_ep->ep_sts_pending = 0;
		cdns3_transfer_completed(priv_dev, priv_ep);
	}

	/*
	 * WA2: this condition should only be met when
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
	 * In other cases this interrupt will be disabled.
	 */
	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
	    !(priv_ep->flags & EP_STALLED))
		cdns3_wa2_descmissing_packet(priv_ep);

	return 0;
}

static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
	if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
		priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
}

/**
 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
 * @priv_dev: extended gadget object
 * @usb_ists: bitmap representation of device's reported interrupts
 *            (usb_ists register value)
 */
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
					      u32 usb_ists)
__must_hold(&priv_dev->lock)
{
	int speed = 0;

	trace_cdns3_usb_irq(priv_dev, usb_ists);
	if (usb_ists & USB_ISTS_L1ENTI) {
		/*
		 * WORKAROUND: The CDNS3 controller has an issue with hardware
		 * resuming from L1. To fix it, if any DMA transfer is
		 * pending, the driver must start driving the resume signal
		 * immediately.
		 */
		if (readl(&priv_dev->regs->drbl))
			__cdns3_gadget_wakeup(priv_dev);
	}

	/* Connection detected */
	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
		speed = cdns3_get_speed(priv_dev);
		priv_dev->gadget.speed = speed;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
		cdns3_ep0_config(priv_dev);
	}

	/* Disconnection detected */
	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
		spin_unlock(&priv_dev->lock);
		cdns3_disconnect_gadget(priv_dev);
		spin_lock(&priv_dev->lock);
		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
		cdns3_hw_reset_eps_config(priv_dev);
	}

	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->suspend) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->resume) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->resume(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* reset */
	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
		if (priv_dev->gadget_driver) {
			spin_unlock(&priv_dev->lock);
			usb_gadget_udc_reset(&priv_dev->gadget,
					     priv_dev->gadget_driver);
			spin_lock(&priv_dev->lock);

			/* read again to check the actual speed */
			speed = cdns3_get_speed(priv_dev);
			priv_dev->gadget.speed = speed;
			cdns3_hw_reset_eps_config(priv_dev);
			cdns3_ep0_config(priv_dev);
		}
	}
}

/**
 * cdns3_device_irq_handler - interrupt handler for device part of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_WAKE_THREAD or IRQ_NONE
 */
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev = data;
	struct cdns3 *cdns = dev_get_drvdata(priv_dev->dev);
	irqreturn_t ret = IRQ_NONE;
	u32 reg;

	if (cdns->in_lpm)
		return ret;

	/* check USB device interrupt */
	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/* After masking interrupts, new interrupts won't be
		 * reported in usb_ists/ep_ists. In order not to lose any
		 * of them, the driver disables only the detected interrupts.
		 * They will be re-enabled ASAP after clearing the source of
		 * the interrupt. This unusual behavior only applies to the
		 * usb_ists register.
		 */
		reg = ~reg & readl(&priv_dev->regs->usb_ien);
		/* mask deferred interrupt */
*/ 1929 writel(reg, &priv_dev->regs->usb_ien); 1930 ret = IRQ_WAKE_THREAD; 1931 } 1932 1933 /* check endpoint interrupt */ 1934 reg = readl(&priv_dev->regs->ep_ists); 1935 if (reg) { 1936 writel(0, &priv_dev->regs->ep_ien); 1937 ret = IRQ_WAKE_THREAD; 1938 } 1939 1940 return ret; 1941} 1942 1943/** 1944 * cdns3_device_thread_irq_handler- interrupt handler for device part 1945 * of controller 1946 * 1947 * @irq: irq number for cdns3 core device 1948 * @data: structure of cdns3 1949 * 1950 * Returns IRQ_HANDLED or IRQ_NONE 1951 */ 1952static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) 1953{ 1954 struct cdns3_device *priv_dev = data; 1955 irqreturn_t ret = IRQ_NONE; 1956 unsigned long flags; 1957 unsigned int bit; 1958 unsigned long reg; 1959 1960 spin_lock_irqsave(&priv_dev->lock, flags); 1961 1962 reg = readl(&priv_dev->regs->usb_ists); 1963 if (reg) { 1964 writel(reg, &priv_dev->regs->usb_ists); 1965 writel(USB_IEN_INIT, &priv_dev->regs->usb_ien); 1966 cdns3_check_usb_interrupt_proceed(priv_dev, reg); 1967 ret = IRQ_HANDLED; 1968 } 1969 1970 reg = readl(&priv_dev->regs->ep_ists); 1971 1972 /* handle default endpoint OUT */ 1973 if (reg & EP_ISTS_EP_OUT0) { 1974 cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT); 1975 ret = IRQ_HANDLED; 1976 } 1977 1978 /* handle default endpoint IN */ 1979 if (reg & EP_ISTS_EP_IN0) { 1980 cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN); 1981 ret = IRQ_HANDLED; 1982 } 1983 1984 /* check if interrupt from non default endpoint, if no exit */ 1985 reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0); 1986 if (!reg) 1987 goto irqend; 1988 1989 for_each_set_bit(bit, ®, 1990 sizeof(u32) * BITS_PER_BYTE) { 1991 cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]); 1992 ret = IRQ_HANDLED; 1993 } 1994 1995 if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams) 1996 cdns3_wa2_check_outq_status(priv_dev); 1997 1998irqend: 1999 writel(~0, &priv_dev->regs->ep_ien); 2000 spin_unlock_irqrestore(&priv_dev->lock, flags); 2001 2002 return ret; 2003} 2004 2005/** 2006 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP 2007 * 2008 * The real reservation will occur during write to EP_CFG register, 2009 * this function is used to check if the 'size' reservation is allowed. 
2010 * 2011 * @priv_dev: extended gadget object 2012 * @size: the size (KB) for EP would like to allocate 2013 * @is_in: endpoint direction 2014 * 2015 * Return 0 if the required size can met or negative value on failure 2016 */ 2017static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev, 2018 int size, int is_in) 2019{ 2020 int remained; 2021 2022 /* 2KB are reserved for EP0*/ 2023 remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2; 2024 2025 if (is_in) { 2026 if (remained < size) 2027 return -EPERM; 2028 2029 priv_dev->onchip_used_size += size; 2030 } else { 2031 int required; 2032 2033 /** 2034 * ALL OUT EPs are shared the same chunk onchip memory, so 2035 * driver checks if it already has assigned enough buffers 2036 */ 2037 if (priv_dev->out_mem_is_allocated >= size) 2038 return 0; 2039 2040 required = size - priv_dev->out_mem_is_allocated; 2041 2042 if (required > remained) 2043 return -EPERM; 2044 2045 priv_dev->out_mem_is_allocated += required; 2046 priv_dev->onchip_used_size += required; 2047 } 2048 2049 return 0; 2050} 2051 2052static void cdns3_configure_dmult(struct cdns3_device *priv_dev, 2053 struct cdns3_endpoint *priv_ep) 2054{ 2055 struct cdns3_usb_regs __iomem *regs = priv_dev->regs; 2056 2057 /* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */ 2058 if (priv_dev->dev_ver <= DEV_VER_V2) 2059 writel(USB_CONF_DMULT, ®s->usb_conf); 2060 2061 if (priv_dev->dev_ver == DEV_VER_V2) 2062 writel(USB_CONF2_EN_TDL_TRB, ®s->usb_conf2); 2063 2064 if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) { 2065 u32 mask; 2066 2067 if (priv_ep->dir) 2068 mask = BIT(priv_ep->num + 16); 2069 else 2070 mask = BIT(priv_ep->num); 2071 2072 if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) { 2073 cdns3_set_register_bit(®s->tdl_from_trb, mask); 2074 cdns3_set_register_bit(®s->tdl_beh, mask); 2075 cdns3_set_register_bit(®s->tdl_beh2, mask); 2076 cdns3_set_register_bit(®s->dma_adv_td, mask); 2077 } 2078 2079 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) 2080 cdns3_set_register_bit(®s->tdl_from_trb, mask); 2081 2082 cdns3_set_register_bit(®s->dtrans, mask); 2083 } 2084} 2085 2086/** 2087 * cdns3_ep_config Configure hardware endpoint 2088 * @priv_ep: extended endpoint object 2089 * @enable: set EP_CFG_ENABLE bit in ep_cfg register. 2090 */ 2091int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) 2092{ 2093 bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC); 2094 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2095 u32 bEndpointAddress = priv_ep->num | priv_ep->dir; 2096 u32 max_packet_size = priv_ep->wMaxPacketSize; 2097 u8 maxburst = priv_ep->bMaxBurst; 2098 u32 ep_cfg = 0; 2099 u8 buffering; 2100 int ret; 2101 2102 buffering = priv_dev->ep_buf_size - 1; 2103 2104 cdns3_configure_dmult(priv_dev, priv_ep); 2105 2106 switch (priv_ep->type) { 2107 case USB_ENDPOINT_XFER_INT: 2108 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT); 2109 2110 if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir) 2111 ep_cfg |= EP_CFG_TDL_CHK; 2112 break; 2113 case USB_ENDPOINT_XFER_BULK: 2114 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK); 2115 2116 if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir) 2117 ep_cfg |= EP_CFG_TDL_CHK; 2118 break; 2119 default: 2120 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC); 2121 buffering = (priv_ep->bMaxBurst + 1) * (priv_ep->mult + 1) - 1; 2122 } 2123 2124 switch (priv_dev->gadget.speed) { 2125 case USB_SPEED_FULL: 2126 max_packet_size = is_iso_ep ? 
1023 : 64; 2127 break; 2128 case USB_SPEED_HIGH: 2129 max_packet_size = is_iso_ep ? 1024 : 512; 2130 break; 2131 case USB_SPEED_SUPER: 2132 if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) { 2133 max_packet_size = 1024; 2134 maxburst = priv_dev->ep_buf_size - 1; 2135 } 2136 break; 2137 default: 2138 /* all other speed are not supported */ 2139 return -EINVAL; 2140 } 2141 2142 if (max_packet_size == 1024) 2143 priv_ep->trb_burst_size = 128; 2144 else if (max_packet_size >= 512) 2145 priv_ep->trb_burst_size = 64; 2146 else 2147 priv_ep->trb_burst_size = 16; 2148 2149 /* 2150 * In versions preceding DEV_VER_V2, for example, iMX8QM, there exit the bugs 2151 * in the DMA. These bugs occur when the trb_burst_size exceeds 16 and the 2152 * address is not aligned to 128 Bytes (which is a product of the 64-bit AXI 2153 * and AXI maximum burst length of 16 or 0xF+1, dma_axi_ctrl0[3:0]). This 2154 * results in data corruption when it crosses the 4K border. The corruption 2155 * specifically occurs from the position (4K - (address & 0x7F)) to 4K. 2156 * 2157 * So force trb_burst_size to 16 at such platform. 2158 */ 2159 if (priv_dev->dev_ver < DEV_VER_V2) 2160 priv_ep->trb_burst_size = 16; 2161 2162 buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX); 2163 maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX); 2164 2165 /* onchip buffer is only allocated before configuration */ 2166 if (!priv_dev->hw_configured_flag) { 2167 ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1, 2168 !!priv_ep->dir); 2169 if (ret) { 2170 dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n"); 2171 return ret; 2172 } 2173 } 2174 2175 if (enable) 2176 ep_cfg |= EP_CFG_ENABLE; 2177 2178 if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) { 2179 if (priv_dev->dev_ver >= DEV_VER_V3) { 2180 u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0)); 2181 2182 /* 2183 * Stream capable endpoints are handled by using ep_tdl 2184 * register. Other endpoints use TDL from TRB feature. 
2185 */ 2186 cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, 2187 mask); 2188 } 2189 2190 /* Enable Stream Bit TDL chk and SID chk */ 2191 ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK; 2192 } 2193 2194 ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) | 2195 EP_CFG_MULT(priv_ep->mult) | /* must match EP setting */ 2196 EP_CFG_BUFFERING(buffering) | 2197 EP_CFG_MAXBURST(maxburst); 2198 2199 cdns3_select_ep(priv_dev, bEndpointAddress); 2200 writel(ep_cfg, &priv_dev->regs->ep_cfg); 2201 priv_ep->flags |= EP_CONFIGURED; 2202 2203 dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n", 2204 priv_ep->name, ep_cfg); 2205 2206 return 0; 2207} 2208 2209/* Find correct direction for HW endpoint according to description */ 2210static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc, 2211 struct cdns3_endpoint *priv_ep) 2212{ 2213 return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) || 2214 (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc)); 2215} 2216 2217static struct 2218cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev, 2219 struct usb_endpoint_descriptor *desc) 2220{ 2221 struct usb_ep *ep; 2222 struct cdns3_endpoint *priv_ep; 2223 2224 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { 2225 unsigned long num; 2226 int ret; 2227 /* ep name pattern likes epXin or epXout */ 2228 char c[2] = {ep->name[2], '\0'}; 2229 2230 ret = kstrtoul(c, 10, &num); 2231 if (ret) 2232 return ERR_PTR(ret); 2233 2234 priv_ep = ep_to_cdns3_ep(ep); 2235 if (cdns3_ep_dir_is_correct(desc, priv_ep)) { 2236 if (!(priv_ep->flags & EP_CLAIMED)) { 2237 priv_ep->num = num; 2238 return priv_ep; 2239 } 2240 } 2241 } 2242 2243 return ERR_PTR(-ENOENT); 2244} 2245 2246/* 2247 * Cadence IP has one limitation that all endpoints must be configured 2248 * (Type & MaxPacketSize) before setting configuration through hardware 2249 * register, it means we can't change endpoints configuration after 2250 * set_configuration. 2251 * 2252 * This function set EP_CLAIMED flag which is added when the gadget driver 2253 * uses usb_ep_autoconfig to configure specific endpoint; 2254 * When the udc driver receives set_configurion request, 2255 * it goes through all claimed endpoints, and configure all endpoints 2256 * accordingly. 2257 * 2258 * At usb_ep_ops.enable/disable, we only enable and disable endpoint through 2259 * ep_cfg register which can be changed after set_configuration, and do 2260 * some software operation accordingly. 2261 */ 2262static struct 2263usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget, 2264 struct usb_endpoint_descriptor *desc, 2265 struct usb_ss_ep_comp_descriptor *comp_desc) 2266{ 2267 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2268 struct cdns3_endpoint *priv_ep; 2269 unsigned long flags; 2270 2271 priv_ep = cdns3_find_available_ep(priv_dev, desc); 2272 if (IS_ERR(priv_ep)) { 2273 dev_err(priv_dev->dev, "no available ep\n"); 2274 return NULL; 2275 } 2276 2277 dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name); 2278 2279 spin_lock_irqsave(&priv_dev->lock, flags); 2280 priv_ep->endpoint.desc = desc; 2281 priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT; 2282 priv_ep->type = usb_endpoint_type(desc); 2283 priv_ep->flags |= EP_CLAIMED; 2284 priv_ep->interval = desc->bInterval ? 
BIT(desc->bInterval - 1) : 0; 2285 priv_ep->wMaxPacketSize = usb_endpoint_maxp(desc); 2286 priv_ep->mult = USB_EP_MAXP_MULT(priv_ep->wMaxPacketSize); 2287 priv_ep->wMaxPacketSize &= USB_ENDPOINT_MAXP_MASK; 2288 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && comp_desc) { 2289 priv_ep->mult = USB_SS_MULT(comp_desc->bmAttributes) - 1; 2290 priv_ep->bMaxBurst = comp_desc->bMaxBurst; 2291 } 2292 2293 spin_unlock_irqrestore(&priv_dev->lock, flags); 2294 return &priv_ep->endpoint; 2295} 2296 2297/** 2298 * cdns3_gadget_ep_alloc_request Allocates request 2299 * @ep: endpoint object associated with request 2300 * @gfp_flags: gfp flags 2301 * 2302 * Returns allocated request address, NULL on allocation error 2303 */ 2304struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep, 2305 gfp_t gfp_flags) 2306{ 2307 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2308 struct cdns3_request *priv_req; 2309 2310 priv_req = kzalloc(sizeof(*priv_req), gfp_flags); 2311 if (!priv_req) 2312 return NULL; 2313 2314 priv_req->priv_ep = priv_ep; 2315 2316 trace_cdns3_alloc_request(priv_req); 2317 return &priv_req->request; 2318} 2319 2320/** 2321 * cdns3_gadget_ep_free_request Free memory occupied by request 2322 * @ep: endpoint object associated with request 2323 * @request: request to free memory 2324 */ 2325void cdns3_gadget_ep_free_request(struct usb_ep *ep, 2326 struct usb_request *request) 2327{ 2328 struct cdns3_request *priv_req = to_cdns3_request(request); 2329 2330 if (priv_req->aligned_buf) 2331 priv_req->aligned_buf->in_use = 0; 2332 2333 trace_cdns3_free_request(priv_req); 2334 kfree(priv_req); 2335} 2336 2337/** 2338 * cdns3_gadget_ep_enable Enable endpoint 2339 * @ep: endpoint object 2340 * @desc: endpoint descriptor 2341 * 2342 * Returns 0 on success, error code elsewhere 2343 */ 2344static int cdns3_gadget_ep_enable(struct usb_ep *ep, 2345 const struct usb_endpoint_descriptor *desc) 2346{ 2347 struct cdns3_endpoint *priv_ep; 2348 struct cdns3_device *priv_dev; 2349 const struct usb_ss_ep_comp_descriptor *comp_desc; 2350 u32 reg = EP_STS_EN_TRBERREN; 2351 u32 bEndpointAddress; 2352 unsigned long flags; 2353 int enable = 1; 2354 int ret = 0; 2355 int val; 2356 2357 if (!ep) { 2358 pr_debug("usbss: ep not configured?\n"); 2359 return -EINVAL; 2360 } 2361 2362 priv_ep = ep_to_cdns3_ep(ep); 2363 priv_dev = priv_ep->cdns3_dev; 2364 comp_desc = priv_ep->endpoint.comp_desc; 2365 2366 if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) { 2367 dev_dbg(priv_dev->dev, "usbss: invalid parameters\n"); 2368 return -EINVAL; 2369 } 2370 2371 if (!desc->wMaxPacketSize) { 2372 dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n"); 2373 return -EINVAL; 2374 } 2375 2376 if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED, 2377 "%s is already enabled\n", priv_ep->name)) 2378 return 0; 2379 2380 spin_lock_irqsave(&priv_dev->lock, flags); 2381 2382 priv_ep->endpoint.desc = desc; 2383 priv_ep->type = usb_endpoint_type(desc); 2384 priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0; 2385 2386 if (priv_ep->interval > ISO_MAX_INTERVAL && 2387 priv_ep->type == USB_ENDPOINT_XFER_ISOC) { 2388 dev_err(priv_dev->dev, "Driver is limited to %d period\n", 2389 ISO_MAX_INTERVAL); 2390 2391 ret = -EINVAL; 2392 goto exit; 2393 } 2394 2395 bEndpointAddress = priv_ep->num | priv_ep->dir; 2396 cdns3_select_ep(priv_dev, bEndpointAddress); 2397 2398 /* 2399 * For some versions of controller at some point during ISO OUT traffic 2400 * DMA reads Transfer Ring for the EP which has never got doorbell. 
2401 * This issue was detected only on simulation, but to avoid this issue 2402 * driver add protection against it. To fix it driver enable ISO OUT 2403 * endpoint before setting DRBL. This special treatment of ISO OUT 2404 * endpoints are recommended by controller specification. 2405 */ 2406 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) 2407 enable = 0; 2408 2409 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { 2410 /* 2411 * Enable stream support (SS mode) related interrupts 2412 * in EP_STS_EN Register 2413 */ 2414 if (priv_dev->gadget.speed >= USB_SPEED_SUPER) { 2415 reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN | 2416 EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN | 2417 EP_STS_EN_STREAMREN; 2418 priv_ep->use_streams = true; 2419 ret = cdns3_ep_config(priv_ep, enable); 2420 priv_dev->using_streams |= true; 2421 } 2422 } else { 2423 ret = cdns3_ep_config(priv_ep, enable); 2424 } 2425 2426 if (ret) 2427 goto exit; 2428 2429 ret = cdns3_allocate_trb_pool(priv_ep); 2430 if (ret) 2431 goto exit; 2432 2433 bEndpointAddress = priv_ep->num | priv_ep->dir; 2434 cdns3_select_ep(priv_dev, bEndpointAddress); 2435 2436 trace_cdns3_gadget_ep_enable(priv_ep); 2437 2438 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2439 2440 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2441 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)), 2442 1, 1000); 2443 2444 if (unlikely(ret)) { 2445 cdns3_free_trb_pool(priv_ep); 2446 ret = -EINVAL; 2447 goto exit; 2448 } 2449 2450 /* enable interrupt for selected endpoint */ 2451 cdns3_set_register_bit(&priv_dev->regs->ep_ien, 2452 BIT(cdns3_ep_addr_to_index(bEndpointAddress))); 2453 2454 if (priv_dev->dev_ver < DEV_VER_V2) 2455 cdns3_wa2_enable_detection(priv_dev, priv_ep, reg); 2456 2457 writel(reg, &priv_dev->regs->ep_sts_en); 2458 2459 ep->desc = desc; 2460 priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING | 2461 EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN); 2462 priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR; 2463 priv_ep->wa1_set = 0; 2464 priv_ep->enqueue = 0; 2465 priv_ep->dequeue = 0; 2466 reg = readl(&priv_dev->regs->ep_sts); 2467 priv_ep->pcs = !!EP_STS_CCS(reg); 2468 priv_ep->ccs = !!EP_STS_CCS(reg); 2469 /* one TRB is reserved for link TRB used in DMULT mode*/ 2470 priv_ep->free_trbs = priv_ep->num_trbs - 1; 2471exit: 2472 spin_unlock_irqrestore(&priv_dev->lock, flags); 2473 2474 return ret; 2475} 2476 2477/** 2478 * cdns3_gadget_ep_disable Disable endpoint 2479 * @ep: endpoint object 2480 * 2481 * Returns 0 on success, error code elsewhere 2482 */ 2483static int cdns3_gadget_ep_disable(struct usb_ep *ep) 2484{ 2485 struct cdns3_endpoint *priv_ep; 2486 struct cdns3_request *priv_req; 2487 struct cdns3_device *priv_dev; 2488 struct usb_request *request; 2489 unsigned long flags; 2490 int ret = 0; 2491 u32 ep_cfg; 2492 int val; 2493 2494 if (!ep) { 2495 pr_err("usbss: invalid parameters\n"); 2496 return -EINVAL; 2497 } 2498 2499 priv_ep = ep_to_cdns3_ep(ep); 2500 priv_dev = priv_ep->cdns3_dev; 2501 2502 if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED), 2503 "%s is already disabled\n", priv_ep->name)) 2504 return 0; 2505 2506 spin_lock_irqsave(&priv_dev->lock, flags); 2507 2508 trace_cdns3_gadget_ep_disable(priv_ep); 2509 2510 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2511 2512 ep_cfg = readl(&priv_dev->regs->ep_cfg); 2513 ep_cfg &= ~EP_CFG_ENABLE; 2514 writel(ep_cfg, &priv_dev->regs->ep_cfg); 2515 2516 /** 2517 * Driver needs some time before resetting endpoint. 
2518 * It need waits for clearing DBUSY bit or for timeout expired. 2519 * 10us is enough time for controller to stop transfer. 2520 */ 2521 readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val, 2522 !(val & EP_STS_DBUSY), 1, 10); 2523 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2524 2525 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2526 !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)), 2527 1, 1000); 2528 if (unlikely(ret)) 2529 dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n", 2530 priv_ep->name); 2531 2532 while (!list_empty(&priv_ep->pending_req_list)) { 2533 request = cdns3_next_request(&priv_ep->pending_req_list); 2534 2535 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 2536 -ESHUTDOWN); 2537 } 2538 2539 while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { 2540 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); 2541 2542 kfree(priv_req->request.buf); 2543 cdns3_gadget_ep_free_request(&priv_ep->endpoint, 2544 &priv_req->request); 2545 list_del_init(&priv_req->list); 2546 --priv_ep->wa2_counter; 2547 } 2548 2549 while (!list_empty(&priv_ep->deferred_req_list)) { 2550 request = cdns3_next_request(&priv_ep->deferred_req_list); 2551 2552 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 2553 -ESHUTDOWN); 2554 } 2555 2556 priv_ep->descmis_req = NULL; 2557 2558 ep->desc = NULL; 2559 priv_ep->flags &= ~EP_ENABLED; 2560 priv_ep->use_streams = false; 2561 2562 spin_unlock_irqrestore(&priv_dev->lock, flags); 2563 2564 return ret; 2565} 2566 2567/** 2568 * cdns3_gadget_ep_queue Transfer data on endpoint 2569 * @ep: endpoint object 2570 * @request: request object 2571 * @gfp_flags: gfp flags 2572 * 2573 * Returns 0 on success, error code elsewhere 2574 */ 2575static int __cdns3_gadget_ep_queue(struct usb_ep *ep, 2576 struct usb_request *request, 2577 gfp_t gfp_flags) 2578{ 2579 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2580 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2581 struct cdns3_request *priv_req; 2582 int ret = 0; 2583 2584 request->actual = 0; 2585 request->status = -EINPROGRESS; 2586 priv_req = to_cdns3_request(request); 2587 trace_cdns3_ep_queue(priv_req); 2588 2589 if (priv_dev->dev_ver < DEV_VER_V2) { 2590 ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep, 2591 priv_req); 2592 2593 if (ret == EINPROGRESS) 2594 return 0; 2595 } 2596 2597 ret = cdns3_prepare_aligned_request_buf(priv_req); 2598 if (ret < 0) 2599 return ret; 2600 2601 ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request, 2602 usb_endpoint_dir_in(ep->desc)); 2603 if (ret) 2604 return ret; 2605 2606 list_add_tail(&request->list, &priv_ep->deferred_req_list); 2607 2608 /* 2609 * For stream capable endpoint if prime irq flag is set then only start 2610 * request. 2611 * If hardware endpoint configuration has not been set yet then 2612 * just queue request in deferred list. Transfer will be started in 2613 * cdns3_set_hw_configuration. 
2614 */ 2615 if (!request->stream_id) { 2616 if (priv_dev->hw_configured_flag && 2617 !(priv_ep->flags & EP_STALLED) && 2618 !(priv_ep->flags & EP_STALL_PENDING)) 2619 cdns3_start_all_request(priv_dev, priv_ep); 2620 } else { 2621 if (priv_dev->hw_configured_flag && priv_ep->prime_flag) 2622 cdns3_start_all_request(priv_dev, priv_ep); 2623 } 2624 2625 return 0; 2626} 2627 2628static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, 2629 gfp_t gfp_flags) 2630{ 2631 struct usb_request *zlp_request; 2632 struct cdns3_endpoint *priv_ep; 2633 struct cdns3_device *priv_dev; 2634 unsigned long flags; 2635 int ret; 2636 2637 if (!request || !ep) 2638 return -EINVAL; 2639 2640 priv_ep = ep_to_cdns3_ep(ep); 2641 priv_dev = priv_ep->cdns3_dev; 2642 2643 spin_lock_irqsave(&priv_dev->lock, flags); 2644 2645 ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags); 2646 2647 if (ret == 0 && request->zero && request->length && 2648 (request->length % ep->maxpacket == 0)) { 2649 struct cdns3_request *priv_req; 2650 2651 zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC); 2652 zlp_request->buf = priv_dev->zlp_buf; 2653 zlp_request->length = 0; 2654 2655 priv_req = to_cdns3_request(zlp_request); 2656 priv_req->flags |= REQUEST_ZLP; 2657 2658 dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n", 2659 priv_ep->name); 2660 ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags); 2661 } 2662 2663 spin_unlock_irqrestore(&priv_dev->lock, flags); 2664 return ret; 2665} 2666 2667/** 2668 * cdns3_gadget_ep_dequeue Remove request from transfer queue 2669 * @ep: endpoint object associated with request 2670 * @request: request object 2671 * 2672 * Returns 0 on success, error code elsewhere 2673 */ 2674int cdns3_gadget_ep_dequeue(struct usb_ep *ep, 2675 struct usb_request *request) 2676{ 2677 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2678 struct cdns3_device *priv_dev; 2679 struct usb_request *req, *req_temp; 2680 struct cdns3_request *priv_req; 2681 struct cdns3_trb *link_trb; 2682 u8 req_on_hw_ring = 0; 2683 unsigned long flags; 2684 int ret = 0; 2685 2686 if (!ep || !request || !ep->desc) 2687 return -EINVAL; 2688 2689 priv_dev = priv_ep->cdns3_dev; 2690 2691 spin_lock_irqsave(&priv_dev->lock, flags); 2692 2693 priv_req = to_cdns3_request(request); 2694 2695 trace_cdns3_ep_dequeue(priv_req); 2696 2697 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2698 2699 list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list, 2700 list) { 2701 if (request == req) { 2702 req_on_hw_ring = 1; 2703 goto found; 2704 } 2705 } 2706 2707 list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list, 2708 list) { 2709 if (request == req) 2710 goto found; 2711 } 2712 2713 goto not_found; 2714 2715found: 2716 link_trb = priv_req->trb; 2717 2718 /* Update ring only if removed request is on pending_req_list list */ 2719 if (req_on_hw_ring && link_trb) { 2720 link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma + 2721 ((priv_req->end_trb + 1) * TRB_SIZE))); 2722 link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) | 2723 TRB_TYPE(TRB_LINK) | TRB_CHAIN); 2724 2725 if (priv_ep->wa1_trb == priv_req->trb) 2726 cdns3_wa1_restore_cycle_bit(priv_ep); 2727 } 2728 2729 cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET); 2730 2731not_found: 2732 spin_unlock_irqrestore(&priv_dev->lock, flags); 2733 return ret; 2734} 2735 2736/** 2737 * __cdns3_gadget_ep_set_halt Sets stall on selected endpoint 2738 * Should be called after acquiring spin_lock and 
selecting ep 2739 * @priv_ep: endpoint object to set stall on. 2740 */ 2741void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep) 2742{ 2743 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2744 2745 trace_cdns3_halt(priv_ep, 1, 0); 2746 2747 if (!(priv_ep->flags & EP_STALLED)) { 2748 u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts); 2749 2750 if (!(ep_sts_reg & EP_STS_DBUSY)) 2751 cdns3_ep_stall_flush(priv_ep); 2752 else 2753 priv_ep->flags |= EP_STALL_PENDING; 2754 } 2755} 2756 2757/** 2758 * __cdns3_gadget_ep_clear_halt Clears stall on selected endpoint 2759 * Should be called after acquiring spin_lock and selecting ep 2760 * @priv_ep: endpoint object to clear stall on 2761 */ 2762int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) 2763{ 2764 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2765 struct usb_request *request; 2766 struct cdns3_request *priv_req; 2767 struct cdns3_trb *trb = NULL; 2768 struct cdns3_trb trb_tmp; 2769 int ret; 2770 int val; 2771 2772 trace_cdns3_halt(priv_ep, 0, 0); 2773 2774 request = cdns3_next_request(&priv_ep->pending_req_list); 2775 if (request) { 2776 priv_req = to_cdns3_request(request); 2777 trb = priv_req->trb; 2778 if (trb) { 2779 trb_tmp = *trb; 2780 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); 2781 } 2782 } 2783 2784 writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2785 2786 /* wait for EPRST cleared */ 2787 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2788 !(val & EP_CMD_EPRST), 1, 100); 2789 if (ret) 2790 return -EINVAL; 2791 2792 priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); 2793 2794 if (request) { 2795 if (trb) 2796 *trb = trb_tmp; 2797 2798 cdns3_rearm_transfer(priv_ep, 1); 2799 } 2800 2801 cdns3_start_all_request(priv_dev, priv_ep); 2802 return ret; 2803} 2804 2805/** 2806 * cdns3_gadget_ep_set_halt Sets/clears stall on selected endpoint 2807 * @ep: endpoint object to set/clear stall on 2808 * @value: 1 for set stall, 0 for clear stall 2809 * 2810 * Returns 0 on success, error code elsewhere 2811 */ 2812int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value) 2813{ 2814 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2815 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2816 unsigned long flags; 2817 int ret = 0; 2818 2819 if (!(priv_ep->flags & EP_ENABLED)) 2820 return -EPERM; 2821 2822 spin_lock_irqsave(&priv_dev->lock, flags); 2823 2824 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2825 2826 if (!value) { 2827 priv_ep->flags &= ~EP_WEDGE; 2828 ret = __cdns3_gadget_ep_clear_halt(priv_ep); 2829 } else { 2830 __cdns3_gadget_ep_set_halt(priv_ep); 2831 } 2832 2833 spin_unlock_irqrestore(&priv_dev->lock, flags); 2834 2835 return ret; 2836} 2837 2838extern const struct usb_ep_ops cdns3_gadget_ep0_ops; 2839 2840static const struct usb_ep_ops cdns3_gadget_ep_ops = { 2841 .enable = cdns3_gadget_ep_enable, 2842 .disable = cdns3_gadget_ep_disable, 2843 .alloc_request = cdns3_gadget_ep_alloc_request, 2844 .free_request = cdns3_gadget_ep_free_request, 2845 .queue = cdns3_gadget_ep_queue, 2846 .dequeue = cdns3_gadget_ep_dequeue, 2847 .set_halt = cdns3_gadget_ep_set_halt, 2848 .set_wedge = cdns3_gadget_ep_set_wedge, 2849}; 2850 2851/** 2852 * cdns3_gadget_get_frame Returns number of actual ITP frame 2853 * @gadget: gadget object 2854 * 2855 * Returns number of actual ITP frame 2856 */ 2857static int cdns3_gadget_get_frame(struct usb_gadget *gadget) 2858{ 2859 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2860 2861 return 
readl(&priv_dev->regs->usb_itpn); 2862} 2863 2864int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev) 2865{ 2866 enum usb_device_speed speed; 2867 2868 speed = cdns3_get_speed(priv_dev); 2869 2870 if (speed >= USB_SPEED_SUPER) 2871 return 0; 2872 2873 /* Start driving resume signaling to indicate remote wakeup. */ 2874 writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf); 2875 2876 return 0; 2877} 2878 2879static int cdns3_gadget_wakeup(struct usb_gadget *gadget) 2880{ 2881 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2882 unsigned long flags; 2883 int ret = 0; 2884 2885 spin_lock_irqsave(&priv_dev->lock, flags); 2886 ret = __cdns3_gadget_wakeup(priv_dev); 2887 spin_unlock_irqrestore(&priv_dev->lock, flags); 2888 return ret; 2889} 2890 2891static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget, 2892 int is_selfpowered) 2893{ 2894 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2895 unsigned long flags; 2896 2897 spin_lock_irqsave(&priv_dev->lock, flags); 2898 priv_dev->is_selfpowered = !!is_selfpowered; 2899 spin_unlock_irqrestore(&priv_dev->lock, flags); 2900 return 0; 2901} 2902 2903static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on) 2904{ 2905 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2906 2907 if (is_on) { 2908 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf); 2909 } else { 2910 writel(~0, &priv_dev->regs->ep_ists); 2911 writel(~0, &priv_dev->regs->usb_ists); 2912 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); 2913 } 2914 2915 return 0; 2916} 2917 2918static void cdns3_gadget_config(struct cdns3_device *priv_dev) 2919{ 2920 struct cdns3_usb_regs __iomem *regs = priv_dev->regs; 2921 u32 reg; 2922 2923 cdns3_ep0_config(priv_dev); 2924 2925 /* enable interrupts for endpoint 0 (in and out) */ 2926 writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, ®s->ep_ien); 2927 2928 /* 2929 * Driver needs to modify LFPS minimal U1 Exit time for DEV_VER_TI_V1 2930 * revision of controller. 2931 */ 2932 if (priv_dev->dev_ver == DEV_VER_TI_V1) { 2933 reg = readl(®s->dbg_link1); 2934 2935 reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK; 2936 reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) | 2937 DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET; 2938 writel(reg, ®s->dbg_link1); 2939 } 2940 2941 /* 2942 * By default some platforms has set protected access to memory. 2943 * This cause problem with cache, so driver restore non-secure 2944 * access to memory. 
2945 */ 2946 reg = readl(®s->dma_axi_ctrl); 2947 reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) | 2948 DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE); 2949 writel(reg, ®s->dma_axi_ctrl); 2950 2951 /* enable generic interrupt*/ 2952 writel(USB_IEN_INIT, ®s->usb_ien); 2953 writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, ®s->usb_conf); 2954 /* keep Fast Access bit */ 2955 writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr); 2956 2957 cdns3_configure_dmult(priv_dev, NULL); 2958} 2959 2960/** 2961 * cdns3_gadget_udc_start Gadget start 2962 * @gadget: gadget object 2963 * @driver: driver which operates on this gadget 2964 * 2965 * Returns 0 on success, error code elsewhere 2966 */ 2967static int cdns3_gadget_udc_start(struct usb_gadget *gadget, 2968 struct usb_gadget_driver *driver) 2969{ 2970 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 2971 unsigned long flags; 2972 enum usb_device_speed max_speed = driver->max_speed; 2973 2974 spin_lock_irqsave(&priv_dev->lock, flags); 2975 priv_dev->gadget_driver = driver; 2976 2977 /* limit speed if necessary */ 2978 max_speed = min(driver->max_speed, gadget->max_speed); 2979 2980 switch (max_speed) { 2981 case USB_SPEED_FULL: 2982 writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf); 2983 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); 2984 break; 2985 case USB_SPEED_HIGH: 2986 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); 2987 break; 2988 case USB_SPEED_SUPER: 2989 break; 2990 default: 2991 dev_err(priv_dev->dev, 2992 "invalid maximum_speed parameter %d\n", 2993 max_speed); 2994 fallthrough; 2995 case USB_SPEED_UNKNOWN: 2996 /* default to superspeed */ 2997 max_speed = USB_SPEED_SUPER; 2998 break; 2999 } 3000 3001 cdns3_gadget_config(priv_dev); 3002 spin_unlock_irqrestore(&priv_dev->lock, flags); 3003 return 0; 3004} 3005 3006/** 3007 * cdns3_gadget_udc_stop Stops gadget 3008 * @gadget: gadget object 3009 * 3010 * Returns 0 3011 */ 3012static int cdns3_gadget_udc_stop(struct usb_gadget *gadget) 3013{ 3014 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 3015 struct cdns3_endpoint *priv_ep; 3016 u32 bEndpointAddress; 3017 struct usb_ep *ep; 3018 int val; 3019 3020 priv_dev->gadget_driver = NULL; 3021 3022 priv_dev->onchip_used_size = 0; 3023 priv_dev->out_mem_is_allocated = 0; 3024 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3025 3026 list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { 3027 priv_ep = ep_to_cdns3_ep(ep); 3028 bEndpointAddress = priv_ep->num | priv_ep->dir; 3029 cdns3_select_ep(priv_dev, bEndpointAddress); 3030 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 3031 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 3032 !(val & EP_CMD_EPRST), 1, 100); 3033 3034 priv_ep->flags &= ~EP_CLAIMED; 3035 } 3036 3037 /* disable interrupt for device */ 3038 writel(0, &priv_dev->regs->usb_ien); 3039 writel(0, &priv_dev->regs->usb_pwr); 3040 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); 3041 3042 return 0; 3043} 3044 3045/** 3046 * cdns3_gadget_check_config - ensure cdns3 can support the USB configuration 3047 * @gadget: pointer to the USB gadget 3048 * 3049 * Used to record the maximum number of endpoints being used in a USB composite 3050 * device. (across all configurations) This is to be used in the calculation 3051 * of the TXFIFO sizes when resizing internal memory for individual endpoints. 3052 * It will help ensured that the resizing logic reserves enough space for at 3053 * least one max packet. 
3054 */ 3055static int cdns3_gadget_check_config(struct usb_gadget *gadget) 3056{ 3057 struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); 3058 struct cdns3_endpoint *priv_ep; 3059 struct usb_ep *ep; 3060 int n_in = 0; 3061 int iso = 0; 3062 int out = 1; 3063 int total; 3064 int n; 3065 3066 list_for_each_entry(ep, &gadget->ep_list, ep_list) { 3067 priv_ep = ep_to_cdns3_ep(ep); 3068 if (!(priv_ep->flags & EP_CLAIMED)) 3069 continue; 3070 3071 n = (priv_ep->mult + 1) * (priv_ep->bMaxBurst + 1); 3072 if (ep->address & USB_DIR_IN) { 3073 /* 3074 * ISO transfer: DMA start move data when get ISO, only transfer 3075 * data as min(TD size, iso). No benefit for allocate bigger 3076 * internal memory than 'iso'. 3077 */ 3078 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) 3079 iso += n; 3080 else 3081 n_in++; 3082 } else { 3083 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) 3084 out = max_t(int, out, n); 3085 } 3086 } 3087 3088 /* 2KB are reserved for EP0, 1KB for out*/ 3089 total = 2 + n_in + out + iso; 3090 3091 if (total > priv_dev->onchip_buffers) 3092 return -ENOMEM; 3093 3094 priv_dev->ep_buf_size = (priv_dev->onchip_buffers - 2 - iso) / (n_in + out); 3095 3096 return 0; 3097} 3098 3099static const struct usb_gadget_ops cdns3_gadget_ops = { 3100 .get_frame = cdns3_gadget_get_frame, 3101 .wakeup = cdns3_gadget_wakeup, 3102 .set_selfpowered = cdns3_gadget_set_selfpowered, 3103 .pullup = cdns3_gadget_pullup, 3104 .udc_start = cdns3_gadget_udc_start, 3105 .udc_stop = cdns3_gadget_udc_stop, 3106 .match_ep = cdns3_gadget_match_ep, 3107 .check_config = cdns3_gadget_check_config, 3108}; 3109 3110static void cdns3_free_all_eps(struct cdns3_device *priv_dev) 3111{ 3112 int i; 3113 3114 /* ep0 OUT point to ep0 IN. */ 3115 priv_dev->eps[16] = NULL; 3116 3117 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) 3118 if (priv_dev->eps[i]) { 3119 cdns3_free_trb_pool(priv_dev->eps[i]); 3120 devm_kfree(priv_dev->dev, priv_dev->eps[i]); 3121 } 3122} 3123 3124/** 3125 * cdns3_init_eps Initializes software endpoints of gadget 3126 * @priv_dev: extended gadget object 3127 * 3128 * Returns 0 on success, error code elsewhere 3129 */ 3130static int cdns3_init_eps(struct cdns3_device *priv_dev) 3131{ 3132 u32 ep_enabled_reg, iso_ep_reg; 3133 struct cdns3_endpoint *priv_ep; 3134 int ep_dir, ep_number; 3135 u32 ep_mask; 3136 int ret = 0; 3137 int i; 3138 3139 /* Read it from USB_CAP3 to USB_CAP5 */ 3140 ep_enabled_reg = readl(&priv_dev->regs->usb_cap3); 3141 iso_ep_reg = readl(&priv_dev->regs->usb_cap4); 3142 3143 dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n"); 3144 3145 for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) { 3146 ep_dir = i >> 4; /* i div 16 */ 3147 ep_number = i & 0xF; /* i % 16 */ 3148 ep_mask = BIT(i); 3149 3150 if (!(ep_enabled_reg & ep_mask)) 3151 continue; 3152 3153 if (ep_dir && !ep_number) { 3154 priv_dev->eps[i] = priv_dev->eps[0]; 3155 continue; 3156 } 3157 3158 priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep), 3159 GFP_KERNEL); 3160 if (!priv_ep) 3161 goto err; 3162 3163 /* set parent of endpoint object */ 3164 priv_ep->cdns3_dev = priv_dev; 3165 priv_dev->eps[i] = priv_ep; 3166 priv_ep->num = ep_number; 3167 priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT; 3168 3169 if (!ep_number) { 3170 ret = cdns3_init_ep0(priv_dev, priv_ep); 3171 if (ret) { 3172 dev_err(priv_dev->dev, "Failed to init ep0\n"); 3173 goto err; 3174 } 3175 } else { 3176 snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s", 3177 ep_number, !!ep_dir ? 
"in" : "out"); 3178 priv_ep->endpoint.name = priv_ep->name; 3179 3180 usb_ep_set_maxpacket_limit(&priv_ep->endpoint, 3181 CDNS3_EP_MAX_PACKET_LIMIT); 3182 priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS; 3183 priv_ep->endpoint.ops = &cdns3_gadget_ep_ops; 3184 if (ep_dir) 3185 priv_ep->endpoint.caps.dir_in = 1; 3186 else 3187 priv_ep->endpoint.caps.dir_out = 1; 3188 3189 if (iso_ep_reg & ep_mask) 3190 priv_ep->endpoint.caps.type_iso = 1; 3191 3192 priv_ep->endpoint.caps.type_bulk = 1; 3193 priv_ep->endpoint.caps.type_int = 1; 3194 3195 list_add_tail(&priv_ep->endpoint.ep_list, 3196 &priv_dev->gadget.ep_list); 3197 } 3198 3199 priv_ep->flags = 0; 3200 3201 dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n", 3202 priv_ep->name, 3203 priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "", 3204 priv_ep->endpoint.caps.type_iso ? "ISO" : ""); 3205 3206 INIT_LIST_HEAD(&priv_ep->pending_req_list); 3207 INIT_LIST_HEAD(&priv_ep->deferred_req_list); 3208 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); 3209 } 3210 3211 return 0; 3212err: 3213 cdns3_free_all_eps(priv_dev); 3214 return -ENOMEM; 3215} 3216 3217static void cdns3_gadget_release(struct device *dev) 3218{ 3219 struct cdns3_device *priv_dev = container_of(dev, 3220 struct cdns3_device, gadget.dev); 3221 3222 kfree(priv_dev); 3223} 3224 3225void cdns3_gadget_exit(struct cdns3 *cdns) 3226{ 3227 struct cdns3_device *priv_dev; 3228 3229 priv_dev = cdns->gadget_dev; 3230 3231 3232 pm_runtime_mark_last_busy(cdns->dev); 3233 pm_runtime_put_autosuspend(cdns->dev); 3234 3235 usb_del_gadget(&priv_dev->gadget); 3236 devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); 3237 3238 cdns3_free_all_eps(priv_dev); 3239 3240 while (!list_empty(&priv_dev->aligned_buf_list)) { 3241 struct cdns3_aligned_buf *buf; 3242 3243 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); 3244 dma_free_coherent(priv_dev->sysdev, buf->size, 3245 buf->buf, 3246 buf->dma); 3247 3248 list_del(&buf->list); 3249 kfree(buf); 3250 } 3251 3252 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3253 priv_dev->setup_dma); 3254 3255 kfree(priv_dev->zlp_buf); 3256 usb_put_gadget(&priv_dev->gadget); 3257 cdns->gadget_dev = NULL; 3258 cdns3_drd_gadget_off(cdns); 3259} 3260 3261static int cdns3_gadget_start(struct cdns3 *cdns) 3262{ 3263 struct cdns3_device *priv_dev; 3264 u32 max_speed; 3265 int ret; 3266 3267 priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); 3268 if (!priv_dev) 3269 return -ENOMEM; 3270 3271 usb_initialize_gadget(cdns->dev, &priv_dev->gadget, 3272 cdns3_gadget_release); 3273 cdns->gadget_dev = priv_dev; 3274 priv_dev->sysdev = cdns->dev; 3275 priv_dev->dev = cdns->dev; 3276 priv_dev->regs = cdns->dev_regs; 3277 3278 device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size", 3279 &priv_dev->onchip_buffers); 3280 3281 if (priv_dev->onchip_buffers <= 0) { 3282 u32 reg = readl(&priv_dev->regs->usb_cap2); 3283 3284 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); 3285 } 3286 3287 if (!priv_dev->onchip_buffers) 3288 priv_dev->onchip_buffers = 256; 3289 3290 max_speed = usb_get_maximum_speed(cdns->dev); 3291 3292 /* Check the maximum_speed parameter */ 3293 switch (max_speed) { 3294 case USB_SPEED_FULL: 3295 case USB_SPEED_HIGH: 3296 case USB_SPEED_SUPER: 3297 break; 3298 default: 3299 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", 3300 max_speed); 3301 fallthrough; 3302 case USB_SPEED_UNKNOWN: 3303 /* default to superspeed */ 3304 max_speed = USB_SPEED_SUPER; 3305 break; 3306 } 3307 3308 /* fill gadget fields */ 3309 
priv_dev->gadget.max_speed = max_speed; 3310 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3311 priv_dev->gadget.ops = &cdns3_gadget_ops; 3312 priv_dev->gadget.name = "usb-ss-gadget"; 3313 priv_dev->gadget.quirk_avoids_skb_reserve = 1; 3314 priv_dev->gadget.irq = cdns->dev_irq; 3315 3316 spin_lock_init(&priv_dev->lock); 3317 INIT_WORK(&priv_dev->pending_status_wq, 3318 cdns3_pending_setup_status_handler); 3319 3320 INIT_WORK(&priv_dev->aligned_buf_wq, 3321 cdns3_free_aligned_request_buf); 3322 3323 /* initialize endpoint container */ 3324 INIT_LIST_HEAD(&priv_dev->gadget.ep_list); 3325 INIT_LIST_HEAD(&priv_dev->aligned_buf_list); 3326 3327 ret = cdns3_init_eps(priv_dev); 3328 if (ret) { 3329 dev_err(priv_dev->dev, "Failed to create endpoints\n"); 3330 goto err1; 3331 } 3332 3333 /* allocate memory for setup packet buffer */ 3334 priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8, 3335 &priv_dev->setup_dma, GFP_DMA); 3336 if (!priv_dev->setup_buf) { 3337 ret = -ENOMEM; 3338 goto err2; 3339 } 3340 3341 priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6); 3342 3343 dev_dbg(priv_dev->dev, "Device Controller version: %08x\n", 3344 readl(&priv_dev->regs->usb_cap6)); 3345 dev_dbg(priv_dev->dev, "USB Capabilities:: %08x\n", 3346 readl(&priv_dev->regs->usb_cap1)); 3347 dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n", 3348 readl(&priv_dev->regs->usb_cap2)); 3349 3350 priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver); 3351 if (priv_dev->dev_ver >= DEV_VER_V2) 3352 priv_dev->gadget.sg_supported = 1; 3353 3354 priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL); 3355 if (!priv_dev->zlp_buf) { 3356 ret = -ENOMEM; 3357 goto err3; 3358 } 3359 3360 /* add USB gadget device */ 3361 ret = usb_add_gadget(&priv_dev->gadget); 3362 if (ret < 0) { 3363 dev_err(priv_dev->dev, "Failed to add gadget\n"); 3364 goto err4; 3365 } 3366 3367 return 0; 3368err4: 3369 kfree(priv_dev->zlp_buf); 3370err3: 3371 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3372 priv_dev->setup_dma); 3373err2: 3374 cdns3_free_all_eps(priv_dev); 3375err1: 3376 usb_put_gadget(&priv_dev->gadget); 3377 cdns->gadget_dev = NULL; 3378 return ret; 3379} 3380 3381static int __cdns3_gadget_init(struct cdns3 *cdns) 3382{ 3383 int ret = 0; 3384 3385 /* Ensure 32-bit DMA Mask in case we switched back from Host mode */ 3386 ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32)); 3387 if (ret) { 3388 dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret); 3389 return ret; 3390 } 3391 3392 cdns3_drd_gadget_on(cdns); 3393 pm_runtime_get_sync(cdns->dev); 3394 3395 ret = cdns3_gadget_start(cdns); 3396 if (ret) { 3397 pm_runtime_put_sync(cdns->dev); 3398 return ret; 3399 } 3400 3401 /* 3402 * Because interrupt line can be shared with other components in 3403 * driver it can't use IRQF_ONESHOT flag here. 
3404 */ 3405 ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, 3406 cdns3_device_irq_handler, 3407 cdns3_device_thread_irq_handler, 3408 IRQF_SHARED, dev_name(cdns->dev), 3409 cdns->gadget_dev); 3410 3411 if (ret) 3412 goto err0; 3413 3414 return 0; 3415err0: 3416 cdns3_gadget_exit(cdns); 3417 return ret; 3418} 3419 3420static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup) 3421__must_hold(&cdns->lock) 3422{ 3423 struct cdns3_device *priv_dev = cdns->gadget_dev; 3424 3425 spin_unlock(&cdns->lock); 3426 cdns3_disconnect_gadget(priv_dev); 3427 spin_lock(&cdns->lock); 3428 3429 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3430 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); 3431 cdns3_hw_reset_eps_config(priv_dev); 3432 3433 /* disable interrupt for device */ 3434 writel(0, &priv_dev->regs->usb_ien); 3435 3436 return 0; 3437} 3438 3439static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated) 3440{ 3441 struct cdns3_device *priv_dev = cdns->gadget_dev; 3442 3443 if (!priv_dev->gadget_driver) 3444 return 0; 3445 3446 cdns3_gadget_config(priv_dev); 3447 3448 return 0; 3449} 3450 3451/** 3452 * cdns3_gadget_init - initialize device structure 3453 * 3454 * @cdns: cdns3 instance 3455 * 3456 * This function initializes the gadget. 3457 */ 3458int cdns3_gadget_init(struct cdns3 *cdns) 3459{ 3460 struct cdns3_role_driver *rdrv; 3461 3462 rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); 3463 if (!rdrv) 3464 return -ENOMEM; 3465 3466 rdrv->start = __cdns3_gadget_init; 3467 rdrv->stop = cdns3_gadget_exit; 3468 rdrv->suspend = cdns3_gadget_suspend; 3469 rdrv->resume = cdns3_gadget_resume; 3470 rdrv->state = CDNS3_ROLE_STATE_INACTIVE; 3471 rdrv->name = "gadget"; 3472 cdns->roles[USB_ROLE_DEVICE] = rdrv; 3473 3474 return 0; 3475} 3476