cdns3-gadget.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver - gadget side.
 *
 * Copyright (C) 2018-2019 Cadence Design Systems.
 * Copyright (C) 2017-2018 NXP
 *
 * Authors: Pawel Jez <pjez@cadence.com>,
 *          Pawel Laszczak <pawell@cadence.com>
 *          Peter Chen <peter.chen@nxp.com>
 */

/*
 * Work around 1:
 * In some situations the controller may get a stale data address in a TRB,
 * in the following sequence:
 * 1. Controller reads a TRB that includes the data address.
 * 2. Software updates TRBs, including the data address and Cycle bit.
 * 3. Controller reads the TRB again, which now includes the Cycle bit.
 * 4. DMA runs with the stale data address.
 *
 * To fix this problem, the driver needs to make the first TRB in a TD
 * invalid. After preparing all TRBs, the driver needs to check the position
 * of DMA; if DMA points to the first just-added TRB and the doorbell is set,
 * then the driver must defer making this TRB valid. The TRB is made valid
 * while adding the next TRB, and only if DMA is stopped, or at the TRBERR
 * interrupt.
 *
 * The issue has been fixed in the DEV_VER_V3 version of the controller.
 *
 * Work around 2:
 * For OUT endpoints the controller has shared on-chip buffers for all
 * incoming packets, including ep0out. It is a FIFO buffer, so packets must
 * be handled by DMA in the correct order. If the first packet in the buffer
 * is not handled, the following packets directed to other endpoints and
 * functions are blocked.
 * Additionally, packets directed to one endpoint can block the entire
 * on-chip buffer; in this case transfers to other endpoints are blocked too.
 *
 * To resolve this issue, after raising the descriptor missing interrupt the
 * driver prepares an internal usb_request object and uses it to arm a DMA
 * transfer.
 *
 * The problematic situation was observed when an endpoint had been enabled
 * but no usb_request was queued. The driver tries to detect such endpoints
 * and uses this workaround only for them.
 *
 * The driver uses a limited number of buffers. This number can be set by the
 * macro CDNS3_WA2_NUM_BUFFERS.
 *
 * Such a blocking situation was observed with the ACM gadget: the host sends
 * an OUT data packet, but the ACM function is not prepared for it. This
 * causes the buffer placed in on-chip memory to block transfers to other
 * endpoints.
 *
 * The issue has been fixed in the DEV_VER_V2 version of the controller.
 */

#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "core.h"
#include "gadget-export.h"
#include "cdns3-gadget.h"
#include "cdns3-trace.h"
#include "drd.h"

static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
				   struct usb_request *request,
				   gfp_t gfp_flags);

static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request);

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request);

/**
 * cdns3_clear_register_bit - clear bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to clear
 */
static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) & ~mask;
	writel(mask, ptr);
}

/**
 * cdns3_set_register_bit - set bit in given register.
 * @ptr: address of device controller register to be read and changed
 * @mask: bits requested to set
 */
void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
{
	mask = readl(ptr) | mask;
	writel(mask, ptr);
}

/**
 * cdns3_ep_addr_to_index - converts endpoint address to
 * index of endpoint object in cdns3_device.eps[] container
 * @ep_addr: endpoint address for which endpoint object is required
 *
 */
u8 cdns3_ep_addr_to_index(u8 ep_addr)
{
	return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
}
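
/*
 * Worked example (illustrative, follows directly from the mapping above):
 * OUT endpoints land at indexes 0-15 and IN endpoints at 16-31, so
 * ep2out (0x02) maps to eps[2] while ep2in (0x82) maps to eps[18];
 * ep0 uses index 0 for OUT and 16 for IN.
 */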

static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
			     struct cdns3_endpoint *priv_ep)
{
	int dma_index;

	dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;

	return dma_index / TRB_SIZE;
}

/**
 * cdns3_next_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
struct usb_request *cdns3_next_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct usb_request, list);
}

/**
 * cdns3_next_align_buf - returns next buffer from list
 * @list: list containing buffers
 *
 * Returns buffer or NULL if no buffers in list
 */
static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
}

/**
 * cdns3_next_priv_request - returns next request from list
 * @list: list containing requests
 *
 * Returns request or NULL if no requests in list
 */
static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
{
	return list_first_entry_or_null(list, struct cdns3_request, list);
}

/**
 * cdns3_select_ep - selects endpoint
 * @priv_dev: extended gadget object
 * @ep: endpoint address
 */
void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
{
	if (priv_dev->selected_ep == ep)
		return;

	priv_dev->selected_ep = ep;
	writel(ep, &priv_dev->regs->ep_sel);
}

/**
 * cdns3_get_tdl - gets current tdl for selected endpoint.
 * @priv_dev: extended gadget object
 *
 * Before calling this function the appropriate endpoint must
 * be selected by means of the cdns3_select_ep function.
 */
static int cdns3_get_tdl(struct cdns3_device *priv_dev)
{
	if (priv_dev->dev_ver < DEV_VER_V3)
		return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));
	else
		return readl(&priv_dev->regs->ep_tdl);
}

dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
				 struct cdns3_trb *trb)
{
	u32 offset = (char *)trb - (char *)priv_ep->trb_pool;

	return priv_ep->trb_pool_dma + offset;
}

static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_ep->trb_pool) {
		dma_pool_free(priv_dev->eps_dma_pool,
			      priv_ep->trb_pool, priv_ep->trb_pool_dma);
		priv_ep->trb_pool = NULL;
	}
}

/**
 * cdns3_allocate_trb_pool - Allocates TRB pool for selected endpoint
 * @priv_ep: endpoint object
 *
 * Function will return 0 on success or -ENOMEM on allocation error
 */
int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int ring_size = TRB_RING_SIZE;
	int num_trbs = ring_size / TRB_SIZE;
	struct cdns3_trb *link_trb;

	if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size)
		cdns3_free_trb_pool(priv_ep);

	if (!priv_ep->trb_pool) {
		priv_ep->trb_pool = dma_pool_alloc(priv_dev->eps_dma_pool,
						   GFP_DMA32 | GFP_ATOMIC,
						   &priv_ep->trb_pool_dma);

		if (!priv_ep->trb_pool)
			return -ENOMEM;

		priv_ep->alloc_ring_size = ring_size;
	}

	memset(priv_ep->trb_pool, 0, ring_size);

	priv_ep->num_trbs = num_trbs;

	if (!priv_ep->num)
		return 0;

	/* Initialize the last TRB as Link TRB */
	link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));

	if (priv_ep->use_streams) {
		/*
		 * For stream capable endpoints the driver uses a single,
		 * correct TRB. The last TRB has a zeroed cycle bit.
		 */
		link_trb->control = 0;
	} else {
		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma));
		link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE);
	}
	return 0;
}
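
/*
 * Ring layout sketch (illustrative; actual size follows from
 * TRB_RING_SIZE / TRB_SIZE): slots 0 .. num_trbs - 2 carry transfer TRBs
 * and the last slot holds the Link TRB pointing back to trb_pool_dma, with
 * TRB_TOGGLE flipping the cycle state on each wrap:
 *
 *	[0][1][2] ... [num_trbs - 2][LINK -> slot 0]
 */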

/**
 * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
 * @priv_ep: endpoint object
 *
 * Endpoint must be selected before call to this function
 */
static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	int val;

	trace_cdns3_halt(priv_ep, 1, 1);

	writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
	       &priv_dev->regs->ep_cmd);

	/* wait for DFLUSH cleared */
	readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
				  !(val & EP_CMD_DFLUSH), 1, 1000);
	priv_ep->flags |= EP_STALLED;
	priv_ep->flags &= ~EP_STALL_PENDING;
}

/**
 * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
 * @priv_dev: extended gadget object
 */
void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
{
	int i;

	writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);

	cdns3_allow_enable_l1(priv_dev, 0);
	priv_dev->hw_configured_flag = 0;
	priv_dev->onchip_used_size = 0;
	priv_dev->out_mem_is_allocated = 0;
	priv_dev->wait_for_setup = 0;
	priv_dev->using_streams = 0;

	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
		if (priv_dev->eps[i])
			priv_dev->eps[i]->flags &= ~EP_CONFIGURED;
}

/**
 * cdns3_ep_inc_trb - increment a trb index.
 * @index: Pointer to the TRB index to increment.
 * @cs: Cycle state
 * @trb_in_seg: number of TRBs in segment
 *
 * The index should never point to the link TRB. After incrementing, if it
 * would point to the link TRB, wrap around to the beginning and invert the
 * cycle state bit. The link TRB is always at the last TRB entry.
 */
static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{
	(*index)++;
	if (*index == (trb_in_seg - 1)) {
		*index = 0;
		*cs ^= 1;
	}
}

/**
 * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
 * @priv_ep: The endpoint whose enqueue pointer we're incrementing
 */
static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs--;
	cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
}

/**
 * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
 * @priv_ep: The endpoint whose dequeue pointer we're incrementing
 */
static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
{
	priv_ep->free_trbs++;
	cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
}
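
/*
 * Example (hypothetical 8-TRB ring): cdns3_ep_inc_trb() never returns the
 * link TRB slot, so the index walks 0, 1, ... 6 and then wraps:
 *
 *	int idx = 6;
 *	u8 cs = 1;
 *
 *	cdns3_ep_inc_trb(&idx, &cs, 8);	// now idx == 0, cs == 0 (cycle flipped)
 */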

/**
 * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
 * @priv_dev: Extended gadget object
 * @enable: Enable/disable permit to transition to L1.
 *
 * If bit USB_CONF_L1EN is set and the device receives an Extended Token
 * packet, then the controller answers with an ACK handshake.
 * If bit USB_CONF_L1DS is set and the device receives an Extended Token
 * packet, then the controller answers with a NYET handshake.
 */
void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
{
	if (enable)
		writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
	else
		writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
}

enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
{
	u32 reg;

	reg = readl(&priv_dev->regs->usb_sts);

	if (DEV_SUPERSPEED(reg))
		return USB_SPEED_SUPER;
	else if (DEV_HIGHSPEED(reg))
		return USB_SPEED_HIGH;
	else if (DEV_FULLSPEED(reg))
		return USB_SPEED_FULL;
	else if (DEV_LOWSPEED(reg))
		return USB_SPEED_LOW;
	return USB_SPEED_UNKNOWN;
}

/**
 * cdns3_start_all_request - add to ring all requests not started
 * @priv_dev: Extended gadget object
 * @priv_ep: The endpoint for which requests will be started.
 *
 * Returns 0 on success or a negative error code if the transfer ring does
 * not have enough free TRBs to start all requests.
 */
static int cdns3_start_all_request(struct cdns3_device *priv_dev,
				   struct cdns3_endpoint *priv_ep)
{
	struct usb_request *request;
	int ret = 0;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/*
	 * If the last pending transfer is INTERNAL
	 * OR streams are enabled for this endpoint,
	 * do NOT start a new transfer till the last one is pending.
	 */
	if (!pending_empty) {
		struct cdns3_request *priv_req;

		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);
		if ((priv_req->flags & REQUEST_INTERNAL) ||
		    (priv_ep->flags & EP_TDLCHK_EN) ||
		    priv_ep->use_streams) {
			dev_dbg(priv_dev->dev, "Blocking external request\n");
			return ret;
		}
	}

	while (!list_empty(&priv_ep->deferred_req_list)) {
		request = cdns3_next_request(&priv_ep->deferred_req_list);

		if (!priv_ep->use_streams) {
			ret = cdns3_ep_run_transfer(priv_ep, request);
		} else {
			priv_ep->stream_sg_idx = 0;
			ret = cdns3_ep_run_stream_transfer(priv_ep, request);
		}
		if (ret)
			return ret;

		list_move_tail(&request->list, &priv_ep->pending_req_list);
		if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN))
			break;
	}

	priv_ep->flags &= ~EP_RING_FULL;
	return ret;
}
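
/*
 * Request flow sketch (a summary of the paths in this file, simplified):
 * usb_ep_queue() places a request on deferred_req_list;
 * cdns3_start_all_request() arms DMA via cdns3_ep_run_transfer() or
 * cdns3_ep_run_stream_transfer() and moves the request to pending_req_list;
 * the completion paths call cdns3_gadget_giveback(), which removes it and
 * invokes its ->complete() callback.
 */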

/*
 * WA2: Set flag for all non-ISOC OUT endpoints. If this flag is set the
 * driver tries to detect whether the endpoint needs an additional internal
 * buffer for unblocking the on-chip FIFO buffer. This flag will be cleared
 * if the DMA is armed before the first DESCMISS interrupt.
 */
#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \
	if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
		(reg) |= EP_STS_EN_DESCMISEN; \
	} } while (0)

static void __cdns3_descmiss_copy_data(struct usb_request *request,
				       struct usb_request *descmiss_req)
{
	int length = request->actual + descmiss_req->actual;
	struct scatterlist *s = request->sg;

	if (!s) {
		if (length <= request->length) {
			memcpy(&((u8 *)request->buf)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			/* This should never occur */
			request->status = -ENOMEM;
		}
	} else {
		if (length <= sg_dma_len(s)) {
			void *p = phys_to_virt(sg_dma_address(s));

			memcpy(&((u8 *)p)[request->actual],
			       descmiss_req->buf,
			       descmiss_req->actual);
			request->actual = length;
		} else {
			request->status = -ENOMEM;
		}
	}
}

/**
 * cdns3_wa2_descmiss_copy_data - copy data from internal requests to
 * request queued by class driver.
 * @priv_ep: extended endpoint object
 * @request: request object
 */
static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
					 struct usb_request *request)
{
	struct usb_request *descmiss_req;
	struct cdns3_request *descmiss_priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		int chunk_end;

		descmiss_priv_req =
			cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		descmiss_req = &descmiss_priv_req->request;

		/* driver can't touch pending request */
		if (descmiss_priv_req->flags & REQUEST_PENDING)
			break;

		chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
		request->status = descmiss_req->status;
		__cdns3_descmiss_copy_data(request, descmiss_req);
		list_del_init(&descmiss_priv_req->list);
		kfree(descmiss_req->buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
		--priv_ep->wa2_counter;

		if (!chunk_end)
			break;
	}
}

static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
						     struct cdns3_endpoint *priv_ep,
						     struct cdns3_request *priv_req)
{
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
	    priv_req->flags & REQUEST_INTERNAL) {
		struct usb_request *req;

		req = cdns3_next_request(&priv_ep->deferred_req_list);

		priv_ep->descmis_req = NULL;

		if (!req)
			return NULL;

		/* unmap the gadget request before copying data */
		usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req,
						priv_ep->dir);

		cdns3_wa2_descmiss_copy_data(priv_ep, req);
		if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
		    req->length != req->actual) {
			/* wait for next part of transfer */
			/* re-map the gadget request buffer */
			usb_gadget_map_request_by_dev(priv_dev->sysdev, req,
						      usb_endpoint_dir_in(priv_ep->endpoint.desc));
			return NULL;
		}

		if (req->status == -EINPROGRESS)
			req->status = 0;

		list_del_init(&req->list);
		cdns3_start_all_request(priv_dev, priv_ep);
		return req;
	}

	return &priv_req->request;
}

static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep,
				     struct cdns3_request *priv_req)
{
	int deferred = 0;

	/*
	 * If a transfer was queued before DESCMISS appeared, then the driver
	 * can disable handling of the DESCMISS interrupt. The driver assumes
	 * that it can disable special treatment for this endpoint.
	 */
	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		u32 reg;

		cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		reg = readl(&priv_dev->regs->ep_sts_en);
		reg &= ~EP_STS_EN_DESCMISEN;
		trace_cdns3_wa2(priv_ep, "workaround disabled\n");
		writel(reg, &priv_dev->regs->ep_sts_en);
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
		u8 pending_empty = list_empty(&priv_ep->pending_req_list);
		u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);

		/*
		 * The DESCMISS transfer has finished, so data will be copied
		 * directly from the internally allocated usb_request objects.
		 */
		if (pending_empty && !descmiss_empty &&
		    !(priv_req->flags & REQUEST_INTERNAL)) {
			cdns3_wa2_descmiss_copy_data(priv_ep,
						     &priv_req->request);

			trace_cdns3_wa2(priv_ep, "get internal stored data");

			list_add_tail(&priv_req->request.list,
				      &priv_ep->pending_req_list);
			cdns3_gadget_giveback(priv_ep, priv_req,
					      priv_req->request.status);

			/*
			 * The driver intentionally returns a positive value
			 * as the correct value. It informs the caller that
			 * the transfer has finished.
			 */
			return EINPROGRESS;
		}

		/*
		 * The driver will wait for completion of the DESCMISS
		 * transfer before starting a new, non-DESCMISS transfer.
		 */
		if (!pending_empty && !descmiss_empty) {
			trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
			deferred = 1;
		}

		if (priv_req->flags & REQUEST_INTERNAL)
			list_add_tail(&priv_req->list,
				      &priv_ep->wa2_descmiss_req_list);
	}

	return deferred;
}

static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;

	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
		u8 chain;

		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
		chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);

		trace_cdns3_wa2(priv_ep, "removes eldest request");

		kfree(priv_req->request.buf);
		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
					     &priv_req->request);
		list_del_init(&priv_req->list);
		--priv_ep->wa2_counter;

		if (!chain)
			break;
	}
}
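
/*
 * Note on the eviction above (WA2): internal buffers are capped at
 * CDNS3_WA2_NUM_BUFFERS. On overflow the oldest entry is dropped together
 * with every entry chained to it via REQUEST_INTERNAL_CH (the chain ends at
 * the first entry without that flag), so a partial transfer is never left
 * half-represented on wa2_descmiss_req_list.
 */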

/**
 * cdns3_wa2_descmissing_packet - handles descriptor missing event.
 * @priv_ep: extended gadget object
 *
 * This function is used only for WA2. For more information see Work around 2
 * description.
 */
static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	u8 pending_empty = list_empty(&priv_ep->pending_req_list);

	/* check for pending transfer */
	if (!pending_empty) {
		trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n");
		return;
	}

	if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
		priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
		priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
	}

	trace_cdns3_wa2(priv_ep, "Description Missing detected\n");

	if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) {
		trace_cdns3_wa2(priv_ep, "WA2 overflow\n");
		cdns3_wa2_remove_old_request(priv_ep);
	}

	request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
						GFP_ATOMIC);
	if (!request)
		goto err;

	priv_req = to_cdns3_request(request);
	priv_req->flags |= REQUEST_INTERNAL;

	/*
	 * If this field is still assigned, it indicates that the transfer
	 * related to this request has not finished yet. In this case the
	 * driver simply allocates the next request and assigns the
	 * REQUEST_INTERNAL_CH flag to the previous one, indicating that the
	 * current request is part of the previous one.
	 */
	if (priv_ep->descmis_req)
		priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;

	priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
					GFP_ATOMIC);
	priv_ep->wa2_counter++;

	if (!priv_req->request.buf) {
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
		goto err;
	}

	priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
	priv_ep->descmis_req = priv_req;

	__cdns3_gadget_ep_queue(&priv_ep->endpoint,
				&priv_ep->descmis_req->request,
				GFP_ATOMIC);

	return;

err:
	dev_err(priv_ep->cdns3_dev->dev,
		"Failed: No sufficient memory for DESCMIS\n");
}
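
/*
 * Example (illustrative): if the host sends three back-to-back OUT packets
 * while no class request is queued, the list can look like:
 *
 *	wa2_descmiss_req_list: [buf A, CH] -> [buf B, CH] -> [buf C]
 *
 * where CH marks REQUEST_INTERNAL_CH. When a class request finally arrives,
 * cdns3_wa2_descmiss_copy_data() copies A, B and C in order; the entry
 * without CH ends the chunk.
 */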

static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev)
{
	u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

	if (tdl) {
		u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl;

		writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL,
		       &priv_dev->regs->ep_cmd);
	}
}

static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev)
{
	u32 ep_sts_reg;

	/* select EP0-out */
	cdns3_select_ep(priv_dev, 0);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);

	if (EP_STS_OUTQ_VAL(ep_sts_reg)) {
		u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg);
		struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num];

		if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) &&
		    outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) {
			u8 pending_empty = list_empty(&outq_ep->pending_req_list);

			if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) ||
			    (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) ||
			    !pending_empty) {
			} else {
				u32 ep_sts_en_reg;
				u32 ep_cmd_reg;

				cdns3_select_ep(priv_dev, outq_ep->num |
						outq_ep->dir);
				ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en);
				ep_cmd_reg = readl(&priv_dev->regs->ep_cmd);

				outq_ep->flags |= EP_TDLCHK_EN;
				cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
						       EP_CFG_TDL_CHK);

				cdns3_wa2_enable_detection(priv_dev, outq_ep,
							   ep_sts_en_reg);
				writel(ep_sts_en_reg,
				       &priv_dev->regs->ep_sts_en);
				/* reset tdl value to zero */
				cdns3_wa2_reset_tdl(priv_dev);
				/*
				 * Memory barrier - Reset tdl before ringing
				 * the doorbell.
				 */
				wmb();
				if (EP_CMD_DRDY & ep_cmd_reg) {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n");

				} else {
					trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n");
					/*
					 * ring doorbell to generate DESCMIS irq
					 */
					writel(EP_CMD_DRDY,
					       &priv_dev->regs->ep_cmd);
				}
			}
		}
	}
}

/**
 * cdns3_gadget_giveback - call struct usb_request's ->complete callback
 * @priv_ep: The endpoint the request belongs to
 * @priv_req: The request we're giving back
 * @status: completion code for the request
 *
 * Must be called with controller's lock held and interrupts disabled. This
 * function will unmap the request and call its ->complete() callback to
 * notify upper layers that it has completed.
 */
void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
			   struct cdns3_request *priv_req,
			   int status)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct usb_request *request = &priv_req->request;

	list_del_init(&request->list);

	if (request->status == -EINPROGRESS)
		request->status = status;

	usb_gadget_unmap_request_by_dev(priv_dev->sysdev, request,
					priv_ep->dir);

	if ((priv_req->flags & REQUEST_UNALIGNED) &&
	    priv_ep->dir == USB_DIR_OUT && !request->status) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					priv_req->aligned_buf->dma,
					priv_req->aligned_buf->size,
					priv_req->aligned_buf->dir);
		memcpy(request->buf, priv_req->aligned_buf->buf,
		       request->length);
	}

	priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
	/* All TRBs have finished, clear the counter */
	priv_req->finished_trb = 0;
	trace_cdns3_gadget_giveback(priv_req);

	if (priv_dev->dev_ver < DEV_VER_V2) {
		request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
						    priv_req);
		if (!request)
			return;
	}

	if (request->complete) {
		spin_unlock(&priv_dev->lock);
		usb_gadget_giveback_request(&priv_ep->endpoint,
					    request);
		spin_lock(&priv_dev->lock);
	}

	if (request->buf == priv_dev->zlp_buf)
		cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
}

static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
{
	/* Work around for stale data address in TRB */
	if (priv_ep->wa1_set) {
		trace_cdns3_wa1(priv_ep, "restore cycle bit");

		priv_ep->wa1_set = 0;
		priv_ep->wa1_trb_index = 0xFFFF;
		if (priv_ep->wa1_cycle_bit) {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control | cpu_to_le32(0x1);
		} else {
			priv_ep->wa1_trb->control =
				priv_ep->wa1_trb->control & cpu_to_le32(~0x1);
		}
	}
}

static void cdns3_free_aligned_request_buf(struct work_struct *work)
{
	struct cdns3_device *priv_dev = container_of(work, struct cdns3_device,
						     aligned_buf_wq);
	struct cdns3_aligned_buf *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&priv_dev->lock, flags);

	list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
		if (!buf->in_use) {
			list_del(&buf->list);

			/*
			 * Re-enable interrupts to free DMA capable memory.
			 * Driver can't free this memory with disabled
			 * interrupts.
			 */
			spin_unlock_irqrestore(&priv_dev->lock, flags);
			dma_free_noncoherent(priv_dev->sysdev, buf->size,
					     buf->buf, buf->dma, buf->dir);
			kfree(buf);
			spin_lock_irqsave(&priv_dev->lock, flags);
		}
	}

	spin_unlock_irqrestore(&priv_dev->lock, flags);
}
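
/*
 * Bounce-buffer flow (a summary, not new behavior): request buffers that are
 * not 8-byte aligned get a DMA-able shadow buffer from
 * cdns3_prepare_aligned_request_buf() below. For IN transfers the data is
 * copied into the shadow before arming DMA; for OUT transfers
 * cdns3_gadget_giveback() copies it back once the request completes.
 * Freeing is deferred to a workqueue because the memory cannot be released
 * with interrupts disabled.
 */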

static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
{
	struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_aligned_buf *buf;

	/* check if buffer is aligned to 8. */
	if (!((uintptr_t)priv_req->request.buf & 0x7))
		return 0;

	buf = priv_req->aligned_buf;

	if (!buf || priv_req->request.length > buf->size) {
		buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		buf->size = priv_req->request.length;
		buf->dir = usb_endpoint_dir_in(priv_ep->endpoint.desc) ?
			   DMA_TO_DEVICE : DMA_FROM_DEVICE;

		buf->buf = dma_alloc_noncoherent(priv_dev->sysdev,
						 buf->size,
						 &buf->dma,
						 buf->dir,
						 GFP_ATOMIC);
		if (!buf->buf) {
			kfree(buf);
			return -ENOMEM;
		}

		if (priv_req->aligned_buf) {
			trace_cdns3_free_aligned_request(priv_req);
			priv_req->aligned_buf->in_use = 0;
			queue_work(system_freezable_wq,
				   &priv_dev->aligned_buf_wq);
		}

		buf->in_use = 1;
		priv_req->aligned_buf = buf;

		list_add_tail(&buf->list,
			      &priv_dev->aligned_buf_list);
	}

	if (priv_ep->dir == USB_DIR_IN) {
		/* Make DMA buffer CPU accessible */
		dma_sync_single_for_cpu(priv_dev->sysdev,
					buf->dma, buf->size, buf->dir);
		memcpy(buf->buf, priv_req->request.buf,
		       priv_req->request.length);
	}

	/* Transfer DMA buffer ownership back to device */
	dma_sync_single_for_device(priv_dev->sysdev,
				   buf->dma, buf->size, buf->dir);

	priv_req->flags |= REQUEST_UNALIGNED;
	trace_cdns3_prepare_aligned_request(priv_req);

	return 0;
}

static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
				  struct cdns3_trb *trb)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (!priv_ep->wa1_set) {
		u32 doorbell;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

		if (doorbell) {
			priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
			priv_ep->wa1_set = 1;
			priv_ep->wa1_trb = trb;
			priv_ep->wa1_trb_index = priv_ep->enqueue;
			trace_cdns3_wa1(priv_ep, "set guard");
			return 0;
		}
	}
	return 1;
}

static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
					     struct cdns3_endpoint *priv_ep)
{
	int dma_index;
	u32 doorbell;

	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
	dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

	if (!doorbell || dma_index != priv_ep->wa1_trb_index)
		cdns3_wa1_restore_cycle_bit(priv_ep);
}
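
/*
 * WA1 timeline sketch (illustrative): when the doorbell is already set while
 * new TRBs are being queued, cdns3_wa1_update_guard() leaves the first TRB
 * with an inverted cycle bit and records it in wa1_trb. Once DMA has moved
 * past that index (or the doorbell is clear),
 * cdns3_wa1_tray_restore_cycle_bit() restores the proper cycle bit and the
 * TRB becomes visible to the DMA.
 */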

static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
					struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	dma_addr_t trb_dma;
	int address;
	u32 control;
	u32 length;
	u32 tdl;
	unsigned int sg_idx = priv_ep->stream_sg_idx;

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	/* For stream capable endpoints the driver uses only a single TD. */
	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->end_trb = priv_req->start_trb;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE |
		  TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP;

	if (!request->num_sgs) {
		trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
		length = request->length;
	} else {
		trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address));
		length = request->sg[sg_idx].length;
	}

	tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket);

	trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length));

	/*
	 * For the DEV_VER_V2 controller version we have enabled
	 * USB_CONF2_EN_TDL_TRB in the DMULT configuration.
	 * This enables TDL calculation based on the TRB, hence setting TDL
	 * in the TRB.
	 */
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl));
	}
	priv_req->flags |= REQUEST_PENDING;

	trb->control = cpu_to_le32(control);

	trace_cdns3_prepare_trb(priv_ep, priv_req->trb);

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/* always first element */
	writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma),
	       &priv_dev->regs->ep_traddr);

	if (!(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);

		priv_ep->prime_flag = false;

		/*
		 * For controller version DEV_VER_V2 the tdl calculation
		 * is based on the TRB.
		 */

		if (priv_dev->dev_ver < DEV_VER_V2)
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		else if (priv_dev->dev_ver > DEV_VER_V2)
			writel(tdl, &priv_dev->regs->ep_tdl);

		priv_ep->last_stream_id = priv_req->request.stream_id;
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) |
		       EP_CMD_ERDY, &priv_dev->regs->ep_cmd);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (priv_dev->dev_ver < DEV_VER_V3)
		return;

	if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
		writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
	}
}
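
/*
 * TDL example for the stream transfer above (hypothetical numbers): tdl
 * counts packets, not bytes. For a 3000-byte request on a bulk endpoint with
 * maxpacket == 1024:
 *
 *	tdl = DIV_ROUND_UP(3000, 1024);	// == 3 packets
 *
 * Depending on dev_ver it is programmed via EP_CMD_TDL_SET()/EP_CMD_STDL,
 * via the ep_tdl register, or carried inside the TRB (TRB_TDL_SS_SIZE).
 */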

/**
 * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
 * @priv_ep: endpoint object
 * @request: request object
 *
 * Returns zero on success or negative value on failure
 */
static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
				 struct usb_request *request)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_request *priv_req;
	struct cdns3_trb *trb;
	struct cdns3_trb *link_trb = NULL;
	dma_addr_t trb_dma;
	u32 togle_pcs = 1;
	int sg_iter = 0;
	int num_trb;
	int address;
	u32 control;
	int pcs;
	u16 total_tdl = 0;
	struct scatterlist *s = NULL;
	bool sg_supported = !!(request->num_mapped_sgs);

	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC)
		num_trb = priv_ep->interval;
	else
		num_trb = sg_supported ? request->num_mapped_sgs : 1;

	if (num_trb > priv_ep->free_trbs) {
		priv_ep->flags |= EP_RING_FULL;
		return -ENOBUFS;
	}

	priv_req = to_cdns3_request(request);
	address = priv_ep->endpoint.desc->bEndpointAddress;

	priv_ep->flags |= EP_PENDING_REQUEST;

	/* must allocate buffer aligned to 8 */
	if (priv_req->flags & REQUEST_UNALIGNED)
		trb_dma = priv_req->aligned_buf->dma;
	else
		trb_dma = request->dma;

	trb = priv_ep->trb_pool + priv_ep->enqueue;
	priv_req->start_trb = priv_ep->enqueue;
	priv_req->trb = trb;

	cdns3_select_ep(priv_ep->cdns3_dev, address);

	/* prepare ring */
	if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) {
		int doorbell, dma_index;
		u32 ch_bit = 0;

		doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
		dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);

		/* Driver can't update the LINK TRB while it is being processed. */
		if (doorbell && dma_index == priv_ep->num_trbs - 1) {
			priv_ep->flags |= EP_DEFERRED_DRDY;
			return -ENOBUFS;
		}

		/* update Cycle bit in Link TRB before starting DMA */
		link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
		/*
		 * For TRs of size 2, enabling TRB_CHAIN for epXin causes
		 * DMA to get stuck at the LINK TRB.
		 * On the other hand, removing TRB_CHAIN for longer TRs for
		 * epXout causes DMA to get stuck after handling the LINK TRB.
		 * To eliminate this strange behavior the driver sets the
		 * TRB_CHAIN bit only for TR size > 2.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
		    TRBS_PER_SEGMENT > 2)
			ch_bit = TRB_CHAIN;

		link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) |
				    TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit);
	}

	if (priv_dev->dev_ver <= DEV_VER_V2)
		togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);

	if (sg_supported)
		s = request->sg;

	/* set incorrect Cycle Bit for first trb */
	control = priv_ep->pcs ? 0 : TRB_CYCLE;
	trb->length = 0;
	if (priv_dev->dev_ver >= DEV_VER_V2) {
		u16 td_size;

		td_size = DIV_ROUND_UP(request->length,
				       priv_ep->endpoint.maxpacket);
		if (priv_dev->gadget.speed == USB_SPEED_SUPER)
			trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
		else
			control |= TRB_TDL_HS_SIZE(td_size);
	}

	do {
		u32 length;

		/* fill TRB */
		control |= TRB_TYPE(TRB_NORMAL);
		if (sg_supported) {
			trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s)));
			length = sg_dma_len(s);
		} else {
			trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma));
			length = request->length;
		}

		if (priv_ep->flags & EP_TDLCHK_EN)
			total_tdl += DIV_ROUND_UP(length,
						  priv_ep->endpoint.maxpacket);

		trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
					   TRB_LEN(length));
		pcs = priv_ep->pcs ? TRB_CYCLE : 0;

		/*
		 * the first trb should be prepared as the last one, to avoid
		 * processing the transfer too early
		 */
		if (sg_iter != 0)
			control |= pcs;

		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
			control |= TRB_IOC | TRB_ISP;
		} else {
			/* for last element in TD or in SG list */
			if (sg_iter == (num_trb - 1) && sg_iter != 0)
				control |= pcs | TRB_IOC | TRB_ISP;
		}

		if (sg_iter)
			trb->control = cpu_to_le32(control);
		else
			priv_req->trb->control = cpu_to_le32(control);

		if (sg_supported) {
			trb->control |= cpu_to_le32(TRB_ISP);
			/* Don't set chain bit for last TRB */
			if (sg_iter < num_trb - 1)
				trb->control |= cpu_to_le32(TRB_CHAIN);

			s = sg_next(s);
		}

		control = 0;
		++sg_iter;
		priv_req->end_trb = priv_ep->enqueue;
		cdns3_ep_inc_enq(priv_ep);
		trb = priv_ep->trb_pool + priv_ep->enqueue;
		trb->length = 0;
	} while (sg_iter < num_trb);

	trb = priv_req->trb;

	priv_req->flags |= REQUEST_PENDING;
	priv_req->num_of_trb = num_trb;

	if (sg_iter == 1)
		trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP);

	if (priv_dev->dev_ver < DEV_VER_V2 &&
	    (priv_ep->flags & EP_TDLCHK_EN)) {
		u16 tdl = total_tdl;
		u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd));

		if (tdl > EP_CMD_TDL_MAX) {
			tdl = EP_CMD_TDL_MAX;
			priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX;
		}

		if (old_tdl < tdl) {
			tdl -= old_tdl;
			writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
			       &priv_dev->regs->ep_cmd);
		}
	}

	/*
	 * Memory barrier - the cycle bit must be set before the other fields
	 * in the trb.
	 */
	wmb();

	/* give the TD to the consumer */
	if (togle_pcs)
		trb->control = trb->control ^ cpu_to_le32(1);

	if (priv_dev->dev_ver <= DEV_VER_V2)
		cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);

	if (num_trb > 1) {
		int i = 0;

		while (i < num_trb) {
			trace_cdns3_prepare_trb(priv_ep, trb + i);
			if (trb + i == link_trb) {
				trb = priv_ep->trb_pool;
				num_trb = num_trb - i;
				i = 0;
			} else {
				i++;
			}
		}
	} else {
		trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
	}

	/*
	 * Memory barrier - Cycle Bit must be set before trb->length and
	 * trb->buffer fields.
	 */
	wmb();

	/*
	 * For DMULT mode we can set the address of the transfer ring only
	 * once, after enabling the endpoint.
	 */
	if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
		/*
		 * While SW is not ready to handle the OUT transfer, the ISO
		 * OUT endpoint should be disabled (EP_CFG.ENABLE = 0).
		 * EP_CFG_ENABLE must be set before updating ep_traddr.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir &&
		    !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
			priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
			cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
					       EP_CFG_ENABLE);
		}

		writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
					priv_req->start_trb * TRB_SIZE),
		       &priv_dev->regs->ep_traddr);

		priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
	}

	if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
		trace_cdns3_ring(priv_ep);
		/* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
		writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
		cdns3_rearm_drdy_if_needed(priv_ep);
		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}

	/* WORKAROUND for transition to L0 */
	__cdns3_gadget_wakeup(priv_dev);

	return 0;
}

void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
{
	struct cdns3_endpoint *priv_ep;
	struct usb_ep *ep;

	if (priv_dev->hw_configured_flag)
		return;

	writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);

	cdns3_set_register_bit(&priv_dev->regs->usb_conf,
			       USB_CONF_U1EN | USB_CONF_U2EN);

	priv_dev->hw_configured_flag = 1;

	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
		if (ep->enabled) {
			priv_ep = ep_to_cdns3_ep(ep);
			cdns3_start_all_request(priv_dev, priv_ep);
		}
	}

	cdns3_allow_enable_l1(priv_dev, 1);
}
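
/*
 * Note: until USB_CONF_CFGSET is written, endpoints only queue requests.
 * cdns3_set_hw_configuration() above then walks every enabled endpoint and
 * starts whatever was deferred, so transfers queued before SET_CONFIGURATION
 * completes are not lost.
 */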

/**
 * cdns3_trb_handled - check whether trb has been handled by DMA
 *
 * @priv_ep: extended endpoint object.
 * @priv_req: request object for checking
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if request has not been handled by DMA, else returns true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = priv_ep->dequeue - dequeue position
 * EQ = priv_ep->enqueue - enqueue position
 * ST = priv_req->start_trb - index of first TRB in transfer ring
 * ET = priv_req->end_trb - index of last TRB in transfer ring
 * CI = current_index - index of the TRB being processed by DMA.
 *
 * As a first step, we check whether DQ lies between ST and ET.
 * Then we check whether the cycle bit at index priv_ep->dequeue
 * is correct.
 *
 * some rules:
 * 1. priv_ep->dequeue never equals current_index.
 * 2. priv_ep->enqueue never exceeds priv_ep->dequeue.
 * 3. exception: priv_ep->enqueue == priv_ep->dequeue
 *    and priv_ep->free_trbs is zero.
 *    This case indicates that the TR is full.
 *
 * In the two cases below, the request has been handled.
 * Case 1 - priv_ep->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - priv_ep->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end of
 * the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep,
			      struct cdns3_request *priv_req)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	struct cdns3_trb *trb;
	int current_index = 0;
	int handled = 0;
	int doorbell;

	current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
	doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);

	/* current trb doesn't belong to this request */
	if (priv_req->start_trb < priv_req->end_trb) {
		if (priv_ep->dequeue > priv_req->end_trb)
			goto finish;

		if (priv_ep->dequeue < priv_req->start_trb)
			goto finish;
	}

	if ((priv_req->start_trb > priv_req->end_trb) &&
	    (priv_ep->dequeue > priv_req->end_trb) &&
	    (priv_ep->dequeue < priv_req->start_trb))
		goto finish;

	if ((priv_req->start_trb == priv_req->end_trb) &&
	    (priv_ep->dequeue != priv_req->end_trb))
		goto finish;

	trb = &priv_ep->trb_pool[priv_ep->dequeue];

	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs)
		goto finish;

	if (doorbell == 1 && current_index == priv_ep->dequeue)
		goto finish;

	/* The corner case for TRBS_PER_SEGMENT equal to 2. */
	if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
		handled = 1;
		goto finish;
	}

	if (priv_ep->enqueue == priv_ep->dequeue &&
	    priv_ep->free_trbs == 0) {
		handled = 1;
	} else if (priv_ep->dequeue < current_index) {
		if ((current_index == (priv_ep->num_trbs - 1)) &&
		    !priv_ep->dequeue)
			goto finish;

		handled = 1;
	} else if (priv_ep->dequeue > current_index) {
		handled = 1;
	}

finish:
	trace_cdns3_request_handled(priv_req, current_index, handled);

	return handled;
}
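
/*
 * Worked example for cdns3_trb_handled() (hypothetical 8-TRB ring,
 * TRBS_PER_SEGMENT > 2): a request occupies ST = 2 .. ET = 4. With DQ = 3,
 * CI = 5 and the cycle bit at DQ matching ccs, "DQ < CI" holds (Case 1), so
 * the TRB at index 3 is reported as handled. With DQ = 6 the function bails
 * out early because DQ lies outside ST..ET.
 */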

static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
				     struct cdns3_endpoint *priv_ep)
{
	struct cdns3_request *priv_req;
	struct usb_request *request;
	struct cdns3_trb *trb;
	bool request_handled = false;
	bool transfer_end = false;

	while (!list_empty(&priv_ep->pending_req_list)) {
		request = cdns3_next_request(&priv_ep->pending_req_list);
		priv_req = to_cdns3_request(request);

		trb = priv_ep->trb_pool + priv_ep->dequeue;

		/* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
		while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
			trace_cdns3_complete_trb(priv_ep, trb);
			cdns3_ep_inc_deq(priv_ep);
			trb = priv_ep->trb_pool + priv_ep->dequeue;
		}

		if (!request->stream_id) {
			/* Re-select endpoint. It could be changed by other CPU
			 * during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			while (cdns3_trb_handled(priv_ep, priv_req)) {
				priv_req->finished_trb++;
				if (priv_req->finished_trb >= priv_req->num_of_trb)
					request_handled = true;

				trb = priv_ep->trb_pool + priv_ep->dequeue;
				trace_cdns3_complete_trb(priv_ep, trb);

				if (!transfer_end)
					request->actual +=
						TRB_LEN(le32_to_cpu(trb->length));

				if (priv_req->num_of_trb > 1 &&
				    le32_to_cpu(trb->control) & TRB_SMM)
					transfer_end = true;

				cdns3_ep_inc_deq(priv_ep);
			}

			if (request_handled) {
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
				request_handled = false;
				transfer_end = false;
			} else {
				goto prepare_next_td;
			}

			if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
			    TRBS_PER_SEGMENT == 2)
				break;
		} else {
			/* Re-select endpoint. It could be changed by other CPU
			 * during handling usb_gadget_giveback_request.
			 */
			cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

			trb = priv_ep->trb_pool;
			trace_cdns3_complete_trb(priv_ep, trb);

			if (trb != priv_req->trb)
				dev_warn(priv_dev->dev,
					 "request_trb=0x%p, queue_trb=0x%p\n",
					 priv_req->trb, trb);

			request->actual += TRB_LEN(le32_to_cpu(trb->length));

			if (!request->num_sgs ||
			    (request->num_sgs == (priv_ep->stream_sg_idx + 1))) {
				priv_ep->stream_sg_idx = 0;
				cdns3_gadget_giveback(priv_ep, priv_req, 0);
			} else {
				priv_ep->stream_sg_idx++;
				cdns3_ep_run_stream_transfer(priv_ep, request);
			}
			break;
		}
	}
	priv_ep->flags &= ~EP_PENDING_REQUEST;

prepare_next_td:
	if (!(priv_ep->flags & EP_STALLED) &&
	    !(priv_ep->flags & EP_STALL_PENDING))
		cdns3_start_all_request(priv_dev, priv_ep);
}

void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	cdns3_wa1_restore_cycle_bit(priv_ep);

	if (rearm) {
		trace_cdns3_ring(priv_ep);

		/* Cycle Bit must be updated before arming DMA. */
		wmb();
		writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);

		__cdns3_gadget_wakeup(priv_dev);

		trace_cdns3_doorbell_epx(priv_ep->name,
					 readl(&priv_dev->regs->ep_traddr));
	}
}

static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep)
{
	u16 tdl = priv_ep->pending_tdl;
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;

	if (tdl > EP_CMD_TDL_MAX) {
		tdl = EP_CMD_TDL_MAX;
		priv_ep->pending_tdl -= EP_CMD_TDL_MAX;
	} else {
		priv_ep->pending_tdl = 0;
	}

	writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd);
}
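
/*
 * Example (hypothetical, ignoring the old_tdl adjustment): if a transfer
 * needs total_tdl == EP_CMD_TDL_MAX + 5 packets, cdns3_ep_run_transfer()
 * programs EP_CMD_TDL_MAX and leaves pending_tdl == 5; the IOT interrupt
 * path then calls cdns3_reprogram_tdl() to program the remaining 5 and
 * clear pending_tdl.
 */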

/**
 * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
 * @priv_ep: endpoint object
 *
 * Returns 0
 */
static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
{
	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
	u32 ep_sts_reg;
	struct usb_request *deferred_request;
	struct usb_request *pending_request;
	u32 tdl = 0;

	cdns3_select_ep(priv_dev, priv_ep->endpoint.address);

	trace_cdns3_epx_irq(priv_dev, priv_ep);

	ep_sts_reg = readl(&priv_dev->regs->ep_sts);
	writel(ep_sts_reg, &priv_dev->regs->ep_sts);

	if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) {
		bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY);

		tdl = cdns3_get_tdl(priv_dev);

		/*
		 * Continue the previous transfer:
		 * There is some racing between ERDY and PRIME. The device
		 * sends ERDY and, almost at the same time, the host sends
		 * PRIME. This causes the host to ignore the ERDY packet, so
		 * the driver has to send it again.
		 */
		if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) ||
		    EP_STS_HOSTPP(ep_sts_reg))) {
			writel(EP_CMD_ERDY |
			       EP_CMD_ERDY_SID(priv_ep->last_stream_id),
			       &priv_dev->regs->ep_cmd);
			ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC);
		} else {
			priv_ep->prime_flag = true;

			pending_request = cdns3_next_request(&priv_ep->pending_req_list);
			deferred_request = cdns3_next_request(&priv_ep->deferred_req_list);

			if (deferred_request && !pending_request) {
				cdns3_start_all_request(priv_dev, priv_ep);
			}
		}
	}

	if (ep_sts_reg & EP_STS_TRBERR) {
		if (priv_ep->flags & EP_STALL_PENDING &&
		    !(ep_sts_reg & EP_STS_DESCMIS &&
		    priv_dev->dev_ver < DEV_VER_V2)) {
			cdns3_ep_stall_flush(priv_ep);
		}

		/*
		 * For isochronous transfers the driver completes the request
		 * on IOC or on TRBERR. IOC appears only when the device
		 * receives an OUT data packet. If the host disables a stream
		 * or loses some packets, then the only way to finish all
		 * queued transfers is to do it on the TRBERR event.
		 */
		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
		    !priv_ep->wa1_set) {
			if (!priv_ep->dir) {
				u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);

				ep_cfg &= ~EP_CFG_ENABLE;
				writel(ep_cfg, &priv_dev->regs->ep_cfg);
				priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
			}
			cdns3_transfer_completed(priv_dev, priv_ep);
		} else if (!(priv_ep->flags & EP_STALLED) &&
			   !(priv_ep->flags & EP_STALL_PENDING)) {
			if (priv_ep->flags & EP_DEFERRED_DRDY) {
				priv_ep->flags &= ~EP_DEFERRED_DRDY;
				cdns3_start_all_request(priv_dev, priv_ep);
			} else {
				cdns3_rearm_transfer(priv_ep,
						     priv_ep->wa1_set);
			}
		}
	}

	if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) ||
	    (ep_sts_reg & EP_STS_IOT)) {
		if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
			if (ep_sts_reg & EP_STS_ISP)
				priv_ep->flags |= EP_QUIRK_END_TRANSFER;
			else
				priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
		}

		if (!priv_ep->use_streams) {
			if ((ep_sts_reg & EP_STS_IOC) ||
			    (ep_sts_reg & EP_STS_ISP)) {
				cdns3_transfer_completed(priv_dev, priv_ep);
			} else if ((priv_ep->flags & EP_TDLCHK_EN) &&
				   priv_ep->pending_tdl) {
				/* handle IOT with pending tdl */
				cdns3_reprogram_tdl(priv_ep);
			}
		} else if (priv_ep->dir == USB_DIR_OUT) {
			priv_ep->ep_sts_pending |= ep_sts_reg;
		} else if (ep_sts_reg & EP_STS_IOT) {
			cdns3_transfer_completed(priv_dev, priv_ep);
		}
	}

	/*
	 * The MD_EXIT interrupt is set when a stream capable endpoint exits
	 * the MOVE DATA state of the Bulk IN/OUT stream protocol state
	 * machine.
	 */
	if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) &&
	    (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) {
		priv_ep->ep_sts_pending = 0;
		cdns3_transfer_completed(priv_dev, priv_ep);
	}

	/*
	 * WA2: this condition should only be met when
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
	 * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN is set.
	 * In other cases this interrupt will be disabled.
	 */
	if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
	    !(priv_ep->flags & EP_STALLED))
		cdns3_wa2_descmissing_packet(priv_ep);

	return 0;
}

static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
{
	if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect)
		priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
}

/**
 * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
 * @priv_dev: extended gadget object
 * @usb_ists: bitmap representation of device's reported interrupts
 * (usb_ists register value)
 */
static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
					      u32 usb_ists)
__must_hold(&priv_dev->lock)
{
	int speed = 0;

	trace_cdns3_usb_irq(priv_dev, usb_ists);
	if (usb_ists & USB_ISTS_L1ENTI) {
		/*
		 * WORKAROUND: the CDNS3 controller has an issue with hardware
		 * resuming from L1. To fix it, if any DMA transfer is pending,
		 * the driver must start driving the resume signal immediately.
		 */
		if (readl(&priv_dev->regs->drbl))
			__cdns3_gadget_wakeup(priv_dev);
	}

	/* Connection detected */
	if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
		speed = cdns3_get_speed(priv_dev);
		priv_dev->gadget.speed = speed;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
		cdns3_ep0_config(priv_dev);
	}

	/* Disconnection detected */
	if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
		spin_unlock(&priv_dev->lock);
		cdns3_disconnect_gadget(priv_dev);
		spin_lock(&priv_dev->lock);
		priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
		usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
		cdns3_hw_reset_eps_config(priv_dev);
	}

	if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->suspend) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->suspend(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
		if (priv_dev->gadget_driver &&
		    priv_dev->gadget_driver->resume) {
			spin_unlock(&priv_dev->lock);
			priv_dev->gadget_driver->resume(&priv_dev->gadget);
			spin_lock(&priv_dev->lock);
		}
	}

	/* reset */
	if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
		if (priv_dev->gadget_driver) {
			spin_unlock(&priv_dev->lock);
			usb_gadget_udc_reset(&priv_dev->gadget,
					     priv_dev->gadget_driver);
			spin_lock(&priv_dev->lock);

			/* read again to check the actual speed */
			speed = cdns3_get_speed(priv_dev);
			priv_dev->gadget.speed = speed;
			cdns3_hw_reset_eps_config(priv_dev);
			cdns3_ep0_config(priv_dev);
		}
	}
}
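
/*
 * Interrupt split (summary of the two handlers below):
 * cdns3_device_irq_handler() masks only the detected sources and returns
 * IRQ_WAKE_THREAD; cdns3_device_thread_irq_handler() then acknowledges them,
 * services the endpoints and re-enables the interrupts (USB_IEN_INIT and the
 * ep_ien write) before returning IRQ_HANDLED.
 */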

/**
 * cdns3_device_irq_handler - interrupt handler for device part of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev = data;
	struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
	irqreturn_t ret = IRQ_NONE;
	u32 reg;

	if (cdns->in_lpm)
		return ret;

	/* check USB device interrupt */
	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		/* After masking interrupts, new interrupts won't be reported
		 * in usb_ists/ep_ists. In order not to lose some of them, the
		 * driver disables only the detected interrupts. They will be
		 * enabled ASAP after clearing the source of the interrupt.
		 * This unusual behavior only applies to the usb_ists register.
		 */
		reg = ~reg & readl(&priv_dev->regs->usb_ien);
		/* mask deferred interrupt. */
		writel(reg, &priv_dev->regs->usb_ien);
		ret = IRQ_WAKE_THREAD;
	}

	/* check endpoint interrupt */
	reg = readl(&priv_dev->regs->ep_ists);
	if (reg) {
		writel(0, &priv_dev->regs->ep_ien);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

/**
 * cdns3_device_thread_irq_handler - interrupt handler for device part
 * of controller
 *
 * @irq: irq number for cdns3 core device
 * @data: structure of cdns3
 *
 * Returns IRQ_HANDLED or IRQ_NONE
 */
static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
{
	struct cdns3_device *priv_dev = data;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	unsigned int bit;
	unsigned long reg;

	spin_lock_irqsave(&priv_dev->lock, flags);

	reg = readl(&priv_dev->regs->usb_ists);
	if (reg) {
		writel(reg, &priv_dev->regs->usb_ists);
		writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
		cdns3_check_usb_interrupt_proceed(priv_dev, reg);
		ret = IRQ_HANDLED;
	}

	reg = readl(&priv_dev->regs->ep_ists);

	/* handle default endpoint OUT */
	if (reg & EP_ISTS_EP_OUT0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
		ret = IRQ_HANDLED;
	}

	/* handle default endpoint IN */
	if (reg & EP_ISTS_EP_IN0) {
		cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
		ret = IRQ_HANDLED;
	}

	/* check for interrupts from non-default endpoints; if none, exit */
	reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
	if (!reg)
		goto irqend;

	for_each_set_bit(bit, &reg,
			 sizeof(u32) * BITS_PER_BYTE) {
		cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
		ret = IRQ_HANDLED;
	}

	if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams)
		cdns3_wa2_check_outq_status(priv_dev);

irqend:
	writel(~0, &priv_dev->regs->ep_ien);
	spin_unlock_irqrestore(&priv_dev->lock, flags);

	return ret;
}

/**
 * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
 *
 * The real reservation will occur during write to the EP_CFG register;
 * this function is used to check if the 'size' reservation is allowed.
1948 *
1949 * @priv_dev: extended gadget object
1950 * @size: the size (in KB) the EP would like to allocate
1951 * @is_in: endpoint direction
1952 *
1953 * Returns 0 if the required size can be met, or a negative value on failure
1954 */
1955static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
1956					  int size, int is_in)
1957{
1958	int remained;
1959
1960	/* 2 KB are reserved for EP0 */
1961	remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
1962
1963	if (is_in) {
1964		if (remained < size)
1965			return -EPERM;
1966
1967		priv_dev->onchip_used_size += size;
1968	} else {
1969		int required;
1970
1971		/*
1972		 * All OUT endpoints share the same chunk of on-chip memory,
1973		 * so the driver checks if it has already assigned enough buffers.
1974		 */
1975		if (priv_dev->out_mem_is_allocated >= size)
1976			return 0;
1977
1978		required = size - priv_dev->out_mem_is_allocated;
1979
1980		if (required > remained)
1981			return -EPERM;
1982
1983		priv_dev->out_mem_is_allocated += required;
1984		priv_dev->onchip_used_size += required;
1985	}
1986
1987	return 0;
1988}
1989
1990static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
1991				  struct cdns3_endpoint *priv_ep)
1992{
1993	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
1994
1995	/* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
1996	if (priv_dev->dev_ver <= DEV_VER_V2)
1997		writel(USB_CONF_DMULT, &regs->usb_conf);
1998
1999	if (priv_dev->dev_ver == DEV_VER_V2)
2000		writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);
2001
2002	if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
2003		u32 mask;
2004
2005		if (priv_ep->dir)
2006			mask = BIT(priv_ep->num + 16);
2007		else
2008			mask = BIT(priv_ep->num);
2009
2010		if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
2011			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2012			cdns3_set_register_bit(&regs->tdl_beh, mask);
2013			cdns3_set_register_bit(&regs->tdl_beh2, mask);
2014			cdns3_set_register_bit(&regs->dma_adv_td, mask);
2015		}
2016
2017		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
2018			cdns3_set_register_bit(&regs->tdl_from_trb, mask);
2019
2020		cdns3_set_register_bit(&regs->dtrans, mask);
2021	}
2022}
2023
2024/**
2025 * cdns3_ep_config - Configure hardware endpoint
2026 * @priv_ep: extended endpoint object
2027 * @enable: set EP_CFG_ENABLE bit in ep_cfg register.
2028 */
2029int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
2030{
2031	bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
2032	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2033	u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
2034	u32 max_packet_size = 0;
2035	u8 maxburst = 0;
2036	u32 ep_cfg = 0;
2037	u8 buffering;
2038	u8 mult = 0;
2039	int ret;
2040
2041	buffering = priv_dev->ep_buf_size - 1;
2042
2043	cdns3_configure_dmult(priv_dev, priv_ep);
2044
2045	switch (priv_ep->type) {
2046	case USB_ENDPOINT_XFER_INT:
2047		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
2048
2049		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
2050			ep_cfg |= EP_CFG_TDL_CHK;
2051		break;
2052	case USB_ENDPOINT_XFER_BULK:
2053		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
2054
2055		if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
2056			ep_cfg |= EP_CFG_TDL_CHK;
2057		break;
2058	default:
2059		ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
2060		mult = priv_dev->ep_iso_burst - 1;
2061		buffering = mult + 1;
2062	}
2063
2064	switch (priv_dev->gadget.speed) {
2065	case USB_SPEED_FULL:
2066		max_packet_size = is_iso_ep ?
				  1023 : 64;
2067		break;
2068	case USB_SPEED_HIGH:
2069		max_packet_size = is_iso_ep ? 1024 : 512;
2070		break;
2071	case USB_SPEED_SUPER:
2072		/* Forcing mult to 0 here is a limitation assumed by the driver. */
2073		mult = 0;
2074		max_packet_size = 1024;
2075		if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
2076			maxburst = priv_dev->ep_iso_burst - 1;
2077			buffering = (mult + 1) *
2078				    (maxburst + 1);
2079
2080			if (priv_ep->interval > 1)
2081				buffering++;
2082		} else {
2083			maxburst = priv_dev->ep_buf_size - 1;
2084		}
2085		break;
2086	default:
2087		/* all other speeds are not supported */
2088		return -EINVAL;
2089	}
2090
2091	if (max_packet_size == 1024)
2092		priv_ep->trb_burst_size = 128;
2093	else if (max_packet_size >= 512)
2094		priv_ep->trb_burst_size = 64;
2095	else
2096		priv_ep->trb_burst_size = 16;
2097
2098	mult = min_t(u8, mult, EP_CFG_MULT_MAX);
2099	buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
2100	maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
2101
2102	/* the on-chip buffer can only be reserved before configuration */
2103	if (!priv_dev->hw_configured_flag) {
2104		ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
2105						     !!priv_ep->dir);
2106		if (ret) {
2107			dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
2108			return ret;
2109		}
2110	}
2111
2112	if (enable)
2113		ep_cfg |= EP_CFG_ENABLE;
2114
2115	if (priv_ep->use_streams && priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2116		if (priv_dev->dev_ver >= DEV_VER_V3) {
2117			u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0));
2118
2119			/*
2120			 * Stream capable endpoints are handled by using ep_tdl
2121			 * register. Other endpoints use TDL from TRB feature.
2122			 */
2123			cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb,
2124						 mask);
2125		}
2126
2127		/* Enable Stream Bit, TDL chk and SID chk */
2128		ep_cfg |= EP_CFG_STREAM_EN | EP_CFG_TDL_CHK | EP_CFG_SID_CHK;
2129	}
2130
2131	ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
2132		  EP_CFG_MULT(mult) |
2133		  EP_CFG_BUFFERING(buffering) |
2134		  EP_CFG_MAXBURST(maxburst);
2135
2136	cdns3_select_ep(priv_dev, bEndpointAddress);
2137	writel(ep_cfg, &priv_dev->regs->ep_cfg);
2138	priv_ep->flags |= EP_CONFIGURED;
2139
2140	dev_dbg(priv_dev->dev, "Configure %s: with val %08x\n",
2141		priv_ep->name, ep_cfg);
2142
2143	return 0;
2144}
2145
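/*
 * Worked example of the sizing above (illustrative numbers, not taken from
 * any particular platform): with ep_buf_size = 4, a SuperSpeed bulk
 * endpoint gets maxburst = buffering = 4 - 1 = 3, and max_packet_size =
 * 1024 selects trb_burst_size = 128; cdns3_ep_onchip_buffer_reserve() is
 * then asked for buffering + 1 = 4 KB. With ep_iso_burst = 3, a SuperSpeed
 * ISO endpoint with interval > 1 gets mult = 0, maxburst = 2 and
 * buffering = (0 + 1) * (2 + 1) + 1 = 4.
 */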
2146/* Find correct direction for HW endpoint according to the descriptor */
2147static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
2148				   struct cdns3_endpoint *priv_ep)
2149{
2150	return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
2151	       (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
2152}
2153
2154static struct
2155cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
2156					struct usb_endpoint_descriptor *desc)
2157{
2158	struct usb_ep *ep;
2159	struct cdns3_endpoint *priv_ep;
2160
2161	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2162		unsigned long num;
2163		int ret;
2164		/* ep names follow the pattern epXin or epXout */
2165		char c[2] = {ep->name[2], '\0'};
2166
2167		ret = kstrtoul(c, 10, &num);
2168		if (ret)
2169			return ERR_PTR(ret);
2170
2171		priv_ep = ep_to_cdns3_ep(ep);
2172		if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
2173			if (!(priv_ep->flags & EP_CLAIMED)) {
2174				priv_ep->num = num;
2175				return priv_ep;
2176			}
2177		}
2178	}
2179
2180	return ERR_PTR(-ENOENT);
2181}
2182
2183/*
2184 * The Cadence IP has a limitation: all endpoints must be configured
2185 * (type & max packet size) before the configuration is set through the
2186 * hardware register, which means we can't change an endpoint's
2187 * configuration after set_configuration.
2188 *
2189 * This function sets the EP_CLAIMED flag, which is added when the gadget
2190 * driver uses usb_ep_autoconfig to configure a specific endpoint;
2191 * when the UDC driver receives the set_configuration request,
2192 * it goes through all claimed endpoints and configures them
2193 * accordingly.
2194 *
2195 * At usb_ep_ops.enable/disable, we only enable and disable the endpoint
2196 * through the ep_cfg register, which can be changed after
2197 * set_configuration, and do some software operations accordingly.
2198 */
2199static struct
2200usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
2201			      struct usb_endpoint_descriptor *desc,
2202			      struct usb_ss_ep_comp_descriptor *comp_desc)
2203{
2204	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2205	struct cdns3_endpoint *priv_ep;
2206	unsigned long flags;
2207
2208	priv_ep = cdns3_find_available_ep(priv_dev, desc);
2209	if (IS_ERR(priv_ep)) {
2210		dev_err(priv_dev->dev, "no available ep\n");
2211		return NULL;
2212	}
2213
2214	dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
2215
2216	spin_lock_irqsave(&priv_dev->lock, flags);
2217	priv_ep->endpoint.desc = desc;
2218	priv_ep->dir = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
2219	priv_ep->type = usb_endpoint_type(desc);
2220	priv_ep->flags |= EP_CLAIMED;
2221	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
2222
2223	spin_unlock_irqrestore(&priv_dev->lock, flags);
2224	return &priv_ep->endpoint;
2225}
2226
2227/**
2228 * cdns3_gadget_ep_alloc_request - Allocates request
2229 * @ep: endpoint object associated with request
2230 * @gfp_flags: gfp flags
2231 *
2232 * Returns allocated request address, NULL on allocation error
2233 */
2234struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
2235						  gfp_t gfp_flags)
2236{
2237	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2238	struct cdns3_request *priv_req;
2239
2240	priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
2241	if (!priv_req)
2242		return NULL;
2243
2244	priv_req->priv_ep = priv_ep;
2245
2246	trace_cdns3_alloc_request(priv_req);
2247	return &priv_req->request;
2248}
2249
2250/**
2251 * cdns3_gadget_ep_free_request - Free memory occupied by request
2252 * @ep: endpoint object associated with request
2253 * @request: request to free memory
2254 */
2255void cdns3_gadget_ep_free_request(struct usb_ep *ep,
2256				  struct usb_request *request)
2257{
2258	struct cdns3_request *priv_req = to_cdns3_request(request);
2259
2260	if (priv_req->aligned_buf)
2261		priv_req->aligned_buf->in_use = 0;
2262
2263	trace_cdns3_free_request(priv_req);
2264	kfree(priv_req);
2265}
2266
2267/**
2268 * cdns3_gadget_ep_enable - Enable endpoint
2269 * @ep: endpoint object
2270 * @desc: endpoint descriptor
2271 *
2272 * Returns 0 on success, error code elsewhere
2273 */
2274static int cdns3_gadget_ep_enable(struct usb_ep *ep,
2275				  const struct usb_endpoint_descriptor *desc)
2276{
2277	struct cdns3_endpoint *priv_ep;
2278	struct cdns3_device *priv_dev;
2279	const struct usb_ss_ep_comp_descriptor *comp_desc;
2280	u32 reg = EP_STS_EN_TRBERREN;
2281	u32 bEndpointAddress;
2282	unsigned long flags;
2283	int enable = 1;
2284	int ret = 0;
2285	int val;
2286
2287	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
2288		pr_err("usbss: invalid parameters\n");
2289		return -EINVAL;
2290	}
2291
2292	priv_ep = ep_to_cdns3_ep(ep);
2293	priv_dev = priv_ep->cdns3_dev;
2294	comp_desc = priv_ep->endpoint.comp_desc;
2295
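/*
 * Note on the interval computed below: per the USB spec, bInterval for
 * periodic endpoints is an exponent, so the driver stores
 * priv_ep->interval = 2^(bInterval - 1) (micro)frames; e.g. bInterval = 4
 * on a high-speed ISO endpoint gives an interval of 8 microframes (1 ms).
 */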
2296	if (!desc->wMaxPacketSize) {
2297		dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
2298		return -EINVAL;
2299	}
2300
2301	if (dev_WARN_ONCE(priv_dev->dev, priv_ep->flags & EP_ENABLED,
2302			  "%s is already enabled\n", priv_ep->name))
2303		return 0;
2304
2305	spin_lock_irqsave(&priv_dev->lock, flags);
2306
2307	priv_ep->endpoint.desc = desc;
2308	priv_ep->type = usb_endpoint_type(desc);
2309	priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
2310
2311	if (priv_ep->interval > ISO_MAX_INTERVAL &&
2312	    priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
2313		dev_err(priv_dev->dev, "ISO interval is limited to %d (micro)frames\n",
2314			ISO_MAX_INTERVAL);
2315
2316		ret = -EINVAL;
2317		goto exit;
2318	}
2319
2320	bEndpointAddress = priv_ep->num | priv_ep->dir;
2321	cdns3_select_ep(priv_dev, bEndpointAddress);
2322
2323	/*
2324	 * On some versions of the controller, DMA may at some point during
2325	 * ISO OUT traffic read the transfer ring of an EP that never got a
2326	 * doorbell. The issue was detected only in simulation, but the
2327	 * driver adds protection against it anyway: an ISO OUT endpoint is
2328	 * enabled only just before setting DRBL. This special treatment of
2329	 * ISO OUT endpoints is recommended by the controller specification.
2330	 */
2331	if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
2332		enable = 0;
2333
2334	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
2335		/*
2336		 * Enable stream support (SS mode) related interrupts
2337		 * in EP_STS_EN Register
2338		 */
2339		if (priv_dev->gadget.speed >= USB_SPEED_SUPER) {
2340			reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN |
2341			       EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN |
2342			       EP_STS_EN_STREAMREN;
2343			priv_ep->use_streams = true;
2344			ret = cdns3_ep_config(priv_ep, enable);
2345			priv_dev->using_streams |= true;
2346		}
2347	} else {
2348		ret = cdns3_ep_config(priv_ep, enable);
2349	}
2350
2351	if (ret)
2352		goto exit;
2353
2354	ret = cdns3_allocate_trb_pool(priv_ep);
2355	if (ret)
2356		goto exit;
2357
2358	bEndpointAddress = priv_ep->num | priv_ep->dir;
2359	cdns3_select_ep(priv_dev, bEndpointAddress);
2360
2361	trace_cdns3_gadget_ep_enable(priv_ep);
2362
2363	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2364
2365	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2366					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2367					1, 1000);
2368
2369	if (unlikely(ret)) {
2370		cdns3_free_trb_pool(priv_ep);
2371		ret = -EINVAL;
2372		goto exit;
2373	}
2374
2375	/* enable interrupt for selected endpoint */
2376	cdns3_set_register_bit(&priv_dev->regs->ep_ien,
2377			       BIT(cdns3_ep_addr_to_index(bEndpointAddress)));
2378
2379	if (priv_dev->dev_ver < DEV_VER_V2)
2380		cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
2381
2382	writel(reg, &priv_dev->regs->ep_sts_en);
2383
2384	ep->desc = desc;
2385	priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
2386			    EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
2387	priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
2388	priv_ep->wa1_set = 0;
2389	priv_ep->enqueue = 0;
2390	priv_ep->dequeue = 0;
2391	reg = readl(&priv_dev->regs->ep_sts);
2392	priv_ep->pcs = !!EP_STS_CCS(reg);
2393	priv_ep->ccs = !!EP_STS_CCS(reg);
2394	/* one TRB is reserved for the link TRB used in DMULT mode */
2395	priv_ep->free_trbs = priv_ep->num_trbs - 1;
2396exit:
2397	spin_unlock_irqrestore(&priv_dev->lock, flags);
2398
2399	return ret;
2400}
2401
2402/**
2403 * cdns3_gadget_ep_disable - Disable endpoint
2404 * @ep: endpoint object
2405 *
2406 * Returns 0 on success, error code elsewhere
2407 */
2408static int cdns3_gadget_ep_disable(struct usb_ep *ep)
2409{
2410	struct cdns3_endpoint *priv_ep;
2411	struct cdns3_request *priv_req;
2412	struct cdns3_device *priv_dev;
2413	struct usb_request *request;
2414	unsigned long flags;
2415	int ret = 0;
2416	u32 ep_cfg;
2417	int val;
2418
2419	if (!ep) {
2420		pr_err("usbss: invalid parameters\n");
2421		return -EINVAL;
2422	}
2423
2424	priv_ep = ep_to_cdns3_ep(ep);
2425	priv_dev = priv_ep->cdns3_dev;
2426
2427	if (dev_WARN_ONCE(priv_dev->dev, !(priv_ep->flags & EP_ENABLED),
2428			  "%s is already disabled\n", priv_ep->name))
2429		return 0;
2430
2431	spin_lock_irqsave(&priv_dev->lock, flags);
2432
2433	trace_cdns3_gadget_ep_disable(priv_ep);
2434
2435	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2436
2437	ep_cfg = readl(&priv_dev->regs->ep_cfg);
2438	ep_cfg &= ~EP_CFG_ENABLE;
2439	writel(ep_cfg, &priv_dev->regs->ep_cfg);
2440
2441	/*
2442	 * The driver needs some time before resetting the endpoint:
2443	 * it waits for the DBUSY bit to clear or for the timeout to expire.
2444	 * 10 us is enough time for the controller to stop the transfer.
2445	 */
2446	readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
2447				  !(val & EP_STS_DBUSY), 1, 10);
2448	writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2449
2450	ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2451					!(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
2452					1, 1000);
2453	if (unlikely(ret))
2454		dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
2455			priv_ep->name);
2456
2457	while (!list_empty(&priv_ep->pending_req_list)) {
2458		request = cdns3_next_request(&priv_ep->pending_req_list);
2459
2460		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2461				      -ESHUTDOWN);
2462	}
2463
2464	while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
2465		priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
2466
2467		kfree(priv_req->request.buf);
2468		cdns3_gadget_ep_free_request(&priv_ep->endpoint,
2469					     &priv_req->request);
2470		list_del_init(&priv_req->list);
2471		--priv_ep->wa2_counter;
2472	}
2473
2474	while (!list_empty(&priv_ep->deferred_req_list)) {
2475		request = cdns3_next_request(&priv_ep->deferred_req_list);
2476
2477		cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
2478				      -ESHUTDOWN);
2479	}
2480
2481	priv_ep->descmis_req = NULL;
2482
2483	ep->desc = NULL;
2484	priv_ep->flags &= ~EP_ENABLED;
2485	priv_ep->use_streams = false;
2486
2487	spin_unlock_irqrestore(&priv_dev->lock, flags);
2488
2489	return ret;
2490}
2491
2492/**
2493 * __cdns3_gadget_ep_queue - Transfer data on endpoint
2494 * @ep: endpoint object
2495 * @request: request object
2496 * @gfp_flags: gfp flags
2497 *
2498 * Returns 0 on success, error code elsewhere
2499 */
2500static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
2501				   struct usb_request *request,
2502				   gfp_t gfp_flags)
2503{
2504	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2505	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2506	struct cdns3_request *priv_req;
2507	int ret = 0;
2508
2509	request->actual = 0;
2510	request->status = -EINPROGRESS;
2511	priv_req = to_cdns3_request(request);
2512	trace_cdns3_ep_queue(priv_req);
2513
2514	if (priv_dev->dev_ver < DEV_VER_V2) {
2515		ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
2516						priv_req);
2517
2518		if (ret == EINPROGRESS)
2519			return 0;
2520	}
2521
2522	ret = cdns3_prepare_aligned_request_buf(priv_req);
2523	if (ret < 0)
2524		return ret;
2525
2526	ret = usb_gadget_map_request_by_dev(priv_dev->sysdev, request,
2527					    usb_endpoint_dir_in(ep->desc));
2528	if (ret)
2529		return ret;
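/*
 * Note (inferred from the list handling in this file): every request first
 * lands on deferred_req_list below; cdns3_start_all_request() is what moves
 * it over to pending_req_list once it has been armed in the hardware ring.
 */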
2530
2531	list_add_tail(&request->list, &priv_ep->deferred_req_list);
2532
2533	/*
2534	 * For a stream-capable endpoint, start the request only if the
2535	 * prime IRQ flag is set.
2536	 * If the hardware endpoint configuration has not been set yet, just
2537	 * leave the request on the deferred list. The transfer will be
2538	 * started in cdns3_set_hw_configuration.
2539	 */
2540	if (!request->stream_id) {
2541		if (priv_dev->hw_configured_flag &&
2542		    !(priv_ep->flags & EP_STALLED) &&
2543		    !(priv_ep->flags & EP_STALL_PENDING))
2544			cdns3_start_all_request(priv_dev, priv_ep);
2545	} else {
2546		if (priv_dev->hw_configured_flag && priv_ep->prime_flag)
2547			cdns3_start_all_request(priv_dev, priv_ep);
2548	}
2549
2550	return 0;
2551}
2552
2553static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
2554				 gfp_t gfp_flags)
2555{
2556	struct usb_request *zlp_request;
2557	struct cdns3_endpoint *priv_ep;
2558	struct cdns3_device *priv_dev;
2559	unsigned long flags;
2560	int ret;
2561
2562	if (!request || !ep)
2563		return -EINVAL;
2564
2565	priv_ep = ep_to_cdns3_ep(ep);
2566	priv_dev = priv_ep->cdns3_dev;
2567
2568	spin_lock_irqsave(&priv_dev->lock, flags);
2569
2570	ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);
2571
2572	if (ret == 0 && request->zero && request->length &&
2573	    (request->length % ep->maxpacket == 0)) {
2574		struct cdns3_request *priv_req;
2575
2576		zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
2577		zlp_request->buf = priv_dev->zlp_buf;
2578		zlp_request->length = 0;
2579
2580		priv_req = to_cdns3_request(zlp_request);
2581		priv_req->flags |= REQUEST_ZLP;
2582
2583		dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
2584			priv_ep->name);
2585		ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
2586	}
2587
2588	spin_unlock_irqrestore(&priv_dev->lock, flags);
2589	return ret;
2590}
2591
2592/**
2593 * cdns3_gadget_ep_dequeue - Remove request from transfer queue
2594 * @ep: endpoint object associated with request
2595 * @request: request object
2596 *
2597 * Returns 0 on success, error code elsewhere
2598 */
2599int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
2600			    struct usb_request *request)
2601{
2602	struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2603	struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2604	struct usb_request *req, *req_temp;
2605	struct cdns3_request *priv_req;
2606	struct cdns3_trb *link_trb;
2607	u8 req_on_hw_ring = 0;
2608	unsigned long flags;
2609	int ret = 0;
2610
2611	if (!ep || !request || !ep->desc)
2612		return -EINVAL;
2613
2614	spin_lock_irqsave(&priv_dev->lock, flags);
2615
2616	priv_req = to_cdns3_request(request);
2617
2618	trace_cdns3_ep_dequeue(priv_req);
2619
2620	cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2621
2622	list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
2623				 list) {
2624		if (request == req) {
2625			req_on_hw_ring = 1;
2626			goto found;
2627		}
2628	}
2629
2630	list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
2631				 list) {
2632		if (request == req)
2633			goto found;
2634	}
2635
2636	goto not_found;
2637
2638found:
2639	link_trb = priv_req->trb;
2640
2641	/* Update the ring only if the removed request is on pending_req_list */
2642	if (req_on_hw_ring && link_trb) {
2643		link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
2644			((priv_req->end_trb + 1) * TRB_SIZE)));
2645		link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
2646			TRB_TYPE(TRB_LINK) | TRB_CHAIN);
2647
2648		if (priv_ep->wa1_trb == priv_req->trb)
2649
cdns3_wa1_restore_cycle_bit(priv_ep); 2650 } 2651 2652 cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET); 2653 2654not_found: 2655 spin_unlock_irqrestore(&priv_dev->lock, flags); 2656 return ret; 2657} 2658 2659/** 2660 * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint 2661 * Should be called after acquiring spin_lock and selecting ep 2662 * @priv_ep: endpoint object to set stall on. 2663 */ 2664void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep) 2665{ 2666 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2667 2668 trace_cdns3_halt(priv_ep, 1, 0); 2669 2670 if (!(priv_ep->flags & EP_STALLED)) { 2671 u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts); 2672 2673 if (!(ep_sts_reg & EP_STS_DBUSY)) 2674 cdns3_ep_stall_flush(priv_ep); 2675 else 2676 priv_ep->flags |= EP_STALL_PENDING; 2677 } 2678} 2679 2680/** 2681 * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint 2682 * Should be called after acquiring spin_lock and selecting ep 2683 * @priv_ep: endpoint object to clear stall on 2684 */ 2685int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) 2686{ 2687 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2688 struct usb_request *request; 2689 struct cdns3_request *priv_req; 2690 struct cdns3_trb *trb = NULL; 2691 struct cdns3_trb trb_tmp; 2692 int ret; 2693 int val; 2694 2695 trace_cdns3_halt(priv_ep, 0, 0); 2696 2697 request = cdns3_next_request(&priv_ep->pending_req_list); 2698 if (request) { 2699 priv_req = to_cdns3_request(request); 2700 trb = priv_req->trb; 2701 if (trb) { 2702 trb_tmp = *trb; 2703 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); 2704 } 2705 } 2706 2707 writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); 2708 2709 /* wait for EPRST cleared */ 2710 ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2711 !(val & EP_CMD_EPRST), 1, 100); 2712 if (ret) 2713 return -EINVAL; 2714 2715 priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); 2716 2717 if (request) { 2718 if (trb) 2719 *trb = trb_tmp; 2720 2721 cdns3_rearm_transfer(priv_ep, 1); 2722 } 2723 2724 cdns3_start_all_request(priv_dev, priv_ep); 2725 return ret; 2726} 2727 2728/** 2729 * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint 2730 * @ep: endpoint object to set/clear stall on 2731 * @value: 1 for set stall, 0 for clear stall 2732 * 2733 * Returns 0 on success, error code elsewhere 2734 */ 2735int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value) 2736{ 2737 struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep); 2738 struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 2739 unsigned long flags; 2740 int ret = 0; 2741 2742 if (!(priv_ep->flags & EP_ENABLED)) 2743 return -EPERM; 2744 2745 spin_lock_irqsave(&priv_dev->lock, flags); 2746 2747 cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress); 2748 2749 if (!value) { 2750 priv_ep->flags &= ~EP_WEDGE; 2751 ret = __cdns3_gadget_ep_clear_halt(priv_ep); 2752 } else { 2753 __cdns3_gadget_ep_set_halt(priv_ep); 2754 } 2755 2756 spin_unlock_irqrestore(&priv_dev->lock, flags); 2757 2758 return ret; 2759} 2760 2761extern const struct usb_ep_ops cdns3_gadget_ep0_ops; 2762 2763static const struct usb_ep_ops cdns3_gadget_ep_ops = { 2764 .enable = cdns3_gadget_ep_enable, 2765 .disable = cdns3_gadget_ep_disable, 2766 .alloc_request = cdns3_gadget_ep_alloc_request, 2767 .free_request = cdns3_gadget_ep_free_request, 2768 .queue = cdns3_gadget_ep_queue, 2769 .dequeue = cdns3_gadget_ep_dequeue, 2770 .set_halt = cdns3_gadget_ep_set_halt, 2771 .set_wedge = cdns3_gadget_ep_set_wedge, 2772}; 
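/*
 * Illustrative use of the operations above from a gadget function driver's
 * point of view (a sketch; my_ep, buf and len are hypothetical, and error
 * handling is omitted). The struct above is reached through the generic
 * usb_ep_*() wrappers:
 *
 *	struct usb_request *req = usb_ep_alloc_request(my_ep, GFP_ATOMIC);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->zero = 1;	// cdns3_gadget_ep_queue() appends a ZLP when
 *			// len is a multiple of my_ep->maxpacket
 *	usb_ep_queue(my_ep, req, GFP_ATOMIC);
 */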
2773
2774/**
2775 * cdns3_gadget_get_frame - Returns the current ITP frame number
2776 * @gadget: gadget object
2777 *
2778 * Returns the current ITP frame number
2779 */
2780static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
2781{
2782	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2783
2784	return readl(&priv_dev->regs->usb_itpn);
2785}
2786
2787int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
2788{
2789	enum usb_device_speed speed;
2790
2791	speed = cdns3_get_speed(priv_dev);
2792
2793	if (speed >= USB_SPEED_SUPER)
2794		return 0;
2795
2796	/* Start driving resume signaling to indicate remote wakeup. */
2797	writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
2798
2799	return 0;
2800}
2801
2802static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
2803{
2804	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2805	unsigned long flags;
2806	int ret = 0;
2807
2808	spin_lock_irqsave(&priv_dev->lock, flags);
2809	ret = __cdns3_gadget_wakeup(priv_dev);
2810	spin_unlock_irqrestore(&priv_dev->lock, flags);
2811	return ret;
2812}
2813
2814static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
2815					int is_selfpowered)
2816{
2817	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2818	unsigned long flags;
2819
2820	spin_lock_irqsave(&priv_dev->lock, flags);
2821	priv_dev->is_selfpowered = !!is_selfpowered;
2822	spin_unlock_irqrestore(&priv_dev->lock, flags);
2823	return 0;
2824}
2825
2826static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
2827{
2828	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2829
2830	if (is_on) {
2831		writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
2832	} else {
2833		writel(~0, &priv_dev->regs->ep_ists);
2834		writel(~0, &priv_dev->regs->usb_ists);
2835		writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2836	}
2837
2838	return 0;
2839}
2840
2841static void cdns3_gadget_config(struct cdns3_device *priv_dev)
2842{
2843	struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
2844	u32 reg;
2845
2846	cdns3_ep0_config(priv_dev);
2847
2848	/* enable interrupts for endpoint 0 (in and out) */
2849	writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);
2850
2851	/*
2852	 * The driver needs to modify the LFPS minimal U1 Exit time for the
2853	 * DEV_VER_TI_V1 revision of the controller.
2854	 */
2855	if (priv_dev->dev_ver == DEV_VER_TI_V1) {
2856		reg = readl(&regs->dbg_link1);
2857
2858		reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
2859		reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
2860		       DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
2861		writel(reg, &regs->dbg_link1);
2862	}
2863
2864	/*
2865	 * By default, some platforms have protected access to memory set.
2866	 * This causes problems with caching, so the driver restores
2867	 * non-secure access to memory.
2868	 */
2869	reg = readl(&regs->dma_axi_ctrl);
2870	reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
2871	       DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
2872	writel(reg, &regs->dma_axi_ctrl);
2873
2874	/* enable generic interrupts */
2875	writel(USB_IEN_INIT, &regs->usb_ien);
2876	writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
2877	/* keep Fast Access bit */
2878	writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr);
2879
2880	cdns3_configure_dmult(priv_dev, NULL);
2881}
2882
2883/**
2884 * cdns3_gadget_udc_start - Gadget start
2885 * @gadget: gadget object
2886 * @driver: driver which operates on this gadget
2887 *
2888 * Returns 0 on success, error code elsewhere
2889 */
2890static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
2891				  struct usb_gadget_driver *driver)
2892{
2893	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2894	unsigned long flags;
2895	enum usb_device_speed max_speed = driver->max_speed;
2896
2897	spin_lock_irqsave(&priv_dev->lock, flags);
2898	priv_dev->gadget_driver = driver;
2899
2900	/* limit speed if necessary */
2901	max_speed = min(driver->max_speed, gadget->max_speed);
2902
2903	switch (max_speed) {
2904	case USB_SPEED_FULL:
2905		writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
2906		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2907		break;
2908	case USB_SPEED_HIGH:
2909		writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2910		break;
2911	case USB_SPEED_SUPER:
2912		break;
2913	default:
2914		dev_err(priv_dev->dev,
2915			"invalid maximum_speed parameter %d\n",
2916			max_speed);
2917		fallthrough;
2918	case USB_SPEED_UNKNOWN:
2919		/* default to superspeed */
2920		max_speed = USB_SPEED_SUPER;
2921		break;
2922	}
2923
2924	cdns3_gadget_config(priv_dev);
2925	spin_unlock_irqrestore(&priv_dev->lock, flags);
2926	return 0;
2927}
2928
2929/**
2930 * cdns3_gadget_udc_stop - Stops gadget
2931 * @gadget: gadget object
2932 *
2933 * Returns 0
2934 */
2935static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
2936{
2937	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2938	struct cdns3_endpoint *priv_ep;
2939	u32 bEndpointAddress;
2940	struct usb_ep *ep;
2941	int val;
2942
2943	priv_dev->gadget_driver = NULL;
2944
2945	priv_dev->onchip_used_size = 0;
2946	priv_dev->out_mem_is_allocated = 0;
2947	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2948
2949	list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2950		priv_ep = ep_to_cdns3_ep(ep);
2951		bEndpointAddress = priv_ep->num | priv_ep->dir;
2952		cdns3_select_ep(priv_dev, bEndpointAddress);
2953		writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2954		readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2955					  !(val & EP_CMD_EPRST), 1, 100);
2956
2957		priv_ep->flags &= ~EP_CLAIMED;
2958	}
2959
2960	/* disable interrupt for device */
2961	writel(0, &priv_dev->regs->usb_ien);
2962	writel(0, &priv_dev->regs->usb_pwr);
2963	writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2964
2965	return 0;
2966}
2967
2968/**
2969 * cdns3_gadget_check_config - ensure cdns3 can support the USB configuration
2970 * @gadget: pointer to the USB gadget
2971 *
2972 * Used to record the maximum number of endpoints being used in a USB
2973 * composite device (across all configurations). This is to be used in the
2974 * calculation of the TXFIFO sizes when resizing internal memory for
2975 * individual endpoints. It helps ensure that the resizing logic reserves
2976 * enough space for at least one max packet.
2977 */
2978static int cdns3_gadget_check_config(struct usb_gadget *gadget)
2979{
2980	struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2981	struct usb_ep *ep;
2982	int n_in = 0;
2983	int total;
2984
2985	list_for_each_entry(ep, &gadget->ep_list, ep_list) {
2986		if (ep->claimed && (ep->address & USB_DIR_IN))
2987			n_in++;
2988	}
2989
2990	/* 2 KB are reserved for EP0, 1 KB for OUT */
2991	total = 2 + n_in + 1;
2992
2993	if (total > priv_dev->onchip_buffers)
2994		return -ENOMEM;
2995
2996	priv_dev->ep_buf_size = priv_dev->ep_iso_burst =
2997			(priv_dev->onchip_buffers - 2) / (n_in + 1);
2998
2999	return 0;
3000}
3001
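/*
 * Worked example for the accounting above (illustrative numbers): with
 * onchip_buffers = 18 KB and three claimed IN endpoints, total =
 * 2 + 3 + 1 = 6 KB, which fits, and each IN endpoint gets
 * ep_buf_size = ep_iso_burst = (18 - 2) / (3 + 1) = 4 KB.
 */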
3002static const struct usb_gadget_ops cdns3_gadget_ops = {
3003	.get_frame = cdns3_gadget_get_frame,
3004	.wakeup = cdns3_gadget_wakeup,
3005	.set_selfpowered = cdns3_gadget_set_selfpowered,
3006	.pullup = cdns3_gadget_pullup,
3007	.udc_start = cdns3_gadget_udc_start,
3008	.udc_stop = cdns3_gadget_udc_stop,
3009	.match_ep = cdns3_gadget_match_ep,
3010	.check_config = cdns3_gadget_check_config,
3011};
3012
3013static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
3014{
3015	int i;
3016
3017	/* ep0 OUT points to ep0 IN. */
3018	priv_dev->eps[16] = NULL;
3019
3020	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
3021		if (priv_dev->eps[i]) {
3022			cdns3_free_trb_pool(priv_dev->eps[i]);
3023			devm_kfree(priv_dev->dev, priv_dev->eps[i]);
3024		}
3025}
3026
3027/**
3028 * cdns3_init_eps - Initializes software endpoints of gadget
3029 * @priv_dev: extended gadget object
3030 *
3031 * Returns 0 on success, error code elsewhere
3032 */
3033static int cdns3_init_eps(struct cdns3_device *priv_dev)
3034{
3035	u32 ep_enabled_reg, iso_ep_reg;
3036	struct cdns3_endpoint *priv_ep;
3037	int ep_dir, ep_number;
3038	u32 ep_mask;
3039	int ret = 0;
3040	int i;
3041
3042	/* Read it from USB_CAP3 to USB_CAP5 */
3043	ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
3044	iso_ep_reg = readl(&priv_dev->regs->usb_cap4);
3045
3046	dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");
3047
3048	for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
3049		ep_dir = i >> 4;	/* i div 16 */
3050		ep_number = i & 0xF;	/* i % 16 */
3051		ep_mask = BIT(i);
3052
3053		if (!(ep_enabled_reg & ep_mask))
3054			continue;
3055
3056		if (ep_dir && !ep_number) {
3057			priv_dev->eps[i] = priv_dev->eps[0];
3058			continue;
3059		}
3060
3061		priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
3062				       GFP_KERNEL);
3063		if (!priv_ep)
3064			goto err;
3065
3066		/* set parent of endpoint object */
3067		priv_ep->cdns3_dev = priv_dev;
3068		priv_dev->eps[i] = priv_ep;
3069		priv_ep->num = ep_number;
3070		priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;
3071
3072		if (!ep_number) {
3073			ret = cdns3_init_ep0(priv_dev, priv_ep);
3074			if (ret) {
3075				dev_err(priv_dev->dev, "Failed to init ep0\n");
3076				goto err;
3077			}
3078		} else {
3079			snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
3080				 ep_number, !!ep_dir ?
"in" : "out"); 3081 priv_ep->endpoint.name = priv_ep->name; 3082 3083 usb_ep_set_maxpacket_limit(&priv_ep->endpoint, 3084 CDNS3_EP_MAX_PACKET_LIMIT); 3085 priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS; 3086 priv_ep->endpoint.ops = &cdns3_gadget_ep_ops; 3087 if (ep_dir) 3088 priv_ep->endpoint.caps.dir_in = 1; 3089 else 3090 priv_ep->endpoint.caps.dir_out = 1; 3091 3092 if (iso_ep_reg & ep_mask) 3093 priv_ep->endpoint.caps.type_iso = 1; 3094 3095 priv_ep->endpoint.caps.type_bulk = 1; 3096 priv_ep->endpoint.caps.type_int = 1; 3097 3098 list_add_tail(&priv_ep->endpoint.ep_list, 3099 &priv_dev->gadget.ep_list); 3100 } 3101 3102 priv_ep->flags = 0; 3103 3104 dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n", 3105 priv_ep->name, 3106 priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "", 3107 priv_ep->endpoint.caps.type_iso ? "ISO" : ""); 3108 3109 INIT_LIST_HEAD(&priv_ep->pending_req_list); 3110 INIT_LIST_HEAD(&priv_ep->deferred_req_list); 3111 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list); 3112 } 3113 3114 return 0; 3115err: 3116 cdns3_free_all_eps(priv_dev); 3117 return -ENOMEM; 3118} 3119 3120static void cdns3_gadget_release(struct device *dev) 3121{ 3122 struct cdns3_device *priv_dev = container_of(dev, 3123 struct cdns3_device, gadget.dev); 3124 3125 kfree(priv_dev); 3126} 3127 3128static void cdns3_gadget_exit(struct cdns *cdns) 3129{ 3130 struct cdns3_device *priv_dev; 3131 3132 priv_dev = cdns->gadget_dev; 3133 3134 3135 pm_runtime_mark_last_busy(cdns->dev); 3136 pm_runtime_put_autosuspend(cdns->dev); 3137 3138 usb_del_gadget(&priv_dev->gadget); 3139 devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); 3140 3141 cdns3_free_all_eps(priv_dev); 3142 3143 while (!list_empty(&priv_dev->aligned_buf_list)) { 3144 struct cdns3_aligned_buf *buf; 3145 3146 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list); 3147 dma_free_noncoherent(priv_dev->sysdev, buf->size, 3148 buf->buf, 3149 buf->dma, 3150 buf->dir); 3151 3152 list_del(&buf->list); 3153 kfree(buf); 3154 } 3155 3156 dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf, 3157 priv_dev->setup_dma); 3158 dma_pool_destroy(priv_dev->eps_dma_pool); 3159 3160 kfree(priv_dev->zlp_buf); 3161 usb_put_gadget(&priv_dev->gadget); 3162 cdns->gadget_dev = NULL; 3163 cdns_drd_gadget_off(cdns); 3164} 3165 3166static int cdns3_gadget_start(struct cdns *cdns) 3167{ 3168 struct cdns3_device *priv_dev; 3169 u32 max_speed; 3170 int ret; 3171 3172 priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL); 3173 if (!priv_dev) 3174 return -ENOMEM; 3175 3176 usb_initialize_gadget(cdns->dev, &priv_dev->gadget, 3177 cdns3_gadget_release); 3178 cdns->gadget_dev = priv_dev; 3179 priv_dev->sysdev = cdns->dev; 3180 priv_dev->dev = cdns->dev; 3181 priv_dev->regs = cdns->dev_regs; 3182 3183 device_property_read_u16(priv_dev->dev, "cdns,on-chip-buff-size", 3184 &priv_dev->onchip_buffers); 3185 3186 if (priv_dev->onchip_buffers <= 0) { 3187 u32 reg = readl(&priv_dev->regs->usb_cap2); 3188 3189 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg); 3190 } 3191 3192 if (!priv_dev->onchip_buffers) 3193 priv_dev->onchip_buffers = 256; 3194 3195 max_speed = usb_get_maximum_speed(cdns->dev); 3196 3197 /* Check the maximum_speed parameter */ 3198 switch (max_speed) { 3199 case USB_SPEED_FULL: 3200 case USB_SPEED_HIGH: 3201 case USB_SPEED_SUPER: 3202 break; 3203 default: 3204 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n", 3205 max_speed); 3206 fallthrough; 3207 case USB_SPEED_UNKNOWN: 3208 /* default to superspeed */ 3209 max_speed = USB_SPEED_SUPER; 3210 
		break;
3211	}
3212
3213	/* fill gadget fields */
3214	priv_dev->gadget.max_speed = max_speed;
3215	priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
3216	priv_dev->gadget.ops = &cdns3_gadget_ops;
3217	priv_dev->gadget.name = "usb-ss-gadget";
3218	priv_dev->gadget.quirk_avoids_skb_reserve = 1;
3219	priv_dev->gadget.irq = cdns->dev_irq;
3220
3221	spin_lock_init(&priv_dev->lock);
3222	INIT_WORK(&priv_dev->pending_status_wq,
3223		  cdns3_pending_setup_status_handler);
3224
3225	INIT_WORK(&priv_dev->aligned_buf_wq,
3226		  cdns3_free_aligned_request_buf);
3227
3228	/* initialize endpoint container */
3229	INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
3230	INIT_LIST_HEAD(&priv_dev->aligned_buf_list);
3231	priv_dev->eps_dma_pool = dma_pool_create("cdns3_eps_dma_pool",
3232						 priv_dev->sysdev,
3233						 TRB_RING_SIZE, 8, 0);
3234	if (!priv_dev->eps_dma_pool) {
3235		dev_err(priv_dev->dev, "Failed to create TRB dma pool\n");
3236		ret = -ENOMEM;
3237		goto err1;
3238	}
3239
3240	ret = cdns3_init_eps(priv_dev);
3241	if (ret) {
3242		dev_err(priv_dev->dev, "Failed to create endpoints\n");
3243		goto err1;
3244	}
3245
3246	/* allocate memory for setup packet buffer */
3247	priv_dev->setup_buf = dma_alloc_coherent(priv_dev->sysdev, 8,
3248						 &priv_dev->setup_dma, GFP_DMA);
3249	if (!priv_dev->setup_buf) {
3250		ret = -ENOMEM;
3251		goto err2;
3252	}
3253
3254	priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);
3255
3256	dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
3257		readl(&priv_dev->regs->usb_cap6));
3258	dev_dbg(priv_dev->dev, "USB Capabilities: %08x\n",
3259		readl(&priv_dev->regs->usb_cap1));
3260	dev_dbg(priv_dev->dev, "On-Chip memory configuration: %08x\n",
3261		readl(&priv_dev->regs->usb_cap2));
3262
3263	priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
3264	if (priv_dev->dev_ver >= DEV_VER_V2)
3265		priv_dev->gadget.sg_supported = 1;
3266
3267	priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
3268	if (!priv_dev->zlp_buf) {
3269		ret = -ENOMEM;
3270		goto err3;
3271	}
3272
3273	/* add USB gadget device */
3274	ret = usb_add_gadget(&priv_dev->gadget);
3275	if (ret < 0) {
3276		dev_err(priv_dev->dev, "Failed to add gadget\n");
3277		goto err4;
3278	}
3279
3280	return 0;
3281err4:
3282	kfree(priv_dev->zlp_buf);
3283err3:
3284	dma_free_coherent(priv_dev->sysdev, 8, priv_dev->setup_buf,
3285			  priv_dev->setup_dma);
3286err2:
3287	cdns3_free_all_eps(priv_dev);
3288err1:
3289	dma_pool_destroy(priv_dev->eps_dma_pool);
3290
3291	usb_put_gadget(&priv_dev->gadget);
3292	cdns->gadget_dev = NULL;
3293	return ret;
3294}
3295
3296static int __cdns3_gadget_init(struct cdns *cdns)
3297{
3298	int ret = 0;
3299
3300	/* Ensure 32-bit DMA Mask in case we switched back from Host mode */
3301	ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
3302	if (ret) {
3303		dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
3304		return ret;
3305	}
3306
3307	cdns_drd_gadget_on(cdns);
3308	pm_runtime_get_sync(cdns->dev);
3309
3310	ret = cdns3_gadget_start(cdns);
3311	if (ret) {
3312		pm_runtime_put_sync(cdns->dev);
3313		return ret;
3314	}
3315
3316	/*
3317	 * Because the interrupt line can be shared with other components,
3318	 * the driver can't use the IRQF_ONESHOT flag here.
3319 */ 3320 ret = devm_request_threaded_irq(cdns->dev, cdns->dev_irq, 3321 cdns3_device_irq_handler, 3322 cdns3_device_thread_irq_handler, 3323 IRQF_SHARED, dev_name(cdns->dev), 3324 cdns->gadget_dev); 3325 3326 if (ret) 3327 goto err0; 3328 3329 return 0; 3330err0: 3331 cdns3_gadget_exit(cdns); 3332 return ret; 3333} 3334 3335static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup) 3336__must_hold(&cdns->lock) 3337{ 3338 struct cdns3_device *priv_dev = cdns->gadget_dev; 3339 3340 spin_unlock(&cdns->lock); 3341 cdns3_disconnect_gadget(priv_dev); 3342 spin_lock(&cdns->lock); 3343 3344 priv_dev->gadget.speed = USB_SPEED_UNKNOWN; 3345 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); 3346 cdns3_hw_reset_eps_config(priv_dev); 3347 3348 /* disable interrupt for device */ 3349 writel(0, &priv_dev->regs->usb_ien); 3350 3351 return 0; 3352} 3353 3354static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated) 3355{ 3356 struct cdns3_device *priv_dev = cdns->gadget_dev; 3357 3358 if (!priv_dev->gadget_driver) 3359 return 0; 3360 3361 cdns3_gadget_config(priv_dev); 3362 if (hibernated) 3363 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf); 3364 3365 return 0; 3366} 3367 3368/** 3369 * cdns3_gadget_init - initialize device structure 3370 * 3371 * @cdns: cdns instance 3372 * 3373 * This function initializes the gadget. 3374 */ 3375int cdns3_gadget_init(struct cdns *cdns) 3376{ 3377 struct cdns_role_driver *rdrv; 3378 3379 rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL); 3380 if (!rdrv) 3381 return -ENOMEM; 3382 3383 rdrv->start = __cdns3_gadget_init; 3384 rdrv->stop = cdns3_gadget_exit; 3385 rdrv->suspend = cdns3_gadget_suspend; 3386 rdrv->resume = cdns3_gadget_resume; 3387 rdrv->state = CDNS_ROLE_STATE_INACTIVE; 3388 rdrv->name = "gadget"; 3389 cdns->roles[USB_ROLE_DEVICE] = rdrv; 3390 3391 return 0; 3392}
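/*
 * Illustrative role-driver flow (a sketch inferred from the registration
 * above, not code from this file): the cdns core is expected to invoke the
 * callbacks registered in cdns3_gadget_init() roughly like this:
 *
 *	struct cdns_role_driver *rdrv = cdns->roles[USB_ROLE_DEVICE];
 *
 *	ret = rdrv->start(cdns);		// __cdns3_gadget_init()
 *	...
 *	rdrv->suspend(cdns, do_wakeup);		// cdns3_gadget_suspend()
 *	rdrv->resume(cdns, hibernated);		// cdns3_gadget_resume()
 *	rdrv->stop(cdns);			// cdns3_gadget_exit()
 */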