ep0.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
	struct usb_request *req = &ep->ep0.req.req;
	int rc;

	if (WARN_ON(ep->d_idx != 0))
		return std_req_stall;
	if (WARN_ON(!ep->ep0.dir_in))
		return std_req_stall;
	if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
		return std_req_stall;
	if (WARN_ON(req->status == -EINPROGRESS))
		return std_req_stall;

	req->buf = ptr;
	req->length = len;
	req->complete = NULL;
	req->zero = true;

	/*
	 * Call internal queue directly after dropping the lock. This is
	 * safe to do as the reply is always the last thing done when
	 * processing a SETUP packet, usually as a tail call
	 */
	spin_unlock(&ep->vhub->lock);
	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
		rc = std_req_stall;
	else
		rc = std_req_data;
	spin_lock(&ep->vhub->lock);
	return rc;
}

int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *buffer = ep->buf;
	unsigned int i;
	va_list args;

	va_start(args, len);

	/* Copy data directly into EP buffer */
	for (i = 0; i < len; i++)
		buffer[i] = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}
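
/*
 * Illustrative sketch, not part of the driver: hub and device request
 * handlers built on the helpers above typically answer small control
 * reads with a tail call such as
 *
 *	return ast_vhub_simple_reply(ep, 0x00, 0x00);
 *
 * assuming the ast_vhub_simple_reply() convenience macro from vhub.h,
 * which counts its byte arguments and forwards them to
 * __ast_vhub_simple_reply(). The bytes land directly in the EP0
 * buffer, which is why ast_vhub_reply() is then invoked with a NULL
 * data pointer.
 */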

void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;

	if (WARN_ON(ep->d_idx != 0))
		return;

	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	      le16_to_cpu(crq.wValue),
	      le16_to_cpu(crq.wIndex),
	      le16_to_cpu(crq.wLength),
	      (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	      ep->ep0.state);

	/*
	 * Check our state, cancel pending requests if needed
	 *
	 * Note: Under some circumstances, we can get a new setup
	 * packet while waiting for the stall ack, just accept it.
	 *
	 * In any case, a SETUP packet in wrong state should have
	 * reset the HW state machine, so let's just log, nuke
	 * requests, move on.
	 */
	if (ep->ep0.state != ep0_state_token &&
	    ep->ep0.state != ep0_state_stall) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, -EIO);
	}

	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);

	/* Act upon result */
	switch (std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		break;
	case std_req_data:
		return;
	}

	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else {
		EPDBG(ep, "no gadget for request !\n");
	}
	if (rc >= 0)
		return;

 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_stall;
	ep->ep0.dir_in = false;
	return;

 complete:
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}
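
/*
 * For reference, the EP0 state machine driven by the handlers above
 * and below:
 *
 *	token --SETUP--> data --last packet--> status --ack--> token
 *
 * Any error path parks the endpoint in ep0_state_stall; a new SETUP
 * packet is accepted from either the token or the stall state.
 */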

static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/* If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

	vhub_dma_workaround(ep->buf);

	/* Remember chunk size and trigger send */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}

static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}

static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain;
	int rc = 0;

	/* We are receiving... how much room is left in the request ? */
	remain = req->req.length - req->req.actual;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Are we getting more than asked ? */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d) !\n",
		      len - remain);
		len = remain;
		rc = -EOVERFLOW;
	}

	/* The hardware can return a wrong data length, use the expected one */
	if (len < ep->ep.maxpacket && len != remain) {
		EPDBG(ep, "using expected data len instead\n");
		len = remain;
	}

	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Done ? */
	if (len < ep->ep.maxpacket || len == remain) {
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, rc);
	} else
		ast_vhub_ep0_rx_prime(ep);
}
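
/*
 * Worked example for the send path above, assuming the 64-byte
 * AST_VHUB_EP0_MAX_PACKET: a 100-byte IN transfer goes out as one
 * 64-byte packet (last_desc stays -1) followed by a 36-byte short
 * packet (last_desc becomes 1); the ack after that short packet
 * completes the request and flips EP0 to the status phase. A 128-byte
 * transfer with req->zero set ends with an extra zero-length packet
 * so the host can tell the data stage is over.
 */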

void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch (ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			/* In that case, ignore interrupt */
			dev_warn(dev, "irq state mismatch\n");
			break;
		}
		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
		break;
	case ep0_state_stall:
		/*
		 * There shouldn't be any request left, but nuke just in case
		 * otherwise the stale request will block subsequent ones
		 */
		ast_vhub_nuke(ep, -EIO);
		break;
	}

	/* Reset to token state or stall */
	if (stall) {
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_stall;
	} else
		ep->ep0.state = ep0_state_token;
}
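
/*
 * The entry points below implement the usb_ep_ops interface seen by
 * the gadget stack. As a hypothetical illustration, a gadget driver
 * whose setup() callback accepted a control request would respond
 * through the generic API, e.g.:
 *
 *	req->length = len;
 *	req->complete = my_setup_complete;	// hypothetical callback
 *	status = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
 *
 * which ends up in ast_vhub_ep0_queue() below.
 */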

static int ast_vhub_ep0_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && !ep->dev->enabled)
		return -ESHUTDOWN;

	/* Data, no buffer and not internal ? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) ||
	    ep->ep0.state == ep0_state_token ||
	    ep->ep0.state == ep0_state_stall) {
		dev_warn(dev, "EP0: Request in wrong state\n");
		EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
		       list_empty(&ep->queue), ep->ep0.state);
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_ep0_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Only one request can be in the queue */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	/* Is it ours ? */
	if (req && u_req == &req->req) {
		EPVDBG(ep, "dequeue req @%p\n", req);

		/*
		 * We don't have to deal with "active" as all
		 * DMAs go to the EP buffers, not the request.
		 */
		ast_vhub_done(ep, req, -ECONNRESET);

		/* We do stall the EP to clean things up in HW */
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_status;
		ep->ep0.dir_in = false;
		rc = 0;
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}
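
/*
 * EP0 only needs queue/dequeue plus the request constructors: gadget
 * drivers never enable or disable a control endpoint themselves (the
 * UDC owns EP0's lifetime), so the remaining usb_ep_ops callbacks are
 * left unset. The allocators are the generic ones shared with the
 * regular endpoints elsewhere in this driver.
 */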

static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue = ast_vhub_ep0_queue,
	.dequeue = ast_vhub_ep0_dequeue,
	.alloc_request = ast_vhub_alloc_request,
	.free_request = ast_vhub_free_request,
};

void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
{
	struct ast_vhub_ep *ep = &dev->ep0;

	ast_vhub_nuke(ep, -EIO);
	ep->ep0.state = ep0_state_token;
}

void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
		       struct ast_vhub_dev *dev)
{
	memset(ep, 0, sizeof(*ep));

	INIT_LIST_HEAD(&ep->ep.ep_list);
	INIT_LIST_HEAD(&ep->queue);
	ep->ep.ops = &ast_vhub_ep0_ops;
	ep->ep.name = "ep0";
	ep->ep.caps.type_control = true;
	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
	ep->d_idx = 0;
	ep->dev = dev;
	ep->vhub = vhub;
	ep->ep0.state = ep0_state_token;
	INIT_LIST_HEAD(&ep->ep0.req.queue);
	ep->ep0.req.internal = true;

	/* Small difference between vHub and devices */
	if (dev) {
		ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
		ep->ep0.setup = vhub->regs +
			AST_VHUB_SETUP0 + 8 * (dev->index + 1);
		ep->buf = vhub->ep0_bufs +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
		ep->buf_dma = vhub->ep0_bufs_dma +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
	} else {
		ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
		ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
		ep->buf = vhub->ep0_bufs;
		ep->buf_dma = vhub->ep0_bufs_dma;
	}
}
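
/*
 * Illustrative use of the initializer above, a sketch based on the
 * NULL-vs-device branch it contains: the vHub's own control pipe is
 * set up with a NULL device so it maps onto the hub register set,
 * while each downstream device passes itself to get its per-port
 * registers and EP0 buffer slice:
 *
 *	ast_vhub_init_ep0(vhub, &vhub->ep0, NULL);	// the hub itself
 *	ast_vhub_init_ep0(vhub, &d->ep0, d);		// downstream device 'd'
 */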