uvc_video.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * uvc_video.c -- USB Video Class Gadget driver
 *
 * Copyright (C) 2009-2010
 *    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
#include <asm/unaligned.h>

#include <media/v4l2-dev.h>

#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"

/* --------------------------------------------------------------------------
 * Video codecs
 */

static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
	int pos = 2;

	data[1] = UVC_STREAM_EOH | video->fid;

	if (video->queue.buf_used == 0 && ts.tv_sec) {
		/* dwClockFrequency is 48 MHz */
		u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_PTS;
		put_unaligned_le32(pts, &data[pos]);
		pos += 4;
	}

	if (cdev->gadget->ops->get_frame) {
		u32 sof, stc;

		sof = usb_gadget_frame_number(cdev->gadget);
		ktime_get_ts64(&ts);
		stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_SCR;
		put_unaligned_le32(stc, &data[pos]);
		put_unaligned_le16(sof, &data[pos+4]);
		pos += 6;
	}

	data[0] = pos;

	if (buf->bytesused - video->queue.buf_used <= len - pos)
		data[1] |= UVC_STREAM_EOF;

	return pos;
}

static int
uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_video_queue *queue = &video->queue;
	unsigned int nbytes;
	void *mem;

	/* Copy video data to the USB buffer. */
	mem = buf->mem + queue->buf_used;
	nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);

	memcpy(data, mem, nbytes);
	queue->buf_used += nbytes;

	return nbytes;
}

static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	int len = video->req_size;
	int ret;

	/* Add a header at the beginning of the payload. */
	if (video->payload_size == 0) {
		ret = uvc_video_encode_header(video, buf, mem, len);
		video->payload_size += ret;
		mem += ret;
		len -= ret;
	}

	/* Process video data. */
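	/*
	 * Clamp the copy to the space left in the current bulk payload: a
	 * payload larger than max_payload_size is spread over several
	 * requests, and only the first request of a payload carries a
	 * header.
	 */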
	len = min((int)(video->max_payload_size - video->payload_size), len);
	ret = uvc_video_encode_data(video, buf, mem, len);

	video->payload_size += ret;
	len -= ret;

	req->length = video->req_size - len;
	req->zero = video->payload_size == video->max_payload_size;

	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		uvcg_complete_buffer(&video->queue, buf);
		video->fid ^= UVC_STREAM_FID;

		video->payload_size = 0;
	}

	if (video->payload_size == video->max_payload_size ||
	    buf->bytesused == video->queue.buf_used)
		video->payload_size = 0;
}

static void
uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	unsigned int pending = buf->bytesused - video->queue.buf_used;
	struct uvc_request *ureq = req->context;
	struct scatterlist *sg, *iter;
	unsigned int len = video->req_size;
	unsigned int sg_left, part = 0;
	unsigned int i;
	int header_len;

	sg = ureq->sgt.sgl;
	sg_init_table(sg, ureq->sgt.nents);

	/* Init the header. */
	header_len = uvc_video_encode_header(video, buf, ureq->header,
					     video->req_size);
	sg_set_buf(sg, ureq->header, header_len);
	len -= header_len;

	if (pending <= len)
		len = pending;

	req->length = (len == pending) ?
		len + header_len : video->req_size;

	/* Init the pending sgs with payload */
	sg = sg_next(sg);

	for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
		if (!len || !buf->sg || !sg_dma_len(buf->sg))
			break;

		sg_left = sg_dma_len(buf->sg) - buf->offset;
		part = min_t(unsigned int, len, sg_left);

		sg_set_page(iter, sg_page(buf->sg), part, buf->offset);

		if (part == sg_left) {
			buf->offset = 0;
			buf->sg = sg_next(buf->sg);
		} else {
			buf->offset += part;
		}
		len -= part;
	}

	/* Assign the video data with header. */
	req->buf = NULL;
	req->sg = ureq->sgt.sgl;
	req->num_sgs = i + 1;

	req->length -= len;
	video->queue.buf_used += req->length - header_len;

	if (buf->bytesused == video->queue.buf_used || !buf->sg) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		buf->offset = 0;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}

static void
uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	int len = video->req_size;
	int ret;

	/* Add the header. */
	ret = uvc_video_encode_header(video, buf, mem, len);
	mem += ret;
	len -= ret;

	/* Process video data. */
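	/*
	 * Every isochronous request starts with its own payload header, so
	 * the rest of the request can simply be filled with video data; no
	 * max_payload_size bookkeeping is needed here.
	 */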
	ret = uvc_video_encode_data(video, buf, mem, len);
	len -= ret;

	req->length = video->req_size - len;

	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		uvcg_complete_buffer(&video->queue, buf);
		video->fid ^= UVC_STREAM_FID;
	}
}

/* --------------------------------------------------------------------------
 * Request handling
 */

static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
	int ret;

	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
	if (ret < 0) {
		uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
			 ret);

		/* If the endpoint is disabled the descriptor may be NULL. */
		if (video->ep->desc) {
			/* Isochronous endpoints can't be halted. */
			if (usb_endpoint_xfer_bulk(video->ep->desc))
				usb_ep_set_halt(video->ep);
		}
	}

	return ret;
}

static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_request *ureq = req->context;
	struct uvc_video *video = ureq->video;
	struct uvc_video_queue *queue = &video->queue;
	struct uvc_device *uvc = video->uvc;
	unsigned long flags;

	switch (req->status) {
	case 0:
		break;

	case -ESHUTDOWN:	/* disconnect from host. */
		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
		uvcg_queue_cancel(queue, 1);
		break;

	default:
		uvcg_info(&video->uvc->func,
			  "VS request completed with status %d.\n",
			  req->status);
		uvcg_queue_cancel(queue, 0);
	}

	if (ureq->last_buf) {
		uvcg_complete_buffer(&video->queue, ureq->last_buf);
		ureq->last_buf = NULL;
	}

	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
	spin_unlock_irqrestore(&video->req_lock, flags);

	if (uvc->state == UVC_STATE_STREAMING)
		schedule_work(&video->pump);
}

static int
uvc_video_free_requests(struct uvc_video *video)
{
	unsigned int i;

	if (video->ureq) {
		for (i = 0; i < video->uvc_num_requests; ++i) {
			sg_free_table(&video->ureq[i].sgt);

			if (video->ureq[i].req) {
				usb_ep_free_request(video->ep, video->ureq[i].req);
				video->ureq[i].req = NULL;
			}

			if (video->ureq[i].req_buffer) {
				kfree(video->ureq[i].req_buffer);
				video->ureq[i].req_buffer = NULL;
			}
		}

		kfree(video->ureq);
		video->ureq = NULL;
	}

	INIT_LIST_HEAD(&video->req_free);
	video->req_size = 0;
	return 0;
}

static int
uvc_video_alloc_requests(struct uvc_video *video)
{
	unsigned int req_size;
	unsigned int i;
	int ret = -ENOMEM;

	BUG_ON(video->req_size);

	req_size = video->ep->maxpacket
		 * max_t(unsigned int, video->ep->maxburst, 1)
		 * (video->ep->mult);

	video->ureq = kcalloc(video->uvc_num_requests, sizeof(struct uvc_request), GFP_KERNEL);
	if (video->ureq == NULL)
		return -ENOMEM;

	for (i = 0; i < video->uvc_num_requests; ++i) {
		video->ureq[i].req_buffer = kmalloc(req_size, GFP_KERNEL);
		if (video->ureq[i].req_buffer == NULL)
			goto error;

		video->ureq[i].req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
		if (video->ureq[i].req == NULL)
			goto error;

		video->ureq[i].req->buf = video->ureq[i].req_buffer;
		video->ureq[i].req->length = 0;
		video->ureq[i].req->complete = uvc_video_complete;
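		/*
		 * The completion handler retrieves the uvc_request, and from
		 * it the video instance, through req->context.
		 */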
		video->ureq[i].req->context = &video->ureq[i];
		video->ureq[i].video = video;
		video->ureq[i].last_buf = NULL;

		list_add_tail(&video->ureq[i].req->list, &video->req_free);
		/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
		sg_alloc_table(&video->ureq[i].sgt,
			       DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
					    PAGE_SIZE) + 2, GFP_KERNEL);
	}

	video->req_size = req_size;

	return 0;

error:
	uvc_video_free_requests(video);
	return ret;
}

/* --------------------------------------------------------------------------
 * Video streaming
 */

/*
 * uvcg_video_pump - Pump video data into the USB requests
 *
 * This function fills the available USB requests (listed in req_free) with
 * video data from the queued buffers.
 */
static void uvcg_video_pump(struct work_struct *work)
{
	struct uvc_video *video = container_of(work, struct uvc_video, pump);
	struct uvc_video_queue *queue = &video->queue;
	struct usb_request *req = NULL;
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret;

	while (video->ep->enabled) {
		/* Retrieve the first available USB request, protected by the
		 * request lock.
		 */
		spin_lock_irqsave(&video->req_lock, flags);
		if (list_empty(&video->req_free)) {
			spin_unlock_irqrestore(&video->req_lock, flags);
			return;
		}
		req = list_first_entry(&video->req_free, struct usb_request,
				       list);
		list_del(&req->list);
		spin_unlock_irqrestore(&video->req_lock, flags);

		/* Retrieve the first available video buffer and fill the
		 * request, protected by the video queue irqlock.
		 */
		spin_lock_irqsave(&queue->irqlock, flags);
		buf = uvcg_queue_head(queue);
		if (buf == NULL) {
			spin_unlock_irqrestore(&queue->irqlock, flags);
			break;
		}

		video->encode(req, video, buf);

		/* With usb3 we have more requests. This will decrease the
		 * interrupt load to a quarter but also catches the corner
		 * cases, which need to be handled.
		 */
		if (list_empty(&video->req_free) ||
		    buf->state == UVC_BUF_STATE_DONE ||
		    !(video->req_int_count %
		      DIV_ROUND_UP(video->uvc_num_requests, 4))) {
			video->req_int_count = 0;
			req->no_interrupt = 0;
		} else {
			req->no_interrupt = 1;
		}

		/* Queue the USB request */
		ret = uvcg_video_ep_queue(video, req);
		spin_unlock_irqrestore(&queue->irqlock, flags);

		if (ret < 0) {
			uvcg_queue_cancel(queue, 0);
			break;
		}

		/* Endpoint now owns the request */
		req = NULL;
		video->req_int_count++;
	}

	if (!req)
		return;

	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
	spin_unlock_irqrestore(&video->req_lock, flags);
	return;
}

/*
 * Enable or disable the video stream.
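 *
 * Disabling cancels the pump worker and the buffer queue, dequeues and frees
 * the pending requests and releases the queue. Enabling allocates requests,
 * selects the encode operation (bulk, isochronous or isochronous with
 * scatter-gather) and schedules the pump worker.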
 */
int uvcg_video_enable(struct uvc_video *video, int enable)
{
	unsigned int i;
	int ret;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video enable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	if (!enable) {
		cancel_work_sync(&video->pump);
		uvcg_queue_cancel(&video->queue, 0);

		for (i = 0; i < video->uvc_num_requests; ++i)
			if (video->ureq && video->ureq[i].req)
				usb_ep_dequeue(video->ep, video->ureq[i].req);

		uvc_video_free_requests(video);
		uvcg_queue_enable(&video->queue, 0);
		return 0;
	}

	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
		return ret;

	if ((ret = uvc_video_alloc_requests(video)) < 0)
		return ret;

	if (video->max_payload_size) {
		video->encode = uvc_video_encode_bulk;
		video->payload_size = 0;
	} else
		video->encode = video->queue.use_sg ?
			uvc_video_encode_isoc_sg : uvc_video_encode_isoc;

	video->req_int_count = 0;

	schedule_work(&video->pump);

	return ret;
}

/*
 * Initialize the UVC video stream.
 */
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
	INIT_LIST_HEAD(&video->req_free);
	spin_lock_init(&video->req_lock);
	INIT_WORK(&video->pump, uvcg_video_pump);

	video->uvc = uvc;
	video->fcc = V4L2_PIX_FMT_YUYV;
	video->bpp = 16;
	video->width = 320;
	video->height = 240;
	video->imagesize = 320 * 240 * 2;

	/* Initialize the video buffers queue. */
	uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
			V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
	return 0;
}