// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

static const struct vivid_fmt formats_ovl[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
		.vdownsampling = { 1 },
		.bit_depth = { 16 },
		.planes = 1,
		.buffers = 1,
	},
	{
		.fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
		.vdownsampling = { 1 },
		.bit_depth = { 16 },
		.planes = 1,
		.buffers = 1,
	},
	{
		.fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
		.vdownsampling = { 1 },
		.bit_depth = { 16 },
		.planes = 1,
		.buffers = 1,
	},
};

/* The number of discrete webcam framesizes */
#define VIVID_WEBCAM_SIZES 6
/* The number of discrete webcam frameintervals */
#define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
	{  320,  180 },
	{  640,  360 },
	{  640,  480 },
	{ 1280,  720 },
	{ 1920, 1080 },
	{ 3840, 2160 },
};

/*
 * Intervals must be in increasing order and there must be twice as many
 * elements in this array as there are in webcam_sizes.
 */
static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
	{  1,  1 },
	{  1,  2 },
	{  1,  4 },
	{  1,  5 },
	{  1, 10 },
	{  2, 25 },
	{  1, 15 },
	{  1, 25 },
	{  1, 30 },
	{  1, 40 },
	{  1, 50 },
	{  1, 60 },
};

static int vid_cap_queue_setup(struct vb2_queue *vq,
			       unsigned *nbuffers, unsigned *nplanes,
			       unsigned sizes[], struct device *alloc_devs[])
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned h = dev->fmt_cap_rect.height;
	unsigned p;

	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * You cannot use read() with FIELD_ALTERNATE since the field
		 * information (TOP/BOTTOM) cannot be passed back to the user.
		 */
		if (vb2_fileio_is_active(vq))
			return -EINVAL;
	}

	if (dev->queue_setup_error) {
		/*
		 * Error injection: test what happens if queue_setup() returns
		 * an error.
		 */
		dev->queue_setup_error = false;
		return -EINVAL;
	}
	if (*nplanes) {
		/*
		 * Check if the number of requested planes match
		 * the number of buffers in the current format. You can't mix that.
		 */
		if (*nplanes != buffers)
			return -EINVAL;
		for (p = 0; p < buffers; p++) {
			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
				       dev->fmt_cap->data_offset[p])
				return -EINVAL;
		}
	} else {
		for (p = 0; p < buffers; p++)
			sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
				   dev->fmt_cap->vdownsampling[p] +
				   dev->fmt_cap->data_offset[p];
	}

	if (vq->num_buffers + *nbuffers < 2)
		*nbuffers = 2 - vq->num_buffers;

	*nplanes = buffers;

	dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
	for (p = 0; p < buffers; p++)
		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);

	return 0;
}

static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size;
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned p;

	dprintk(dev, 1, "%s\n", __func__);

	if (WARN_ON(NULL == dev->fmt_cap))
		return -EINVAL;

	if (dev->buf_prepare_error) {
		/*
		 * Error injection: test what happens if buf_prepare() returns
		 * an error.
		 */
		dev->buf_prepare_error = false;
		return -EINVAL;
	}
	for (p = 0; p < buffers; p++) {
		size = (tpg_g_line_width(&dev->tpg, p) *
			dev->fmt_cap_rect.height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];

		if (vb2_plane_size(vb, p) < size) {
			dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
				__func__, p, vb2_plane_size(vb, p), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, p, size);
		vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
	}

	return 0;
}

static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_timecode *tc = &vbuf->timecode;
	unsigned fps = 25;
	unsigned seq = vbuf->sequence;

	if (!vivid_is_sdtv_cap(dev))
		return;

	/*
	 * Set the timecode. Rarely used, so it is interesting to
	 * test this.
	 */
	vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
	if (dev->std_cap[dev->input] & V4L2_STD_525_60)
		fps = 30;
	tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
	tc->flags = 0;
	tc->frames = seq % fps;
	tc->seconds = (seq / fps) % 60;
	tc->minutes = (seq / (60 * fps)) % 60;
	tc->hours = (seq / (60 * 60 * fps)) % 24;
}

static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);

	dprintk(dev, 1, "%s\n", __func__);

	spin_lock(&dev->slock);
	list_add_tail(&buf->list, &dev->vid_cap_active);
	spin_unlock(&dev->slock);
}

static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned i;
	int err;

	if (vb2_is_streaming(&dev->vb_vid_out_q))
		dev->can_loop_video = vivid_vid_can_loop(dev);

	dev->vid_cap_seq_count = 0;
	dprintk(dev, 1, "%s\n", __func__);
	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
	if (dev->start_streaming_error) {
		dev->start_streaming_error = false;
		err = -EINVAL;
	} else {
		err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
	}
	if (err) {
		struct vivid_buffer *buf, *tmp;

		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
	}
	return err;
}

/* abort streaming and wait for last buffer */
static void vid_cap_stop_streaming(struct vb2_queue *vq)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);

	dprintk(dev, 1, "%s\n", __func__);
	vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
	dev->can_loop_video = false;
}

static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
}

const struct vb2_ops vivid_vid_cap_qops = {
	.queue_setup		= vid_cap_queue_setup,
	.buf_prepare		= vid_cap_buf_prepare,
	.buf_finish		= vid_cap_buf_finish,
	.buf_queue		= vid_cap_buf_queue,
	.start_streaming	= vid_cap_start_streaming,
	.stop_streaming		= vid_cap_stop_streaming,
	.buf_request_complete	= vid_cap_buf_request_complete,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/*
 * Determine the 'picture' quality based on the current TV frequency: either
 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
 * signal or NOISE for no signal.
 */
void vivid_update_quality(struct vivid_dev *dev)
{
	unsigned freq_modulus;

	if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
		/*
		 * The 'noise' will only be replaced by the actual video
		 * if the output video matches the input video settings.
		 */
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_hdmi_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_sdtv_cap(dev) &&
	    VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (!vivid_is_tv_cap(dev)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
		return;
	}

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just noise.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (freq_modulus > 2 * 16) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
			next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
		return;
	}
	if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
		tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
	else
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
}

/*
 * Get the current picture quality and the associated afc value.
 */
static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
{
	unsigned freq_modulus;

	if (afc)
		*afc = 0;
	if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
	    tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
		return tpg_g_quality(&dev->tpg);

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just gray.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (afc)
		*afc = freq_modulus - 1 * 16;
	return TPG_QUAL_GRAY;
}

enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_aspect_ratio[dev->input];

	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_aspect_ratio[dev->input];

	return TPG_VIDEO_ASPECT_IMAGE;
}

static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	if (vivid_is_hdmi_cap(dev) &&
	    dev->src_rect.width == 720 && dev->src_rect.height <= 576)
		return dev->src_rect.height == 480 ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	return TPG_PIXEL_ASPECT_SQUARE;
}

/*
 * Called whenever the format has to be reset which can occur when
 * changing inputs, standard, timings, etc.
 */
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	unsigned size;
	u64 pixelclock;

	switch (dev->input_type[dev->input]) {
	case WEBCAM:
	default:
		dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
		dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
		dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
		dev->field_cap = V4L2_FIELD_NONE;
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case TV:
	case SVID:
		dev->field_cap = dev->tv_field_cap;
		dev->src_rect.width = 720;
		if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
			dev->src_rect.height = 480;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
			dev->service_set_cap = V4L2_SLICED_CAPTION_525;
		} else {
			dev->src_rect.height = 576;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
			dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
		}
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case HDMI:
		dev->src_rect.width = bt->width;
		dev->src_rect.height = bt->height;
		size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
		if (dev->reduced_fps && can_reduce_fps(bt)) {
			pixelclock = div_u64(bt->pixelclock * 1000, 1001);
			bt->flags |= V4L2_DV_FL_REDUCED_FPS;
		} else {
			pixelclock = bt->pixelclock;
			bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
		}
		dev->timeperframe_vid_cap = (struct v4l2_fract) {
			size / 100, (u32)pixelclock / 100
		};
		if (bt->interlaced)
			dev->field_cap = V4L2_FIELD_ALTERNATE;
		else
			dev->field_cap = V4L2_FIELD_NONE;

		/*
		 * We can be called from within s_ctrl, in that case we can't
		 * set/get controls. Luckily we don't need to in that case.
		 */
		if (keep_controls || !dev->colorspace)
			break;
		if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
			if (bt->width == 720 && bt->height <= 576)
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			else
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
		} else {
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
		}
		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
		break;
	}
	vfree(dev->bitmap_cap);
	dev->bitmap_cap = NULL;
	vivid_update_quality(dev);
	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
	dev->crop_cap = dev->src_rect;
	dev->crop_bounds_cap = dev->src_rect;
	if (dev->bitmap_cap &&
	    (dev->compose_cap.width != dev->crop_cap.width ||
	     dev->compose_cap.height != dev->crop_cap.height)) {
		vfree(dev->bitmap_cap);
		dev->bitmap_cap = NULL;
	}
	dev->compose_cap = dev->crop_cap;
	if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
		dev->compose_cap.height /= 2;
	dev->fmt_cap_rect = dev->compose_cap;
	tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
	tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
	tpg_update_mv_step(&dev->tpg);
}

/* Map the field to something that is valid for the current input */
static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
{
	if (vivid_is_sdtv_cap(dev)) {
		switch (field) {
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
		case V4L2_FIELD_TOP:
		case V4L2_FIELD_BOTTOM:
		case V4L2_FIELD_ALTERNATE:
			return field;
		case V4L2_FIELD_INTERLACED:
		default:
			return V4L2_FIELD_INTERLACED;
		}
	}
	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_cap[dev->input].bt.interlaced ?
			V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
	return V4L2_FIELD_NONE;
}

static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_colorspace(&dev->tpg);
	return dev->colorspace_out;
}

static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_xfer_func(&dev->tpg);
	return dev->xfer_func_out;
}

static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_ycbcr_enc(&dev->tpg);
	return dev->ycbcr_enc_out;
}

static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_hsv_enc(&dev->tpg);
	return dev->hsv_enc_out;
}

static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_quantization(&dev->tpg);
	return dev->quantization_out;
}

int vivid_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	unsigned p;

	mp->width = dev->fmt_cap_rect.width;
	mp->height = dev->fmt_cap_rect.height;
	mp->field = dev->field_cap;
	mp->pixelformat = dev->fmt_cap->fourcc;
	mp->colorspace = vivid_colorspace_cap(dev);
	mp->xfer_func = vivid_xfer_func_cap(dev);
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
		mp->hsv_enc = vivid_hsv_enc_cap(dev);
	else
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	mp->num_planes = dev->fmt_cap->buffers;
	for (p = 0; p < mp->num_planes; p++) {
		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
		mp->plane_fmt[p].sizeimage =
			(tpg_g_line_width(&dev->tpg, p) * mp->height) /
			dev->fmt_cap->vdownsampling[p] +
			dev->fmt_cap->data_offset[p];
	}
	return 0;
}

int vivid_try_fmt_vid_cap(struct file *file, void *priv,
			  struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	unsigned bytesperline, max_bpl;
	unsigned factor = 1;
	unsigned w, h;
	unsigned p;
	bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);

	fmt = vivid_get_format(dev, mp->pixelformat);
	if (!fmt) {
		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
			mp->pixelformat);
		mp->pixelformat = V4L2_PIX_FMT_YUYV;
		fmt = vivid_get_format(dev, mp->pixelformat);
	}

	mp->field = vivid_field_cap(dev, mp->field);
	if (vivid_is_webcam(dev)) {
		const struct v4l2_frmsize_discrete *sz =
			v4l2_find_nearest_size(webcam_sizes,
					       VIVID_WEBCAM_SIZES, width,
					       height, mp->width, mp->height);

		w = sz->width;
		h = sz->height;
	} else if (vivid_is_sdtv_cap(dev)) {
		w = 720;
		h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
	} else {
		w = dev->src_rect.width;
		h = dev->src_rect.height;
	}
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;
	if (vivid_is_webcam(dev) ||
	    (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
		mp->width = w;
		mp->height = h / factor;
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };

		v4l2_rect_set_min_size(&r, &vivid_min_rect);
		v4l2_rect_set_max_size(&r, &vivid_max_rect);
		if (dev->has_scaler_cap && !dev->has_compose_cap) {
			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };

			v4l2_rect_set_max_size(&r, &max_r);
		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
			v4l2_rect_set_max_size(&r, &dev->src_rect);
		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
			v4l2_rect_set_min_size(&r, &dev->src_rect);
		}
		mp->width = r.width;
		mp->height = r.height / factor;
	}

	/* This driver supports custom bytesperline values */

	mp->num_planes = fmt->buffers;
	for (p = 0; p < fmt->buffers; p++) {
		/* Calculate the minimum supported bytesperline value */
		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
		/* Calculate the maximum supported bytesperline value */
		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;

		if (pfmt[p].bytesperline > max_bpl)
			pfmt[p].bytesperline = max_bpl;
		if (pfmt[p].bytesperline < bytesperline)
			pfmt[p].bytesperline = bytesperline;

		pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
				fmt->vdownsampling[p] + fmt->data_offset[p];

		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
	}
	for (p = fmt->buffers; p < fmt->planes; p++)
		pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
			(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
			(fmt->bit_depth[0] / fmt->vdownsampling[0]);

	if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
		mp->colorspace = vivid_colorspace_cap(dev);

	if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
		mp->xfer_func = vivid_xfer_func_cap(dev);

	if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
		if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
			mp->hsv_enc = vivid_hsv_enc_cap(dev);
	} else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
		if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
			mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	} else {
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	}

	if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
	    fmt->color_enc == TGP_COLOR_ENC_RGB) {
		if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
			mp->quantization = vivid_quantization_cap(dev);
	} else {
		mp->quantization = vivid_quantization_cap(dev);
	}

	memset(mp->reserved, 0, sizeof(mp->reserved));
	return 0;
}

int vivid_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	struct vb2_queue *q = &dev->vb_vid_cap_q;
	int ret = vivid_try_fmt_vid_cap(file, priv, f);
	unsigned factor = 1;
	unsigned p;
	unsigned i;

	if (ret < 0)
		return ret;

	if (vb2_is_busy(q)) {
		dprintk(dev, 1, "%s device busy\n", __func__);
		return -EBUSY;
	}

	if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
		dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
		return -EBUSY;
	}

	dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;

	/* Note: the webcam input doesn't support scaling, cropping or composing */

	if (!vivid_is_webcam(dev) &&
	    (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		if (dev->has_scaler_cap) {
			if (dev->has_compose_cap)
				v4l2_rect_map_inside(compose, &r);
			else
				*compose = r;
			if (dev->has_crop_cap && !dev->has_compose_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					r.width / MAX_ZOOM,
					factor * r.height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					r.width * MAX_ZOOM,
					factor * r.height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			} else if (dev->has_crop_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					compose->width / MAX_ZOOM,
					factor * compose->height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					compose->width * MAX_ZOOM,
					factor * compose->height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
			r.height *= factor;
			v4l2_rect_set_size_to(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			r = *crop;
			r.height /= factor;
			v4l2_rect_set_size_to(compose, &r);
		} else if (!dev->has_crop_cap) {
			v4l2_rect_map_inside(compose, &r);
		} else {
			r.height *= factor;
			v4l2_rect_set_max_size(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			compose->top *= factor;
			compose->height *= factor;
			v4l2_rect_set_size_to(compose, crop);
			v4l2_rect_map_inside(compose, &r);
			compose->top /= factor;
			compose->height /= factor;
		}
	} else if (vivid_is_webcam(dev)) {
		/* Guaranteed to be a match */
		for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
			if (webcam_sizes[i].width == mp->width &&
			    webcam_sizes[i].height == mp->height)
				break;
		dev->webcam_size_idx = i;
		if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
			dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
		vivid_update_format_cap(dev, false);
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		v4l2_rect_set_size_to(compose, &r);
		r.height *= factor;
		v4l2_rect_set_size_to(crop, &r);
	}

	dev->fmt_cap_rect.width = mp->width;
	dev->fmt_cap_rect.height = mp->height;
	tpg_s_buf_height(&dev->tpg, mp->height);
	tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
	for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
		tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
	dev->field_cap = mp->field;
	if (dev->field_cap == V4L2_FIELD_ALTERNATE)
		tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
	else
		tpg_s_field(&dev->tpg, dev->field_cap, false);
	tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
	if (vivid_is_sdtv_cap(dev))
		dev->tv_field_cap = mp->field;
	tpg_update_mv_step(&dev->tpg);
	dev->tpg.colorspace = mp->colorspace;
	dev->tpg.xfer_func = mp->xfer_func;
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
		dev->tpg.ycbcr_enc = mp->ycbcr_enc;
	else
		dev->tpg.hsv_enc = mp->hsv_enc;
	dev->tpg.quantization = mp->quantization;

	return 0;
}

int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_g_fmt_vid_cap(file, priv, f);
}

int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_try_fmt_vid_cap(file, priv, f);
}

int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_s_fmt_vid_cap(file, priv, f);
}

int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
			 struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
}

int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
			   struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
}

int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
			 struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
}

int vivid_vid_cap_g_selection(struct file *file, void *priv,
			      struct v4l2_selection *sel)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	sel->r.left = sel->r.top = 0;
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->crop_cap;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->src_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = vivid_max_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->compose_cap;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->fmt_cap_rect;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	unsigned orig_compose_w = compose->width;
	unsigned orig_compose_h = compose->height;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	int ret;

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -ENODATA;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->src_rect);
		v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
		s->r.top /= factor;
		s->r.height /= factor;
		if (dev->has_scaler_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;
			struct v4l2_rect max_rect = {
				0, 0,
				s->r.width * MAX_ZOOM,
				s->r.height * MAX_ZOOM
			};
			struct v4l2_rect min_rect = {
				0, 0,
				s->r.width / MAX_ZOOM,
				s->r.height / MAX_ZOOM
			};

			v4l2_rect_set_min_size(&fmt, &min_rect);
			if (!dev->has_compose_cap)
				v4l2_rect_set_max_size(&fmt, &max_rect);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			if (dev->has_compose_cap) {
				v4l2_rect_set_min_size(compose, &min_rect);
				v4l2_rect_set_max_size(compose, &max_rect);
				v4l2_rect_map_inside(compose, &fmt);
			}
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
		} else if (dev->has_compose_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;

			v4l2_rect_set_min_size(&fmt, &s->r);
			if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
		} else {
			if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
			v4l2_rect_set_size_to(compose, &s->r);
			v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
			tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
		}
		s->r.top *= factor;
		s->r.height *= factor;
		*crop = s->r;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
		v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
		if (dev->has_scaler_cap) {
			struct v4l2_rect max_rect = {
				0, 0,
				dev->src_rect.width * MAX_ZOOM,
				(dev->src_rect.height / factor) * MAX_ZOOM
			};

			v4l2_rect_set_max_size(&s->r, &max_rect);
			if (dev->has_crop_cap) {
				struct v4l2_rect min_rect = {
					0, 0,
					s->r.width / MAX_ZOOM,
					(s->r.height * factor) / MAX_ZOOM
				};
				struct v4l2_rect max_rect = {
					0, 0,
					s->r.width * MAX_ZOOM,
					(s->r.height * factor) * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_rect);
				v4l2_rect_set_max_size(crop, &max_rect);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap) {
			s->r.top *= factor;
			s->r.height *= factor;
			v4l2_rect_set_max_size(&s->r, &dev->src_rect);
			v4l2_rect_set_size_to(crop, &s->r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			s->r.top /= factor;
			s->r.height /= factor;
		} else {
			v4l2_rect_set_size_to(&s->r, &dev->src_rect);
			s->r.height /= factor;
		}
		v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
		*compose = s->r;
		break;
	default:
		return -EINVAL;
	}

	if (dev->bitmap_cap && (compose->width != orig_compose_w ||
				compose->height != orig_compose_h)) {
		vfree(dev->bitmap_cap);
		dev->bitmap_cap = NULL;
	}
	tpg_s_crop_compose(&dev->tpg, crop, compose);
	return 0;
}

int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
				int type, struct v4l2_fract *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (vivid_get_pixel_aspect(dev)) {
	case TPG_PIXEL_ASPECT_NTSC:
		f->numerator = 11;
		f->denominator = 10;
		break;
	case TPG_PIXEL_ASPECT_PAL:
		f->numerator = 54;
		f->denominator = 59;
		break;
	default:
		break;
	}
	return 0;
}

int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
				struct v4l2_fmtdesc *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;

	if (dev->multiplanar)
		return -ENOTTY;

	if (f->index >= ARRAY_SIZE(formats_ovl))
		return -EINVAL;

	fmt = &formats_ovl[f->index];

	f->pixelformat = fmt->fourcc;
	return 0;
}

int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct v4l2_rect *compose = &dev->compose_cap;
	struct v4l2_window *win = &f->fmt.win;
	unsigned clipcount = win->clipcount;

	if (dev->multiplanar)
		return -ENOTTY;

	win->w.top = dev->overlay_cap_top;
	win->w.left = dev->overlay_cap_left;
	win->w.width = compose->width;
	win->w.height = compose->height;
	win->field = dev->overlay_cap_field;
	win->clipcount = dev->clipcount_cap;
	if (clipcount > dev->clipcount_cap)
		clipcount = dev->clipcount_cap;
	if (dev->bitmap_cap == NULL)
		win->bitmap = NULL;
	else if (win->bitmap) {
		if (copy_to_user(win->bitmap, dev->bitmap_cap,
				 ((compose->width + 7) / 8) * compose->height))
			return -EFAULT;
	}
	if (clipcount && win->clips) {
		if (copy_to_user(win->clips, dev->clips_cap,
				 clipcount * sizeof(dev->clips_cap[0])))
			return -EFAULT;
	}
	return 0;
}

int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
			       struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct v4l2_rect *compose = &dev->compose_cap;
	struct v4l2_window *win = &f->fmt.win;
	int i, j;

	if (dev->multiplanar)
		return -ENOTTY;

	win->w.left = clamp_t(int, win->w.left,
			      -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
	win->w.top = clamp_t(int, win->w.top,
			     -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
	win->w.width = compose->width;
	win->w.height = compose->height;
	if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
		win->field = V4L2_FIELD_ANY;
	win->chromakey = 0;
	win->global_alpha = 0;
	if (win->clipcount && !win->clips)
		win->clipcount = 0;
	if (win->clipcount > MAX_CLIPS)
		win->clipcount = MAX_CLIPS;
	if (win->clipcount) {
		if (copy_from_user(dev->try_clips_cap, win->clips,
				   win->clipcount * sizeof(dev->clips_cap[0])))
			return -EFAULT;
		for (i = 0; i < win->clipcount; i++) {
			struct v4l2_rect *r = &dev->try_clips_cap[i].c;

			r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
			r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
			r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
			r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
		}
		/*
		 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
		 * number and it's typically a one-time deal.
		 */
		for (i = 0; i < win->clipcount - 1; i++) {
			struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;

			for (j = i + 1; j < win->clipcount; j++) {
				struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;

				if (v4l2_rect_overlap(r1, r2))
					return -EINVAL;
			}
		}
		if (copy_to_user(win->clips, dev->try_clips_cap,
				 win->clipcount * sizeof(dev->clips_cap[0])))
			return -EFAULT;
	}
	return 0;
}

int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct v4l2_rect *compose = &dev->compose_cap;
	struct v4l2_window *win = &f->fmt.win;
	int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
	unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
	unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
	void *new_bitmap = NULL;

	if (ret)
		return ret;

	if (win->bitmap) {
		new_bitmap = vzalloc(bitmap_size);

		if (new_bitmap == NULL)
			return -ENOMEM;
		if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
			vfree(new_bitmap);
			return -EFAULT;
		}
	}

	dev->overlay_cap_top = win->w.top;
	dev->overlay_cap_left = win->w.left;
	dev->overlay_cap_field = win->field;
	vfree(dev->bitmap_cap);
	dev->bitmap_cap = new_bitmap;
	dev->clipcount_cap = win->clipcount;
	if (dev->clipcount_cap)
		memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
	return 0;
}

int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;

	if (i && dev->fb_vbase_cap == NULL)
		return -EINVAL;

	if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
		dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
		return -EINVAL;
	}

	if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
		return -EBUSY;
	dev->overlay_cap_owner = i ? fh : NULL;
	return 0;
}

int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
			 struct v4l2_framebuffer *a)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;

	*a = dev->fb_cap;
	a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
			V4L2_FBUF_CAP_LIST_CLIPPING;
	a->flags = V4L2_FBUF_FLAG_PRIMARY;
	a->fmt.field = V4L2_FIELD_NONE;
	a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
	a->fmt.priv = 0;
	return 0;
}

int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
			 const struct v4l2_framebuffer *a)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;

	if (dev->multiplanar)
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (dev->overlay_cap_owner)
		return -EBUSY;

	if (a->base == NULL) {
		dev->fb_cap.base = NULL;
		dev->fb_vbase_cap = NULL;
		return 0;
	}

	if (a->fmt.width < 48 || a->fmt.height < 32)
		return -EINVAL;
	fmt = vivid_get_format(dev, a->fmt.pixelformat);
	if (!fmt || !fmt->can_do_overlay)
		return -EINVAL;
	if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
		return -EINVAL;
	if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height)
		return -EINVAL;

	/*
	 * Only support the framebuffer of one of the vivid instances.
	 * Anything else is rejected.
	 */
	if (!vivid_validate_fb(a))
		return -EINVAL;

	dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
	dev->fb_cap = *a;
	dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
					-dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
	dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
				       -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
	return 0;
}

static const struct v4l2_audio vivid_audio_inputs[] = {
	{ 0, "TV", V4L2_AUDCAP_STEREO },
	{ 1, "Line-In", V4L2_AUDCAP_STEREO },
};

int vidioc_enum_input(struct file *file, void *priv,
		      struct v4l2_input *inp)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (inp->index >= dev->num_inputs)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	switch (dev->input_type[inp->index]) {
	case WEBCAM:
		snprintf(inp->name, sizeof(inp->name), "Webcam %u",
			 dev->input_name_counter[inp->index]);
		inp->capabilities = 0;
		break;
	case TV:
		snprintf(inp->name, sizeof(inp->name), "TV %u",
			 dev->input_name_counter[inp->index]);
		inp->type = V4L2_INPUT_TYPE_TUNER;
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case SVID:
		snprintf(inp->name, sizeof(inp->name), "S-Video %u",
			 dev->input_name_counter[inp->index]);
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case HDMI:
		snprintf(inp->name, sizeof(inp->name), "HDMI %u",
			 dev->input_name_counter[inp->index]);
		inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
		if (dev->edid_blocks == 0 ||
		    dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
			 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		break;
	}
	if (dev->sensor_hflip)
		inp->status |= V4L2_IN_ST_HFLIP;
	if (dev->sensor_vflip)
		inp->status |= V4L2_IN_ST_VFLIP;
	if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
		if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		} else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		} else if (vivid_is_tv_cap(dev)) {
			switch (tpg_g_quality(&dev->tpg)) {
			case TPG_QUAL_GRAY:
				inp->status |= V4L2_IN_ST_COLOR_KILL;
				break;
			case TPG_QUAL_NOISE:
				inp->status |= V4L2_IN_ST_NO_H_LOCK;
				break;
			default:
				break;
			}
		}
	}
	return 0;
}

int vidioc_g_input(struct file *file, void *priv, unsigned *i)
{
	struct vivid_dev *dev = video_drvdata(file);

	*i = dev->input;
	return 0;
}

int vidioc_s_input(struct file *file, void *priv, unsigned i)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
	unsigned brightness;

	if (i >= dev->num_inputs)
		return -EINVAL;

	if (i == dev->input)
		return 0;

	if (vb2_is_busy(&dev->vb_vid_cap_q) ||
	    vb2_is_busy(&dev->vb_vbi_cap_q) ||
	    vb2_is_busy(&dev->vb_meta_cap_q))
		return -EBUSY;

	dev->input = i;
	dev->vid_cap_dev.tvnorms = 0;
	if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
		dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
		dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
	}
	dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	vivid_update_format_cap(dev, false);

	if (dev->colorspace) {
		switch (dev->input_type[i]) {
		case WEBCAM:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			break;
		case TV:
		case SVID:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			break;
		case HDMI:
			if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
				if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
				else
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			} else {
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			}
			break;
		}
	}

	/*
	 * Modify the brightness range depending on the input.
	 * This makes it easy to use vivid to test if applications can
	 * handle control range modifications and is also how this is
	 * typically used in practice as different inputs may be hooked
	 * up to different receivers with different control ranges.
	 */
	brightness = 128 * i + dev->input_brightness[i];
	v4l2_ctrl_modify_range(dev->brightness,
			       128 * i, 255 + 128 * i, 1, 128 + 128 * i);
	v4l2_ctrl_s_ctrl(dev->brightness, brightness);

	/* Restore per-input states. */
	v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
			   vivid_is_hdmi_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
			   dev->dv_timings_signal_mode[dev->input] ==
			   SELECTED_DV_TIMINGS);
	v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
	v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
			   dev->std_signal_mode[dev->input]);

	if (vivid_is_hdmi_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
				 dev->dv_timings_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
				 dev->query_dv_timings[dev->input]);
	} else if (vivid_is_sdtv_cap(dev)) {
		v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
				 dev->std_signal_mode[dev->input]);
		v4l2_ctrl_s_ctrl(dev->ctrl_standard,
				 dev->std_signal_mode[dev->input]);
	}

	return 0;
}

int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	*vin = vivid_audio_inputs[vin->index];
	return 0;
}

int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	*vin = vivid_audio_inputs[dev->tv_audio_input];
	return 0;
}

int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	dev->tv_audio_input = vin->index;
	return 0;
}

int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	vf->frequency = dev->tv_freq;
	return 0;
}

int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
	if (vivid_is_tv_cap(dev))
		vivid_update_quality(dev);
	return 0;
}

int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vt->index != 0)
		return -EINVAL;
	if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
		return -EINVAL;
	dev->tv_audmode = vt->audmode;
	return 0;
}

int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);
	enum tpg_quality qual;

	if (vt->index != 0)
		return -EINVAL;

	vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
			 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
	vt->audmode = dev->tv_audmode;
	vt->rangelow = MIN_TV_FREQ;
	vt->rangehigh = MAX_TV_FREQ;
	qual = vivid_get_quality(dev, &vt->afc);
	if (qual == TPG_QUAL_COLOR)
		vt->signal = 0xffff;
	else if (qual == TPG_QUAL_GRAY)
		vt->signal = 0x8000;
	else
		vt->signal = 0;
	if (qual == TPG_QUAL_NOISE) {
		vt->rxsubchans = 0;
	} else if (qual == TPG_QUAL_GRAY) {
		vt->rxsubchans = V4L2_TUNER_SUB_MONO;
	} else {
		unsigned int channel_nr = dev->tv_freq / (6 * 16);
		unsigned int options =
			(dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;

		switch (channel_nr % options) {
		case 0:
			vt->rxsubchans = V4L2_TUNER_SUB_MONO;
			break;
		case 1:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
			break;
		case 2:
			if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
				vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
			else
				vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
			break;
		case 3:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
			break;
		}
	}
	strscpy(vt->name, "TV Tuner", sizeof(vt->name));
	return 0;
}

/* Must remain in sync with the vivid_ctrl_standard_strings array */
const v4l2_std_id vivid_standard[] = {
	V4L2_STD_NTSC_M,
	V4L2_STD_NTSC_M_JP,
	V4L2_STD_NTSC_M_KR,
	V4L2_STD_NTSC_443,
	V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
	V4L2_STD_PAL_I,
	V4L2_STD_PAL_DK,
	V4L2_STD_PAL_M,
	V4L2_STD_PAL_N,
	V4L2_STD_PAL_Nc,
	V4L2_STD_PAL_60,
	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
	V4L2_STD_SECAM_DK,
	V4L2_STD_SECAM_L,
	V4L2_STD_SECAM_LC,
	V4L2_STD_UNKNOWN
};

/* Must remain in sync with the vivid_standard array */
const char * const vivid_ctrl_standard_strings[] = {
	"NTSC-M",
	"NTSC-M-JP",
	"NTSC-M-KR",
	"NTSC-443",
	"PAL-BGH",
	"PAL-I",
	"PAL-DK",
	"PAL-M",
	"PAL-N",
	"PAL-Nc",
	"PAL-60",
	"SECAM-BGH",
	"SECAM-DK",
	"SECAM-L",
	"SECAM-Lc",
	NULL,
};

int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int last = dev->query_std_last[dev->input];

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
	    dev->std_signal_mode[dev->input] == NO_LOCK) {
		*id = V4L2_STD_UNKNOWN;
		return 0;
	}
	if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
		*id = V4L2_STD_UNKNOWN;
	} else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
		*id = dev->std_cap[dev->input];
	} else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
		*id = dev->query_std[dev->input];
	} else {
		*id = vivid_standard[last];
		dev->query_std_last[dev->input] =
			(last + 1) % ARRAY_SIZE(vivid_standard);
	}

	return 0;
}

int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_cap[dev->input] == id)
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
		return -EBUSY;
	dev->std_cap[dev->input] = id;
	vivid_update_format_cap(dev, false);
	return 0;
}

static void find_aspect_ratio(u32 width, u32 height,
			      u32 *num, u32 *denom)
{
	if (!(height % 3) && ((height * 4 / 3) == width)) {
		*num = 4;
		*denom = 3;
	} else if (!(height % 9) && ((height * 16 / 9) == width)) {
		*num = 16;
		*denom = 9;
	} else if (!(height % 10) && ((height * 16 / 10) == width)) {
		*num = 16;
		*denom = 10;
	} else if (!(height % 4) && ((height * 5 / 4) == width)) {
		*num = 5;
		*denom = 4;
	} else if (!(height % 9) && ((height * 15 / 9) == width)) {
		*num = 15;
		*denom = 9;
	} else { /* default to 16:9 */
		*num = 16;
		*denom = 9;
	}
}

static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
{
	struct v4l2_bt_timings *bt = &timings->bt;
	u32 total_h_pixel;
	u32 total_v_lines;
	u32 h_freq;

	if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
				   NULL, NULL))
		return false;

	total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
	total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);

	h_freq = (u32)bt->pixelclock / total_h_pixel;

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
				    bt->polarities, bt->interlaced, timings))
			return true;
	}

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
		struct v4l2_fract aspect_ratio;

		find_aspect_ratio(bt->width, bt->height,
				  &aspect_ratio.numerator,
				  &aspect_ratio.denominator);
		if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
				    bt->polarities, bt->interlaced,
				    aspect_ratio, timings))
			return true;
	}
	return false;
}

int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
			       struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
				      0, NULL, NULL) &&
	    !valid_cvt_gtf_timings(timings))
		return -EINVAL;

	if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
				  0, false))
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->dv_timings_cap[dev->input] = *timings;
	vivid_update_format_cap(dev, false);
	return 0;
}

int vidioc_query_dv_timings(struct file *file, void *_fh,
			    struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned int input = dev->input;
	unsigned int last = dev->query_dv_timings_last[input];

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
	    dev->edid_blocks == 0)
		return -ENOLINK;
	if (dev->dv_timings_signal_mode[input] == NO_LOCK)
		return -ENOLCK;
	if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
		timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
		return -ERANGE;
	}
	if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
		*timings = dev->dv_timings_cap[input];
	} else if (dev->dv_timings_signal_mode[input] ==
		   SELECTED_DV_TIMINGS) {
		*timings =
			v4l2_dv_timings_presets[dev->query_dv_timings[input]];
	} else {
		*timings =
			v4l2_dv_timings_presets[last];
		dev->query_dv_timings_last[input] =
			(last + 1) % dev->query_dv_timings_size;
	}
	return 0;
}

int vidioc_s_edid(struct file *file, void *_fh,
		  struct v4l2_edid *edid)
{
	struct vivid_dev *dev = video_drvdata(file);
	u16 phys_addr;
	u32 display_present = 0;
	unsigned int i, j;
	int ret;

	memset(edid->reserved, 0, sizeof(edid->reserved));
	if (edid->pad >= dev->num_inputs)
		return -EINVAL;
	if (dev->input_type[edid->pad] != HDMI || edid->start_block)
		return -EINVAL;
	if (edid->blocks == 0) {
		dev->edid_blocks = 0;
		v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
		v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
		phys_addr = CEC_PHYS_ADDR_INVALID;
		goto set_phys_addr;
	}
	if (edid->blocks > dev->edid_max_blocks) {
		edid->blocks = dev->edid_max_blocks;
		return -E2BIG;
	}
	phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
	ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
	if (ret)
		return ret;

	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->edid_blocks = edid->blocks;
	memcpy(dev->edid, edid->edid, edid->blocks * 128);

	for (i = 0, j = 0; i < dev->num_outputs; i++)
		if (dev->output_type[i] == HDMI)
			display_present |=
				dev->display_present[i] << j++;

	v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
	v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);

set_phys_addr:
	/* TODO: a proper hotplug detect cycle should be emulated here */
	cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);

	for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
		cec_s_phys_addr(dev->cec_tx_adap[i],
				dev->display_present[i] ?
				v4l2_phys_addr_for_input(phys_addr, i + 1) :
				CEC_PHYS_ADDR_INVALID,
				false);
	return 0;
}

int vidioc_enum_framesizes(struct file *file, void *fh,
			   struct v4l2_frmsizeenum *fsize)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
		return -EINVAL;
	if (vivid_get_format(dev, fsize->pixel_format) == NULL)
		return -EINVAL;
	if (vivid_is_webcam(dev)) {
		if (fsize->index >= ARRAY_SIZE(webcam_sizes))
			return -EINVAL;
		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
		fsize->discrete = webcam_sizes[fsize->index];
		return 0;
	}
	if (fsize->index)
		return -EINVAL;
	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = MIN_WIDTH;
	fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
	fsize->stepwise.step_width = 2;
	fsize->stepwise.min_height = MIN_HEIGHT;
	fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
	fsize->stepwise.step_height = 2;
	return 0;
}

/* timeperframe is arbitrary and continuous */
int vidioc_enum_frameintervals(struct file *file, void *priv,
			       struct v4l2_frmivalenum *fival)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	int i;

	fmt = vivid_get_format(dev, fival->pixel_format);
	if (!fmt)
		return -EINVAL;

	if (!vivid_is_webcam(dev)) {
		if (fival->index)
			return -EINVAL;
		if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
			return -EINVAL;
		if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
			return -EINVAL;
		fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
		fival->discrete = dev->timeperframe_vid_cap;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
		if (fival->width == webcam_sizes[i].width &&
		    fival->height == webcam_sizes[i].height)
			break;
	if (i == ARRAY_SIZE(webcam_sizes))
		return -EINVAL;
	if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
		return -EINVAL;
	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = webcam_intervals[fival->index];
	return 0;
}

int vivid_vid_cap_g_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;

	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
	parm->parm.capture.readbuffers = 1;
	return 0;
}

int vivid_vid_cap_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
	struct v4l2_fract tpf;
	unsigned i;

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;
	if (!vivid_is_webcam(dev))
		return vivid_vid_cap_g_parm(file, priv, parm);

	tpf = parm->parm.capture.timeperframe;

	if (tpf.denominator == 0)
		tpf = webcam_intervals[ival_sz - 1];
	for (i = 0; i < ival_sz; i++)
		if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
			break;
	if (i == ival_sz)
		i = ival_sz - 1;
	dev->webcam_ival_idx = i;
	tpf = webcam_intervals[dev->webcam_ival_idx];

	/* resync the thread's timings */
	dev->cap_seq_resync = true;
	dev->timeperframe_vid_cap = tpf;
	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = tpf;
	parm->parm.capture.readbuffers = 1;
	return 0;
}