/*
 * V4L2 context helper functions.
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"
#include "v4l2_buffers.h"
#include "v4l2_fmt.h"
#include "v4l2_m2m.h"

struct v4l2_format_update {
    uint32_t v4l2_fmt;
    int update_v4l2;

    enum AVPixelFormat av_fmt;
    int update_avfmt;
};

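/* A V4L2Context is always embedded in a V4L2m2mContext, either as the output
 * queue (input to the codec) or the capture queue (output of the codec);
 * container_of() recovers the owning V4L2m2mContext from either member. */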
static inline V4L2m2mContext *ctx_to_m2mctx(V4L2Context *ctx)
{
    return V4L2_TYPE_IS_OUTPUT(ctx->type) ?
        container_of(ctx, V4L2m2mContext, output) :
        container_of(ctx, V4L2m2mContext, capture);
}

static inline AVCodecContext *logger(V4L2Context *ctx)
{
    return ctx_to_m2mctx(ctx)->avctx;
}

static inline unsigned int v4l2_get_width(struct v4l2_format *fmt)
{
    return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
}

static inline unsigned int v4l2_get_height(struct v4l2_format *fmt)
{
    return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
}

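/* Query the driver for the pixel aspect ratio via VIDIOC_CROPCAP; if the
 * ioctl fails, a neutral 0/1 ratio is returned. */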
static AVRational v4l2_get_sar(V4L2Context *ctx)
{
    struct AVRational sar = { 0, 1 };
    struct v4l2_cropcap cropcap;
    int ret;

    memset(&cropcap, 0, sizeof(cropcap));
    cropcap.type = ctx->type;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_CROPCAP, &cropcap);
    if (ret)
        return sar;

    sar.num = cropcap.pixelaspect.numerator;
    sar.den = cropcap.pixelaspect.denominator;
    return sar;
}

static inline unsigned int v4l2_resolution_changed(V4L2Context *ctx, struct v4l2_format *fmt2)
{
    struct v4l2_format *fmt1 = &ctx->format;
    int ret = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
        fmt1->fmt.pix_mp.width != fmt2->fmt.pix_mp.width ||
        fmt1->fmt.pix_mp.height != fmt2->fmt.pix_mp.height
        :
        fmt1->fmt.pix.width != fmt2->fmt.pix.width ||
        fmt1->fmt.pix.height != fmt2->fmt.pix.height;

    if (ret)
        av_log(logger(ctx), AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n",
            ctx->name,
            v4l2_get_width(fmt1), v4l2_get_height(fmt1),
            v4l2_get_width(fmt2), v4l2_get_height(fmt2));

    return ret;
}

static inline int v4l2_type_supported(V4L2Context *ctx)
{
    return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
        ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
}

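/* Heuristic size for a compressed (coded) buffer: roughly half of the
 * worst-case YUV 4:2:0 frame size, padded for the decoder case and aligned
 * up to 4 KiB for the encoder case. */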
static inline int v4l2_get_framesize_compressed(V4L2Context* ctx, int width, int height)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    const int SZ_4K = 0x1000;
    int size;

    if (s->avctx && av_codec_is_decoder(s->avctx->codec))
        return ((width * height * 3 / 2) / 2) + 128;

    /* encoder */
    size = FFALIGN(height, 32) * FFALIGN(width, 32) * 3 / 2 / 2;
    return FFALIGN(size, SZ_4K);
}

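/* Copy the negotiated dimensions and, optionally, the pixel formats from the
 * V4L2Context back into its cached struct v4l2_format so that a subsequent
 * VIDIOC_TRY_FMT/VIDIOC_S_FMT sees the updated values. */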
static inline void v4l2_save_to_context(V4L2Context* ctx, struct v4l2_format_update *fmt)
{
    ctx->format.type = ctx->type;

    if (fmt->update_avfmt)
        ctx->av_pix_fmt = fmt->av_fmt;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        /* update the sizes to handle the reconfiguration of the capture stream at runtime */
        ctx->format.fmt.pix_mp.height = ctx->height;
        ctx->format.fmt.pix_mp.width = ctx->width;
        if (fmt->update_v4l2) {
            ctx->format.fmt.pix_mp.pixelformat = fmt->v4l2_fmt;

            /* s5p-mfc requires the user to specify a buffer size */
            ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage =
                v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
        }
    } else {
        ctx->format.fmt.pix.height = ctx->height;
        ctx->format.fmt.pix.width = ctx->width;
        if (fmt->update_v4l2) {
            ctx->format.fmt.pix.pixelformat = fmt->v4l2_fmt;

            /* s5p-mfc requires the user to specify a buffer size */
            ctx->format.fmt.pix.sizeimage =
                v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
        }
    }
}

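/* V4L2_DEC_CMD_START resumes the capture queue; it is issued after a source
 * change event that did not require a full reinit (resolution unchanged). */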
static int v4l2_start_decode(V4L2Context *ctx)
{
    struct v4l2_decoder_cmd cmd = {
        .cmd = V4L2_DEC_CMD_START,
        .flags = 0,
    };
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DECODER_CMD, &cmd);
    if (ret)
        return AVERROR(errno);

    return 0;
}

/**
 * Handle a pending V4L2 event (resolution/source change or end of stream).
 * Returns 1 if the codec was reinitialized, a negative error code if the
 * reinitialization failed, and 0 if no reinitialization was performed.
 */
static int v4l2_handle_event(V4L2Context *ctx)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    struct v4l2_format cap_fmt = s->capture.format;
    struct v4l2_event evt = { 0 };
    int ret;

    ret = ioctl(s->fd, VIDIOC_DQEVENT, &evt);
    if (ret < 0) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_DQEVENT\n", ctx->name);
        return 0;
    }

    if (evt.type == V4L2_EVENT_EOS) {
        ctx->done = 1;
        return 0;
    }

    if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
        return 0;

    ret = ioctl(s->fd, VIDIOC_G_FMT, &cap_fmt);
    if (ret) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->capture.name);
        return 0;
    }

    if (v4l2_resolution_changed(&s->capture, &cap_fmt)) {
        s->capture.height = v4l2_get_height(&cap_fmt);
        s->capture.width = v4l2_get_width(&cap_fmt);
        s->capture.sample_aspect_ratio = v4l2_get_sar(&s->capture);
    } else {
        v4l2_start_decode(ctx);
        return 0;
    }

    s->reinit = 1;

    if (s->avctx) {
        ret = ff_set_dimensions(s->avctx, s->capture.width, s->capture.height);
        if (ret < 0)
            av_log(logger(ctx), AV_LOG_WARNING, "update avcodec height and width\n");
    }

    ret = ff_v4l2_m2m_codec_reinit(s);
    if (ret) {
        av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_reinit\n");
        return AVERROR(EINVAL);
    }

    /* reinit executed */
    return 1;
}

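/* Ask the driver to start draining with V4L2_DEC_CMD_STOP; drivers that do
 * not implement the decoder command interface are streamed off instead. */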
static int v4l2_stop_decode(V4L2Context *ctx)
{
    struct v4l2_decoder_cmd cmd = {
        .cmd = V4L2_DEC_CMD_STOP,
        .flags = 0,
    };
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DECODER_CMD, &cmd);
    if (ret) {
        /* DECODER_CMD is optional */
        if (errno == ENOTTY)
            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
        else
            return AVERROR(errno);
    }

    return 0;
}

static int v4l2_stop_encode(V4L2Context *ctx)
{
    struct v4l2_encoder_cmd cmd = {
        .cmd = V4L2_ENC_CMD_STOP,
        .flags = 0,
    };
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENCODER_CMD, &cmd);
    if (ret) {
        /* ENCODER_CMD is optional */
        if (errno == ENOTTY)
            return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
        else
            return AVERROR(errno);
    }

    return 0;
}

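/* Dequeue one buffer from this context's queue.
 * The timeout follows poll() semantics: 0 returns immediately when nothing
 * is ready, -1 blocks until a buffer, event or error is available.
 * Returns NULL when no buffer could be dequeued; ctx->done is set when the
 * stream has ended or a fatal error occurred. */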
static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
{
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buf = { 0 };
    V4L2Buffer *avbuf;
    struct pollfd pfd = {
        .events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
        .fd = ctx_to_m2mctx(ctx)->fd,
    };
    int i, ret;

    if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx->buffers) {
        for (i = 0; i < ctx->num_buffers; i++) {
            if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                break;
        }
        if (i == ctx->num_buffers)
            av_log(logger(ctx), AV_LOG_WARNING, "All capture buffers returned to "
                                                "userspace. Increase num_capture_buffers "
                                                "to prevent device deadlock or dropped "
                                                "packets/frames.\n");
    }

    /* if we are draining and there are no more capture buffers queued in the driver we are done */
    if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) {
        for (i = 0; i < ctx->num_buffers; i++) {
            /* capture buffer initialization happens during decode hence
             * detection happens at runtime
             */
            if (!ctx->buffers)
                break;

            if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                goto start;
        }
        ctx->done = 1;
        return NULL;
    }

start:
    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        pfd.events = POLLOUT | POLLWRNORM;
    else {
        /* no need to listen to requests for more input while draining */
        if (ctx_to_m2mctx(ctx)->draining)
            pfd.events = POLLIN | POLLRDNORM | POLLPRI;
    }

    for (;;) {
        ret = poll(&pfd, 1, timeout);
        if (ret > 0)
            break;
        if (errno == EINTR)
            continue;
        return NULL;
    }

    /* 0. handle errors */
    if (pfd.revents & POLLERR) {
        /* if we are trying to get free buffers but none have been queued yet
           no need to raise a warning */
        if (timeout == 0) {
            for (i = 0; i < ctx->num_buffers; i++) {
                if (ctx->buffers[i].status != V4L2BUF_AVAILABLE)
                    av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
            }
        }
        else
            av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);

        return NULL;
    }

    /* 1. handle resolution changes */
    if (pfd.revents & POLLPRI) {
        ret = v4l2_handle_event(ctx);
        if (ret < 0) {
            /* if re-init failed, abort */
            ctx->done = 1;
            return NULL;
        }
        if (ret) {
            /* if re-init was successful drop the buffer (if there was one)
             * since we had to reconfigure capture (unmap all buffers)
             */
            return NULL;
        }
    }

    /* 2. dequeue the buffer */
    if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {

        if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) {
            /* there is a capture buffer ready */
            if (pfd.revents & (POLLIN | POLLRDNORM))
                goto dequeue;

            /* the driver is ready to accept more input; instead of waiting for the capture
             * buffer to complete we return NULL so input can proceed (we are single threaded)
             */
            if (pfd.revents & (POLLOUT | POLLWRNORM))
                return NULL;
        }

dequeue:
        memset(&buf, 0, sizeof(buf));
        buf.memory = V4L2_MEMORY_MMAP;
        buf.type = ctx->type;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memset(planes, 0, sizeof(planes));
            buf.length = VIDEO_MAX_PLANES;
            buf.m.planes = planes;
        }

        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf);
        if (ret) {
            if (errno != EAGAIN) {
                ctx->done = 1;
                if (errno != EPIPE)
                    av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
                        ctx->name, av_err2str(AVERROR(errno)));
            }
            return NULL;
        }

        if (ctx_to_m2mctx(ctx)->draining && !V4L2_TYPE_IS_OUTPUT(ctx->type)) {
            int bytesused = V4L2_TYPE_IS_MULTIPLANAR(buf.type) ?
                            buf.m.planes[0].bytesused : buf.bytesused;
            if (bytesused == 0) {
                ctx->done = 1;
                return NULL;
            }
#ifdef V4L2_BUF_FLAG_LAST
            if (buf.flags & V4L2_BUF_FLAG_LAST)
                ctx->done = 1;
#endif
        }

        avbuf = &ctx->buffers[buf.index];
        avbuf->status = V4L2BUF_AVAILABLE;
        avbuf->buf = buf;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            memcpy(avbuf->planes, planes, sizeof(planes));
            avbuf->buf.m.planes = avbuf->planes;
        }
        return avbuf;
    }

    return NULL;
}

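/* Return the first buffer owned by userspace, reclaiming completed output
 * buffers from the driver first; NULL if every buffer is still queued. */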
static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
{
    int timeout = 0; /* return when no more buffers to dequeue */
    int i;

    /* get back as many output buffers as possible */
    if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
        do {
        } while (v4l2_dequeue_v4l2buf(ctx, timeout));
    }

    for (i = 0; i < ctx->num_buffers; i++) {
        if (ctx->buffers[i].status == V4L2BUF_AVAILABLE)
            return &ctx->buffers[i];
    }

    return NULL;
}

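/* Unmap all mapped planes and free the driver-side buffers by issuing a
 * VIDIOC_REQBUFS request with count = 0. */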
static int v4l2_release_buffers(V4L2Context* ctx)
{
    struct v4l2_requestbuffers req = {
        .memory = V4L2_MEMORY_MMAP,
        .type = ctx->type,
        .count = 0, /* 0 -> unmaps buffers from the driver */
    };
    int i, j;

    for (i = 0; i < ctx->num_buffers; i++) {
        V4L2Buffer *buffer = &ctx->buffers[i];

        for (j = 0; j < buffer->num_planes; j++) {
            struct V4L2Plane_info *p = &buffer->plane_info[j];
            if (p->mm_addr && p->length)
                if (munmap(p->mm_addr, p->length) < 0)
                    av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s)\n", ctx->name, av_err2str(AVERROR(errno)));
        }
    }

    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req);
}

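/* Check with VIDIOC_TRY_FMT whether the driver accepts the V4L2 equivalent
 * of the given AVPixelFormat on this queue; the candidate pixelformat is
 * written into ctx->format before the ioctl. */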
static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfmt)
{
    struct v4l2_format *fmt = &ctx->format;
    uint32_t v4l2_fmt;
    int ret;

    v4l2_fmt = ff_v4l2_format_avfmt_to_v4l2(pixfmt);
    if (!v4l2_fmt)
        return AVERROR(EINVAL);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type))
        fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
    else
        fmt->fmt.pix.pixelformat = v4l2_fmt;

    fmt->type = ctx->type;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, fmt);
    if (ret)
        return AVERROR(EINVAL);

    return 0;
}

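/* Negotiate a raw pixel format: try the format requested by the caller
 * first, then fall back to enumerating the formats offered by the driver
 * until one maps to a known AVPixelFormat and passes VIDIOC_TRY_FMT. */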
static int v4l2_get_raw_format(V4L2Context* ctx, enum AVPixelFormat *p)
{
    enum AVPixelFormat pixfmt = ctx->av_pix_fmt;
    struct v4l2_fmtdesc fdesc;
    int ret;

    memset(&fdesc, 0, sizeof(fdesc));
    fdesc.type = ctx->type;

    if (pixfmt != AV_PIX_FMT_NONE) {
        ret = v4l2_try_raw_format(ctx, pixfmt);
        if (!ret)
            return 0;
    }

    for (;;) {
        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
        if (ret)
            return AVERROR(EINVAL);

        pixfmt = ff_v4l2_format_v4l2_to_avfmt(fdesc.pixelformat, AV_CODEC_ID_RAWVIDEO);
        ret = v4l2_try_raw_format(ctx, pixfmt);
        if (ret) {
            fdesc.index++;
            continue;
        }

        *p = pixfmt;

        return 0;
    }

    return AVERROR(EINVAL);
}

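/* Map the codec id to a V4L2 coded pixelformat and verify, via format
 * enumeration, that the driver actually supports it on this queue. */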
static int v4l2_get_coded_format(V4L2Context* ctx, uint32_t *p)
{
    struct v4l2_fmtdesc fdesc;
    uint32_t v4l2_fmt;
    int ret;

    /* translate to a valid v4l2 format */
    v4l2_fmt = ff_v4l2_format_avcodec_to_v4l2(ctx->av_codec_id);
    if (!v4l2_fmt)
        return AVERROR(EINVAL);

    /* check if the driver supports this format */
    memset(&fdesc, 0, sizeof(fdesc));
    fdesc.type = ctx->type;

    for (;;) {
        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
        if (ret)
            return AVERROR(EINVAL);

        if (fdesc.pixelformat == v4l2_fmt)
            break;

        fdesc.index++;
    }

    *p = v4l2_fmt;

    return 0;
}

 /*****************************************************************************
  *
  *             V4L2 Context Interface
  *
  *****************************************************************************/

int ff_v4l2_context_set_status(V4L2Context* ctx, uint32_t cmd)
{
    int type = ctx->type;
    int ret;

    ret = ioctl(ctx_to_m2mctx(ctx)->fd, cmd, &type);
    if (ret < 0)
        return AVERROR(errno);

    ctx->streamon = (cmd == VIDIOC_STREAMON);

    return 0;
}

int ff_v4l2_context_enqueue_frame(V4L2Context* ctx, const AVFrame* frame)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    V4L2Buffer* avbuf;
    int ret;

    if (!frame) {
        ret = v4l2_stop_encode(ctx);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s stop_encode\n", ctx->name);
        s->draining = 1;
        return 0;
    }

    avbuf = v4l2_getfree_v4l2buf(ctx);
    if (!avbuf)
        return AVERROR(EAGAIN);

    ret = ff_v4l2_buffer_avframe_to_buf(frame, avbuf);
    if (ret)
        return ret;

    return ff_v4l2_buffer_enqueue(avbuf);
}

int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    V4L2Buffer* avbuf;
    int ret;

    if (!pkt->size) {
        ret = v4l2_stop_decode(ctx);
        if (ret)
            av_log(logger(ctx), AV_LOG_ERROR, "%s stop_decode\n", ctx->name);
        s->draining = 1;
        return 0;
    }

    avbuf = v4l2_getfree_v4l2buf(ctx);
    if (!avbuf)
        return AVERROR(EAGAIN);

    ret = ff_v4l2_buffer_avpkt_to_buf(pkt, avbuf);
    if (ret)
        return ret;

    return ff_v4l2_buffer_enqueue(avbuf);
}

int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* frame, int timeout)
{
    V4L2Buffer *avbuf;

    /*
     * timeout=-1 blocks until:
     *  1. decoded frame available
     *  2. an input buffer is ready to be dequeued
     */
    avbuf = v4l2_dequeue_v4l2buf(ctx, timeout);
    if (!avbuf) {
        if (ctx->done)
            return AVERROR_EOF;

        return AVERROR(EAGAIN);
    }

    return ff_v4l2_buffer_buf_to_avframe(frame, avbuf);
}

int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt)
{
    V4L2Buffer *avbuf;

    /*
     * blocks until:
     *  1. encoded packet available
     *  2. an input buffer ready to be dequeued
     */
    avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
    if (!avbuf) {
        if (ctx->done)
            return AVERROR_EOF;

        return AVERROR(EAGAIN);
    }

    return ff_v4l2_buffer_buf_to_avpkt(pkt, avbuf);
}

int ff_v4l2_context_get_format(V4L2Context* ctx, int probe)
{
    struct v4l2_format_update fmt = { 0 };
    int ret;

    if (ctx->av_codec_id == AV_CODEC_ID_RAWVIDEO) {
        ret = v4l2_get_raw_format(ctx, &fmt.av_fmt);
        if (ret)
            return ret;

        fmt.update_avfmt = !probe;
        v4l2_save_to_context(ctx, &fmt);

        /* format has been tried already */
        return ret;
    }

    ret = v4l2_get_coded_format(ctx, &fmt.v4l2_fmt);
    if (ret)
        return ret;

    fmt.update_v4l2 = 1;
    v4l2_save_to_context(ctx, &fmt);

    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, &ctx->format);
}

int ff_v4l2_context_set_format(V4L2Context* ctx)
{
    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format);
}

void ff_v4l2_context_release(V4L2Context* ctx)
{
    int ret;

    if (!ctx->buffers)
        return;

    ret = v4l2_release_buffers(ctx);
    if (ret)
        av_log(logger(ctx), AV_LOG_WARNING, "V4L2 failed to unmap the %s buffers\n", ctx->name);

    av_freep(&ctx->buffers);
}

int ff_v4l2_context_init(V4L2Context* ctx)
{
    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
    struct v4l2_requestbuffers req;
    int ret, i;

    if (!v4l2_type_supported(ctx)) {
        av_log(logger(ctx), AV_LOG_ERROR, "type %i not supported\n", ctx->type);
        return AVERROR_PATCHWELCOME;
    }

    ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format);
    if (ret)
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name);

    memset(&req, 0, sizeof(req));
    req.count = ctx->num_buffers;
    req.memory = V4L2_MEMORY_MMAP;
    req.type = ctx->type;
    ret = ioctl(s->fd, VIDIOC_REQBUFS, &req);
    if (ret < 0) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_REQBUFS failed: %s\n", ctx->name, strerror(errno));
        return AVERROR(errno);
    }

    ctx->num_buffers = req.count;
    ctx->buffers = av_calloc(ctx->num_buffers, sizeof(V4L2Buffer));
    if (!ctx->buffers) {
        av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < req.count; i++) {
        ctx->buffers[i].context = ctx;
        ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
        if (ret < 0) {
            av_log(logger(ctx), AV_LOG_ERROR, "%s buffer[%d] initialization (%s)\n", ctx->name, i, av_err2str(ret));
            goto error;
        }
    }

    av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat),
        req.count,
        v4l2_get_width(&ctx->format),
        v4l2_get_height(&ctx->format),
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage,
        V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline);

    return 0;

error:
    v4l2_release_buffers(ctx);

    av_freep(&ctx->buffers);

    return ret;
}
