/*
 * DXVA2 AV1 HW acceleration.
 *
 * copyright (c) 2020 Hendrik Leppkes
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"

#include "dxva2_internal.h"
#include "av1dec.h"

#define MAX_TILES 256

struct AV1DXVAContext {
    FFDXVASharedContext shared;

    unsigned int bitstream_allocated;
    uint8_t *bitstream_cache;
};

struct av1_dxva2_picture_context {
    DXVA_PicParams_AV1    pp;
    unsigned              tile_count;
    DXVA_Tile_AV1         tiles[MAX_TILES];
    uint8_t              *bitstream;
    unsigned              bitstream_size;
};

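/* Derive the coded bit depth from the sequence header: profile 2 with
 * high_bitdepth is 12 or 10 bit (depending on the twelve_bit flag),
 * profiles 0/1 with high_bitdepth are 10 bit, everything else is 8 bit. */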
static int get_bit_depth_from_seq(const AV1RawSequenceHeader *seq)
{
    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        return seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2 && seq->color_config.high_bitdepth)
        return 10;
    else
        return 8;
}

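/* Translate the parsed AV1 sequence/frame headers and the current frame's
 * film grain parameters into the DXVA_PicParams_AV1 structure consumed by
 * the driver. */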
static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, const AV1DecContext *h,
                                    DXVA_PicParams_AV1 *pp)
{
    int i, j, uses_lr;
    const AV1RawSequenceHeader *seq = h->raw_seq;
    const AV1RawFrameHeader *frame_header = h->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &h->cur_frame.film_grain;

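    /* Remap the coded lr_type syntax values to FrameRestorationType as
     * defined by Remap_Lr_Type[] in the AV1 specification (lr_params()). */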
    unsigned char remap_lr_type[4] = { AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ };
    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;

    memset(pp, 0, sizeof(*pp));

    pp->width  = avctx->width;
    pp->height = avctx->height;

    pp->max_width  = seq->max_frame_width_minus_1 + 1;
    pp->max_height = seq->max_frame_height_minus_1 + 1;

    pp->CurrPicTextureIndex = ff_dxva2_get_surface_index(avctx, ctx, h->cur_frame.f);
    pp->superres_denom      = frame_header->use_superres ? frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN : AV1_SUPERRES_NUM;
    pp->bitdepth            = get_bit_depth_from_seq(seq);
    pp->seq_profile         = seq->seq_profile;

    /* Tiling info */
    pp->tiles.cols = frame_header->tile_cols;
    pp->tiles.rows = frame_header->tile_rows;
    pp->tiles.context_update_id = frame_header->context_update_tile_id;

    for (i = 0; i < pp->tiles.cols; i++)
        pp->tiles.widths[i] = frame_header->width_in_sbs_minus_1[i] + 1;

    for (i = 0; i < pp->tiles.rows; i++)
        pp->tiles.heights[i] = frame_header->height_in_sbs_minus_1[i] + 1;

    /* Coding tools */
    pp->coding.use_128x128_superblock       = seq->use_128x128_superblock;
    pp->coding.intra_edge_filter            = seq->enable_intra_edge_filter;
    pp->coding.interintra_compound          = seq->enable_interintra_compound;
    pp->coding.masked_compound              = seq->enable_masked_compound;
    pp->coding.warped_motion                = frame_header->allow_warped_motion;
    pp->coding.dual_filter                  = seq->enable_dual_filter;
    pp->coding.jnt_comp                     = seq->enable_jnt_comp;
    pp->coding.screen_content_tools         = frame_header->allow_screen_content_tools;
    pp->coding.integer_mv                   = frame_header->force_integer_mv || !(frame_header->frame_type & 1);
    pp->coding.cdef                         = seq->enable_cdef;
    pp->coding.restoration                  = seq->enable_restoration;
    pp->coding.film_grain                   = seq->film_grain_params_present && !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
    pp->coding.intrabc                      = frame_header->allow_intrabc;
    pp->coding.high_precision_mv            = frame_header->allow_high_precision_mv;
    pp->coding.switchable_motion_mode       = frame_header->is_motion_mode_switchable;
    pp->coding.filter_intra                 = seq->enable_filter_intra;
    pp->coding.disable_frame_end_update_cdf = frame_header->disable_frame_end_update_cdf;
    pp->coding.disable_cdf_update           = frame_header->disable_cdf_update;
    pp->coding.reference_mode               = frame_header->reference_select;
    pp->coding.skip_mode                    = frame_header->skip_mode_present;
    pp->coding.reduced_tx_set               = frame_header->reduced_tx_set;
    pp->coding.superres                     = frame_header->use_superres;
    pp->coding.tx_mode                      = frame_header->tx_mode;
    pp->coding.use_ref_frame_mvs            = frame_header->use_ref_frame_mvs;
    pp->coding.enable_ref_frame_mvs         = seq->enable_ref_frame_mvs;
    pp->coding.reference_frame_update       = 1; // 0 for show_existing_frame with key frames, but those are not passed to the hwaccel

    /* Format & Picture Info flags */
    pp->format.frame_type     = frame_header->frame_type;
    pp->format.show_frame     = frame_header->show_frame;
    pp->format.showable_frame = frame_header->showable_frame;
    pp->format.subsampling_x  = seq->color_config.subsampling_x;
    pp->format.subsampling_y  = seq->color_config.subsampling_y;
    pp->format.mono_chrome    = seq->color_config.mono_chrome;

    /* References */
    pp->primary_ref_frame = frame_header->primary_ref_frame;
    pp->order_hint        = frame_header->order_hint;
    pp->order_hint_bits   = seq->enable_order_hint ? seq->order_hint_bits_minus_1 + 1 : 0;

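    /* Mark all reference map entries as invalid (0xFF) first, then fill in
     * the surfaces that are actually available. */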
    memset(pp->RefFrameMapTextureIndex, 0xFF, sizeof(pp->RefFrameMapTextureIndex));
    for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
        int8_t ref_idx = frame_header->ref_frame_idx[i];
        AVFrame *ref_frame = h->ref[ref_idx].f;

        pp->frame_refs[i].width  = ref_frame->width;
        pp->frame_refs[i].height = ref_frame->height;
        pp->frame_refs[i].Index  = ref_frame->buf[0] ? ref_idx : 0xFF;

        /* Global Motion */
        pp->frame_refs[i].wminvalid = h->cur_frame.gm_invalid[AV1_REF_FRAME_LAST + i];
        pp->frame_refs[i].wmtype    = h->cur_frame.gm_type[AV1_REF_FRAME_LAST + i];
        for (j = 0; j < 6; ++j) {
            pp->frame_refs[i].wmmat[j] = h->cur_frame.gm_params[AV1_REF_FRAME_LAST + i][j];
        }
    }
    for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        AVFrame *ref_frame = h->ref[i].f;
        if (ref_frame->buf[0])
            pp->RefFrameMapTextureIndex[i] = ff_dxva2_get_surface_index(avctx, ctx, ref_frame);
    }

    /* Loop filter parameters */
    pp->loop_filter.filter_level[0]        = frame_header->loop_filter_level[0];
    pp->loop_filter.filter_level[1]        = frame_header->loop_filter_level[1];
    pp->loop_filter.filter_level_u         = frame_header->loop_filter_level[2];
    pp->loop_filter.filter_level_v         = frame_header->loop_filter_level[3];
    pp->loop_filter.sharpness_level        = frame_header->loop_filter_sharpness;
    pp->loop_filter.mode_ref_delta_enabled = frame_header->loop_filter_delta_enabled;
    pp->loop_filter.mode_ref_delta_update  = frame_header->loop_filter_delta_update;
    pp->loop_filter.delta_lf_multi         = frame_header->delta_lf_multi;
    pp->loop_filter.delta_lf_present       = frame_header->delta_lf_present;
    pp->loop_filter.delta_lf_res           = frame_header->delta_lf_res;

    for (i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
        pp->loop_filter.ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
    }

    pp->loop_filter.mode_deltas[0]                = frame_header->loop_filter_mode_deltas[0];
    pp->loop_filter.mode_deltas[1]                = frame_header->loop_filter_mode_deltas[1];
    pp->loop_filter.frame_restoration_type[0]     = remap_lr_type[frame_header->lr_type[0]];
    pp->loop_filter.frame_restoration_type[1]     = remap_lr_type[frame_header->lr_type[1]];
    pp->loop_filter.frame_restoration_type[2]     = remap_lr_type[frame_header->lr_type[2]];
    uses_lr = frame_header->lr_type[0] || frame_header->lr_type[1] || frame_header->lr_type[2];
    pp->loop_filter.log2_restoration_unit_size[0] = uses_lr ? (6 + frame_header->lr_unit_shift) : 8;
    pp->loop_filter.log2_restoration_unit_size[1] = uses_lr ? (6 + frame_header->lr_unit_shift - frame_header->lr_uv_shift) : 8;
    pp->loop_filter.log2_restoration_unit_size[2] = uses_lr ? (6 + frame_header->lr_unit_shift - frame_header->lr_uv_shift) : 8;

    /* Quantization */
    pp->quantization.delta_q_present = frame_header->delta_q_present;
    pp->quantization.delta_q_res     = frame_header->delta_q_res;
    pp->quantization.base_qindex     = frame_header->base_q_idx;
    pp->quantization.y_dc_delta_q    = frame_header->delta_q_y_dc;
    pp->quantization.u_dc_delta_q    = frame_header->delta_q_u_dc;
    pp->quantization.v_dc_delta_q    = frame_header->delta_q_v_dc;
    pp->quantization.u_ac_delta_q    = frame_header->delta_q_u_ac;
    pp->quantization.v_ac_delta_q    = frame_header->delta_q_v_ac;
    pp->quantization.qm_y            = frame_header->using_qmatrix ? frame_header->qm_y : 0xFF;
    pp->quantization.qm_u            = frame_header->using_qmatrix ? frame_header->qm_u : 0xFF;
    pp->quantization.qm_v            = frame_header->using_qmatrix ? frame_header->qm_v : 0xFF;

    /* Cdef parameters */
    pp->cdef.damping = frame_header->cdef_damping_minus_3;
    pp->cdef.bits    = frame_header->cdef_bits;
    for (i = 0; i < 8; i++) {
        pp->cdef.y_strengths[i].primary    = frame_header->cdef_y_pri_strength[i];
        pp->cdef.y_strengths[i].secondary  = frame_header->cdef_y_sec_strength[i];
        pp->cdef.uv_strengths[i].primary   = frame_header->cdef_uv_pri_strength[i];
        pp->cdef.uv_strengths[i].secondary = frame_header->cdef_uv_sec_strength[i];
    }

    /* Misc flags */
    pp->interp_filter = frame_header->interpolation_filter;

    /* Segmentation */
    pp->segmentation.enabled         = frame_header->segmentation_enabled;
    pp->segmentation.update_map      = frame_header->segmentation_update_map;
    pp->segmentation.update_data     = frame_header->segmentation_update_data;
    pp->segmentation.temporal_update = frame_header->segmentation_temporal_update;
    for (i = 0; i < AV1_MAX_SEGMENTS; i++) {
        for (j = 0; j < AV1_SEG_LVL_MAX; j++) {
            pp->segmentation.feature_mask[i].mask |= frame_header->feature_enabled[i][j] << j;
            pp->segmentation.feature_data[i][j]    = frame_header->feature_value[i][j];
        }
    }

    /* Film grain */
    if (apply_grain) {
        pp->film_grain.apply_grain              = 1;
        pp->film_grain.scaling_shift_minus8     = film_grain->grain_scaling_minus_8;
        pp->film_grain.chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma;
        pp->film_grain.ar_coeff_lag             = film_grain->ar_coeff_lag;
        pp->film_grain.ar_coeff_shift_minus6    = film_grain->ar_coeff_shift_minus_6;
        pp->film_grain.grain_scale_shift        = film_grain->grain_scale_shift;
        pp->film_grain.overlap_flag             = film_grain->overlap_flag;
        pp->film_grain.clip_to_restricted_range = film_grain->clip_to_restricted_range;
        pp->film_grain.matrix_coeff_is_identity = (seq->color_config.matrix_coefficients == AVCOL_SPC_RGB);

        pp->film_grain.grain_seed               = film_grain->grain_seed;
        pp->film_grain.num_y_points             = film_grain->num_y_points;
        for (i = 0; i < film_grain->num_y_points; i++) {
            pp->film_grain.scaling_points_y[i][0] = film_grain->point_y_value[i];
            pp->film_grain.scaling_points_y[i][1] = film_grain->point_y_scaling[i];
        }
        pp->film_grain.num_cb_points            = film_grain->num_cb_points;
        for (i = 0; i < film_grain->num_cb_points; i++) {
            pp->film_grain.scaling_points_cb[i][0] = film_grain->point_cb_value[i];
            pp->film_grain.scaling_points_cb[i][1] = film_grain->point_cb_scaling[i];
        }
        pp->film_grain.num_cr_points            = film_grain->num_cr_points;
        for (i = 0; i < film_grain->num_cr_points; i++) {
            pp->film_grain.scaling_points_cr[i][0] = film_grain->point_cr_value[i];
            pp->film_grain.scaling_points_cr[i][1] = film_grain->point_cr_scaling[i];
        }
        for (i = 0; i < 24; i++) {
            pp->film_grain.ar_coeffs_y[i] = film_grain->ar_coeffs_y_plus_128[i];
        }
        for (i = 0; i < 25; i++) {
            pp->film_grain.ar_coeffs_cb[i] = film_grain->ar_coeffs_cb_plus_128[i];
            pp->film_grain.ar_coeffs_cr[i] = film_grain->ar_coeffs_cr_plus_128[i];
        }
        pp->film_grain.cb_mult      = film_grain->cb_mult;
        pp->film_grain.cb_luma_mult = film_grain->cb_luma_mult;
        pp->film_grain.cr_mult      = film_grain->cr_mult;
        pp->film_grain.cr_luma_mult = film_grain->cr_luma_mult;
        pp->film_grain.cb_offset    = film_grain->cb_offset;
        pp->film_grain.cr_offset    = film_grain->cr_offset;
    }

    // XXX: Setting the StatusReportFeedbackNumber breaks decoding on some drivers (tested on NVIDIA 457.09)
    // Status Reporting is not used by FFmpeg, hence not providing a number does not cause any issues
    //pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
    return 0;
}

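/* Start decoding a frame: validate the hwaccel context, fill the picture
 * parameters and reset the per-picture bitstream state. */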
static int dxva2_av1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const AV1DecContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct av1_dxva2_picture_context *ctx_pic = h->cur_frame.hwaccel_picture_private;

    if (!DXVA_CONTEXT_VALID(avctx, ctx))
        return -1;
    av_assert0(ctx_pic);

    /* Fill up DXVA_PicParams_AV1 */
    if (fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp) < 0)
        return -1;

    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}

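/* Called once per tile group OBU. If all of the frame's tiles arrive in a
 * single buffer, the picture context points directly at the input data;
 * otherwise the tile groups are accumulated in an internal cache. */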
static int dxva2_av1_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const AV1DecContext *h = avctx->priv_data;
    const AV1RawFrameHeader *frame_header = h->raw_frame_header;
    struct av1_dxva2_picture_context *ctx_pic = h->cur_frame.hwaccel_picture_private;
    struct AV1DXVAContext *ctx = avctx->internal->hwaccel_priv_data;
    void *tmp;

    ctx_pic->tile_count = frame_header->tile_cols * frame_header->tile_rows;

    /* too many tiles, exceeding all defined levels in the AV1 spec */
    if (ctx_pic->tile_count > MAX_TILES)
        return AVERROR(ENOSYS);

    /* Shortcut if all tiles are in the same buffer */
    if (ctx_pic->tile_count == h->tg_end - h->tg_start + 1) {
        ctx_pic->bitstream = (uint8_t *)buffer;
        ctx_pic->bitstream_size = size;

        for (uint32_t tile_num = 0; tile_num < ctx_pic->tile_count; tile_num++) {
            ctx_pic->tiles[tile_num].DataOffset   = h->tile_group_info[tile_num].tile_offset;
            ctx_pic->tiles[tile_num].DataSize     = h->tile_group_info[tile_num].tile_size;
            ctx_pic->tiles[tile_num].row          = h->tile_group_info[tile_num].tile_row;
            ctx_pic->tiles[tile_num].column       = h->tile_group_info[tile_num].tile_column;
            ctx_pic->tiles[tile_num].anchor_frame = 0xFF;
        }

        return 0;
    }

    /* allocate an internal buffer */
    tmp = av_fast_realloc(ctx->bitstream_cache, &ctx->bitstream_allocated,
                          ctx_pic->bitstream_size + size);
    if (!tmp) {
        return AVERROR(ENOMEM);
    }
    ctx_pic->bitstream = ctx->bitstream_cache = tmp;

    memcpy(ctx_pic->bitstream + ctx_pic->bitstream_size, buffer, size);

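    /* Record the tiles of this tile group; their offsets are relative to the
     * start of the accumulated bitstream, hence the bitstream_size bias. */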
    for (uint32_t tile_num = h->tg_start; tile_num <= h->tg_end; tile_num++) {
        ctx_pic->tiles[tile_num].DataOffset   = ctx_pic->bitstream_size + h->tile_group_info[tile_num].tile_offset;
        ctx_pic->tiles[tile_num].DataSize     = h->tile_group_info[tile_num].tile_size;
        ctx_pic->tiles[tile_num].row          = h->tile_group_info[tile_num].tile_row;
        ctx_pic->tiles[tile_num].column       = h->tile_group_info[tile_num].tile_column;
        ctx_pic->tiles[tile_num].anchor_frame = 0xFF;
    }

    ctx_pic->bitstream_size += size;

    return 0;
}

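/* Copy the frame's bitstream into the hardware bitstream buffer, fill the
 * buffer descriptors and commit the tile control data to the decoder. */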
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const AV1DecContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct av1_dxva2_picture_context *ctx_pic = h->cur_frame.hwaccel_picture_private;
    void     *dxva_data_ptr;
    uint8_t  *dxva_data;
    unsigned dxva_size;
    unsigned padding;
    unsigned type;

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;

    if (ctx_pic->bitstream_size > dxva_size) {
        av_log(avctx, AV_LOG_ERROR, "Bitstream size exceeds hardware buffer\n");
        return -1;
    }

    memcpy(dxva_data, ctx_pic->bitstream, ctx_pic->bitstream_size);

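    /* Zero-pad the bitstream up to the next 128-byte boundary, limited by the
     * remaining space in the hardware buffer. */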
    padding = FFMIN(128 - ((ctx_pic->bitstream_size) & 127), dxva_size - ctx_pic->bitstream_size);
    if (padding > 0) {
        memset(dxva_data + ctx_pic->bitstream_size, 0, padding);
        ctx_pic->bitstream_size += padding;
    }

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx))
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType           = type;
        dsc11->DataSize             = ctx_pic->bitstream_size;
        dsc11->NumMBsInBuffer       = 0;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = ctx_pic->bitstream_size;
        dsc2->NumMBsInBuffer       = 0;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    return ff_dxva2_commit_buffer(avctx, ctx, sc, type,
                                  ctx_pic->tiles, sizeof(*ctx_pic->tiles) * ctx_pic->tile_count, 0);
}

static int dxva2_av1_end_frame(AVCodecContext *avctx)
{
    const AV1DecContext *h = avctx->priv_data;
    struct av1_dxva2_picture_context *ctx_pic = h->cur_frame.hwaccel_picture_private;
    int ret;

    if (ctx_pic->bitstream_size <= 0)
        return -1;

    ret = ff_dxva2_common_end_frame(avctx, h->cur_frame.f,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    NULL, 0,
                                    commit_bitstream_and_slice_buffer);

    return ret;
}

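/* Free the cached bitstream buffer before tearing down the common DXVA2
 * decoding state. */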
static int dxva2_av1_uninit(AVCodecContext *avctx)
{
    struct AV1DXVAContext *ctx = avctx->internal->hwaccel_priv_data;

    av_freep(&ctx->bitstream_cache);
    ctx->bitstream_allocated = 0;

    return ff_dxva2_decode_uninit(avctx);
}

#if CONFIG_AV1_DXVA2_HWACCEL
const AVHWAccel ff_av1_dxva2_hwaccel = {
    .name           = "av1_dxva2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = dxva2_av1_uninit,
    .start_frame    = dxva2_av1_start_frame,
    .decode_slice   = dxva2_av1_decode_slice,
    .end_frame      = dxva2_av1_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct av1_dxva2_picture_context),
    .priv_data_size = sizeof(struct AV1DXVAContext),
};
#endif

#if CONFIG_AV1_D3D11VA_HWACCEL
const AVHWAccel ff_av1_d3d11va_hwaccel = {
    .name           = "av1_d3d11va",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .pix_fmt        = AV_PIX_FMT_D3D11VA_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = dxva2_av1_uninit,
    .start_frame    = dxva2_av1_start_frame,
    .decode_slice   = dxva2_av1_decode_slice,
    .end_frame      = dxva2_av1_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct av1_dxva2_picture_context),
    .priv_data_size = sizeof(struct AV1DXVAContext),
};
#endif

#if CONFIG_AV1_D3D11VA2_HWACCEL
const AVHWAccel ff_av1_d3d11va2_hwaccel = {
    .name           = "av1_d3d11va2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AV1,
    .pix_fmt        = AV_PIX_FMT_D3D11,
    .init           = ff_dxva2_decode_init,
    .uninit         = dxva2_av1_uninit,
    .start_frame    = dxva2_av1_start_frame,
    .decode_slice   = dxva2_av1_decode_slice,
    .end_frame      = dxva2_av1_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct av1_dxva2_picture_context),
    .priv_data_size = sizeof(struct AV1DXVAContext),
};
#endif
