/*
 * DXVA2 VP9 HW acceleration.
 *
 * copyright (c) 2015 Hendrik Leppkes
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"

#include "dxva2_internal.h"
#include "vp9shared.h"

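/* Per-frame private data attached to each VP9 frame: the picture parameters
 * and slice control structures handed to the accelerator, plus a pointer to
 * the raw frame bitstream and its size. */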
struct vp9_dxva2_picture_context {
    DXVA_PicParams_VP9   pp;
    DXVA_Slice_VPx_Short slice;
    const uint8_t       *bitstream;
    unsigned             bitstream_size;
};

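/* Pack a 7-bit surface index and a 1-bit flag into a DXVA picture entry. */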
static void fill_picture_entry(DXVA_PicEntry_VPx *pic,
                               unsigned index, unsigned flag)
{
    av_assert0((index & 0x7f) == index && (flag & 0x01) == flag);
    pic->bPicEntry = index | (flag << 7);
}

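/* Translate the parsed VP9 frame header from VP9SharedContext into the
 * DXVA_PicParams_VP9 structure consumed by the accelerator. */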
static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, const VP9SharedContext *h,
                                   DXVA_PicParams_VP9 *pp)
{
    int i;
    const AVPixFmtDescriptor * pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);

    if (!pixdesc)
        return -1;

    memset(pp, 0, sizeof(*pp));

    fill_picture_entry(&pp->CurrPic, ff_dxva2_get_surface_index(avctx, ctx, h->frames[CUR_FRAME].tf.f), 0);

    pp->profile = h->h.profile;
    pp->wFormatAndPictureInfoFlags = ((h->h.keyframe == 0)   <<  0) |
                                     ((h->h.invisible == 0)  <<  1) |
                                     (h->h.errorres          <<  2) |
                                     (pixdesc->log2_chroma_w <<  3) | /* subsampling_x */
                                     (pixdesc->log2_chroma_h <<  4) | /* subsampling_y */
                                     (0                      <<  5) | /* extra_plane */
                                     (h->h.refreshctx        <<  6) |
                                     (h->h.parallelmode      <<  7) |
                                     (h->h.intraonly         <<  8) |
                                     (h->h.framectxid        <<  9) |
                                     (h->h.resetctx          << 11) |
                                     ((h->h.keyframe ? 0 : h->h.highprecisionmvs) << 13) |
                                     (0                      << 14); /* ReservedFormatInfo2Bits */

    pp->width  = avctx->width;
    pp->height = avctx->height;
    pp->BitDepthMinus8Luma   = pixdesc->comp[0].depth - 8;
    pp->BitDepthMinus8Chroma = pixdesc->comp[1].depth - 8;
    /* swap 0/1 to match the reference */
    pp->interp_filter = h->h.filtermode ^ (h->h.filtermode <= 1);
    pp->Reserved8Bits = 0;

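    /* All eight reference slots are exported; empty slots are marked with
     * the invalid picture entry 0xFF. */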
    for (i = 0; i < 8; i++) {
        if (h->refs[i].f->buf[0]) {
            fill_picture_entry(&pp->ref_frame_map[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[i].f), 0);
            pp->ref_frame_coded_width[i]  = h->refs[i].f->width;
            pp->ref_frame_coded_height[i] = h->refs[i].f->height;
        } else
            pp->ref_frame_map[i].bPicEntry = 0xFF;
    }

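    /* The three active references (LAST, GOLDEN, ALTREF) selected by refidx;
     * sign_bias uses the i + 1 offset because index 0 corresponds to the
     * intra frame. */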
    for (i = 0; i < 3; i++) {
        uint8_t refidx = h->h.refidx[i];
        if (h->refs[refidx].f->buf[0])
            fill_picture_entry(&pp->frame_refs[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[refidx].f), 0);
        else
            pp->frame_refs[i].bPicEntry = 0xFF;

        pp->ref_frame_sign_bias[i + 1] = h->h.signbias[i];
    }

    pp->filter_level    = h->h.filter.level;
    pp->sharpness_level = h->h.filter.sharpness;

    pp->wControlInfoFlags = (h->h.lf_delta.enabled   << 0) |
                            (h->h.lf_delta.updated   << 1) |
                            (h->h.use_last_frame_mvs << 2) |
                            (0                       << 3); /* ReservedControlInfo5Bits */

    for (i = 0; i < 4; i++)
        pp->ref_deltas[i]  = h->h.lf_delta.ref[i];

    for (i = 0; i < 2; i++)
        pp->mode_deltas[i] = h->h.lf_delta.mode[i];

    pp->base_qindex   = h->h.yac_qi;
    pp->y_dc_delta_q  = h->h.ydc_qdelta;
    pp->uv_dc_delta_q = h->h.uvdc_qdelta;
    pp->uv_ac_delta_q = h->h.uvac_qdelta;

    /* segmentation data */
    pp->stVP9Segments.wSegmentInfoFlags = (h->h.segmentation.enabled       << 0) |
                                          (h->h.segmentation.update_map    << 1) |
                                          (h->h.segmentation.temporal      << 2) |
                                          (h->h.segmentation.absolute_vals << 3) |
                                          (0                               << 4); /* ReservedSegmentFlags4Bits */

    for (i = 0; i < 7; i++)
        pp->stVP9Segments.tree_probs[i] = h->h.segmentation.prob[i];

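    /* Temporal segment-map prediction probabilities; when temporal prediction
     * is disabled the slots are signalled as unused (0xFF). */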
    if (h->h.segmentation.temporal)
        for (i = 0; i < 3; i++)
            pp->stVP9Segments.pred_probs[i] = h->h.segmentation.pred_prob[i];
    else
        memset(pp->stVP9Segments.pred_probs, 255, sizeof(pp->stVP9Segments.pred_probs));

    for (i = 0; i < 8; i++) {
        pp->stVP9Segments.feature_mask[i] = (h->h.segmentation.feat[i].q_enabled    << 0) |
                                            (h->h.segmentation.feat[i].lf_enabled   << 1) |
                                            (h->h.segmentation.feat[i].ref_enabled  << 2) |
                                            (h->h.segmentation.feat[i].skip_enabled << 3);

        pp->stVP9Segments.feature_data[i][0] = h->h.segmentation.feat[i].q_val;
        pp->stVP9Segments.feature_data[i][1] = h->h.segmentation.feat[i].lf_val;
        pp->stVP9Segments.feature_data[i][2] = h->h.segmentation.feat[i].ref_val;
        pp->stVP9Segments.feature_data[i][3] = 0; /* no data for skip */
    }

    pp->log2_tile_cols = h->h.tiling.log2_tile_cols;
    pp->log2_tile_rows = h->h.tiling.log2_tile_rows;

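    /* Sizes of the two frame-header parts: the byte-aligned uncompressed
     * header and the compressed header, which DXVA calls the first partition. */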
    pp->uncompressed_header_size_byte_aligned = h->h.uncompressed_header_size;
    pp->first_partition_size = h->h.compressed_header_size;

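    /* Monotonically increasing report ID, offset by one so it is never zero. */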
    pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
    return 0;
}

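/* Fill the short-form slice control entry: offset and size of the frame data
 * in the bitstream buffer, with no chopping. */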
static void fill_slice_short(DXVA_Slice_VPx_Short *slice,
                             unsigned position, unsigned size)
{
    memset(slice, 0, sizeof(*slice));
    slice->BSNALunitDataLocation = position;
    slice->SliceBytesInBuffer    = size;
    slice->wBadSliceChopping     = 0;
}

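/* Copy the frame bitstream into the decoder's bitstream buffer, zero-pad it
 * to a 128-byte boundary, and submit the bitstream and slice control buffers
 * through the D3D11 or DXVA2 backend. */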
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const VP9SharedContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    void     *dxva_data_ptr;
    uint8_t  *dxva_data;
    unsigned dxva_size;
    unsigned padding;
    unsigned type;

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;

    if (ctx_pic->slice.SliceBytesInBuffer > dxva_size) {
        av_log(avctx, AV_LOG_ERROR, "Failed to build bitstream\n");
        return -1;
    }

    memcpy(dxva_data, ctx_pic->bitstream, ctx_pic->slice.SliceBytesInBuffer);

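    /* Zero-pad the bitstream up to the next 128-byte boundary, but never
     * beyond the end of the buffer returned by the decoder. */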
    padding = FFMIN(128 - ((ctx_pic->slice.SliceBytesInBuffer) & 127), dxva_size - ctx_pic->slice.SliceBytesInBuffer);
    if (padding > 0) {
        memset(dxva_data + ctx_pic->slice.SliceBytesInBuffer, 0, padding);
        ctx_pic->slice.SliceBytesInBuffer += padding;
    }

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx))
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType     = type;
        dsc11->DataSize       = ctx_pic->slice.SliceBytesInBuffer;
        dsc11->NumMBsInBuffer = 0;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = ctx_pic->slice.SliceBytesInBuffer;
        dsc2->NumMBsInBuffer       = 0;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    return ff_dxva2_commit_buffer(avctx, ctx, sc,
                                  type,
                                  &ctx_pic->slice, sizeof(ctx_pic->slice), 0);
}


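/* Start a frame: validate the DXVA context and fill the picture parameters;
 * the bitstream itself is collected later in decode_slice. */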
static int dxva2_vp9_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;

    if (!DXVA_CONTEXT_VALID(avctx, ctx))
        return -1;
    av_assert0(ctx_pic);

    /* Fill up DXVA_PicParams_VP9 */
    if (fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp) < 0)
        return -1;

    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}

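/* VP9 has no slices: the whole frame arrives as a single buffer, which is
 * recorded here and described by one short slice control entry. */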
static int dxva2_vp9_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    unsigned position;

    if (!ctx_pic->bitstream)
        ctx_pic->bitstream = buffer;
    ctx_pic->bitstream_size += size;

    position = buffer - ctx_pic->bitstream;
    fill_slice_short(&ctx_pic->slice, position, size);

    return 0;
}

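/* End of frame: submit picture parameters, slice control and bitstream to the
 * accelerator via the common DXVA2/D3D11 path. */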
static int dxva2_vp9_end_frame(AVCodecContext *avctx)
{
    VP9SharedContext *h = avctx->priv_data;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    int ret;

    if (ctx_pic->bitstream_size <= 0)
        return -1;

    ret = ff_dxva2_common_end_frame(avctx, h->frames[CUR_FRAME].tf.f,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    NULL, 0,
                                    commit_bitstream_and_slice_buffer);
    return ret;
}

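/* Hardware accelerator descriptors: the DXVA2, legacy D3D11VA and D3D11
 * variants differ only in name and pixel format and share all callbacks. */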
#if CONFIG_VP9_DXVA2_HWACCEL
const AVHWAccel ff_vp9_dxva2_hwaccel = {
    .name           = "vp9_dxva2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_vp9_start_frame,
    .decode_slice   = dxva2_vp9_decode_slice,
    .end_frame      = dxva2_vp9_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_VP9_D3D11VA_HWACCEL
const AVHWAccel ff_vp9_d3d11va_hwaccel = {
    .name           = "vp9_d3d11va",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_D3D11VA_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_vp9_start_frame,
    .decode_slice   = dxva2_vp9_decode_slice,
    .end_frame      = dxva2_vp9_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_VP9_D3D11VA2_HWACCEL
const AVHWAccel ff_vp9_d3d11va2_hwaccel = {
    .name           = "vp9_d3d11va2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_D3D11,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_vp9_start_frame,
    .decode_slice   = dxva2_vp9_decode_slice,
    .end_frame      = dxva2_vp9_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif