/**************************************************************************
 *
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 *
 */

#include <sys/types.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <stdio.h>

#include "pipe/p_video_codec.h"

#include "util/u_memory.h"
#include "util/u_video.h"

#include "vl/vl_defines.h"
#include "vl/vl_mpeg12_decoder.h"

#include "r600_pipe_common.h"
#include "radeon_video.h"
#include "radeon_uvd.h"

#define NUM_BUFFERS 4

#define NUM_MPEG2_REFS 6
#define NUM_H264_REFS 17
#define NUM_VC1_REFS 5

#define FB_BUFFER_OFFSET 0x1000
#define FB_BUFFER_SIZE 2048
#define FB_BUFFER_SIZE_TONGA (2048 * 64)
#define IT_SCALING_TABLE_SIZE 992
#define UVD_SESSION_CONTEXT_SIZE (128 * 1024)
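
/*
 * Each msg_fb_it buffer packs three regions back to back: the ruvd_msg
 * structure at offset 0, the feedback area at FB_BUFFER_OFFSET and, for
 * H.264-perf and HEVC streams, the inverse transform scaling tables right
 * after the feedback area.  map_msg_fb_it_buf() derives its pointers from
 * exactly this layout.
 */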

/* UVD decoder representation */
struct ruvd_decoder {
	struct pipe_video_codec		base;

	ruvd_set_dtb			set_dtb;

	unsigned			stream_handle;
	unsigned			stream_type;
	unsigned			frame_number;

	struct pipe_screen		*screen;
	struct radeon_winsys*		ws;
	struct radeon_cmdbuf		cs;

	unsigned			cur_buffer;

	struct rvid_buffer		msg_fb_it_buffers[NUM_BUFFERS];
	struct ruvd_msg			*msg;
	uint32_t			*fb;
	unsigned			fb_size;
	uint8_t				*it;

	struct rvid_buffer		bs_buffers[NUM_BUFFERS];
	void*				bs_ptr;
	unsigned			bs_size;

	struct rvid_buffer		dpb;
	bool				use_legacy;
	struct rvid_buffer		ctx;
	struct rvid_buffer		sessionctx;
	struct {
		unsigned		data0;
		unsigned		data1;
		unsigned		cmd;
		unsigned		cntl;
	} reg;
};

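/*
 * All communication with the UVD block goes through register writes packed
 * into the command stream: RUVD_PKT0 encodes a single register write, and
 * the GPCOM data/cmd registers carry buffer addresses and commands for the
 * VCPU firmware.
 */
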
/* flush IB to the hardware */
static int flush(struct ruvd_decoder *dec, unsigned flags)
{
	return dec->ws->cs_flush(&dec->cs, flags, NULL);
}

/* add a new set register command to the IB */
static void set_reg(struct ruvd_decoder *dec, unsigned reg, uint32_t val)
{
	radeon_emit(&dec->cs, RUVD_PKT0(reg >> 2, 0));
	radeon_emit(&dec->cs, val);
}

/* send a command to the VCPU through the GPCOM registers */
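/*
 * Depending on the winsys, the buffer address is either written as a 64-bit
 * GPU virtual address split across DATA0/DATA1, or (legacy path) as a
 * relocation offset in DATA0 plus the relocation index in DATA1 so the
 * kernel can patch in the real address.
 */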
static void send_cmd(struct ruvd_decoder *dec, unsigned cmd,
		     struct pb_buffer* buf, uint32_t off,
		     unsigned usage, enum radeon_bo_domain domain)
{
	int reloc_idx;

	reloc_idx = dec->ws->cs_add_buffer(&dec->cs, buf, usage | RADEON_USAGE_SYNCHRONIZED,
					   domain);
	if (!dec->use_legacy) {
		uint64_t addr;
		addr = dec->ws->buffer_get_virtual_address(buf);
		addr = addr + off;
		set_reg(dec, dec->reg.data0, addr);
		set_reg(dec, dec->reg.data1, addr >> 32);
	} else {
		off += dec->ws->buffer_get_reloc_offset(buf);
		set_reg(dec, RUVD_GPCOM_VCPU_DATA0, off);
		set_reg(dec, RUVD_GPCOM_VCPU_DATA1, reloc_idx * 4);
	}
	set_reg(dec, dec->reg.cmd, cmd << 1);
}

/* does the codec need an IT buffer? */
static bool have_it(struct ruvd_decoder *dec)
{
	return dec->stream_type == RUVD_CODEC_H264_PERF ||
		dec->stream_type == RUVD_CODEC_H265;
}

/* map the next available message/feedback/itscaling buffer */
static void map_msg_fb_it_buf(struct ruvd_decoder *dec)
{
	struct rvid_buffer* buf;
	uint8_t *ptr;

	/* grab the current message/feedback buffer */
	buf = &dec->msg_fb_it_buffers[dec->cur_buffer];

	/* and map it for CPU access */
	ptr = dec->ws->buffer_map(dec->ws, buf->res->buf, &dec->cs,
				  PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);

	/* calc buffer offsets */
	dec->msg = (struct ruvd_msg *)ptr;
	memset(dec->msg, 0, sizeof(*dec->msg));

	dec->fb = (uint32_t *)(ptr + FB_BUFFER_OFFSET);
	if (have_it(dec))
		dec->it = (uint8_t *)(ptr + FB_BUFFER_OFFSET + dec->fb_size);
}

/* unmap and send a message command to the VCPU */
static void send_msg_buf(struct ruvd_decoder *dec)
{
	struct rvid_buffer* buf;

	/* ignore the request if message/feedback buffer isn't mapped */
	if (!dec->msg || !dec->fb)
		return;

	/* grab the current message buffer */
	buf = &dec->msg_fb_it_buffers[dec->cur_buffer];

	/* unmap the buffer */
	dec->ws->buffer_unmap(dec->ws, buf->res->buf);
	dec->bs_ptr = NULL;
	dec->msg = NULL;
	dec->fb = NULL;
	dec->it = NULL;

	if (dec->sessionctx.res)
		send_cmd(dec, RUVD_CMD_SESSION_CONTEXT_BUFFER,
			 dec->sessionctx.res->buf, 0, RADEON_USAGE_READWRITE,
			 RADEON_DOMAIN_VRAM);

	/* and send it to the hardware */
	send_cmd(dec, RUVD_CMD_MSG_BUFFER, buf->res->buf, 0,
		 RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
}

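/*
 * The message/feedback and bitstream buffers are used round robin over
 * NUM_BUFFERS slots, so the CPU can fill the next submission while earlier
 * ones may still be in flight on the hardware.
 */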
/* cycle to the next set of buffers */
static void next_buffer(struct ruvd_decoder *dec)
{
	++dec->cur_buffer;
	dec->cur_buffer %= NUM_BUFFERS;
}

/* convert the profile into something UVD understands */
static uint32_t profile2stream_type(struct ruvd_decoder *dec, unsigned family)
{
	switch (u_reduce_video_profile(dec->base.profile)) {
	case PIPE_VIDEO_FORMAT_MPEG4_AVC:
		return RUVD_CODEC_H264;

	case PIPE_VIDEO_FORMAT_VC1:
		return RUVD_CODEC_VC1;

	case PIPE_VIDEO_FORMAT_MPEG12:
		return RUVD_CODEC_MPEG2;

	case PIPE_VIDEO_FORMAT_MPEG4:
		return RUVD_CODEC_MPEG4;

	case PIPE_VIDEO_FORMAT_JPEG:
		return RUVD_CODEC_MJPEG;

	default:
		assert(0);
		return 0;
	}
}

static unsigned get_db_pitch_alignment(struct ruvd_decoder *dec)
{
	return 16;
}

/* calculate size of reference picture buffer */
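/*
 * The DPB is a single VRAM allocation backing all reference pictures plus,
 * depending on the codec, extra per macroblock context and IT/deblocking
 * surfaces.  The sizes below follow what the UVD firmware appears to expect
 * rather than the strict minimum the codec specs would allow.
 */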
static unsigned calc_dpb_size(struct ruvd_decoder *dec)
{
	unsigned width_in_mb, height_in_mb, image_size, dpb_size;

	// always align them to MB size for dpb calculation
	unsigned width = align(dec->base.width, VL_MACROBLOCK_WIDTH);
	unsigned height = align(dec->base.height, VL_MACROBLOCK_HEIGHT);

	// always one more for currently decoded picture
	unsigned max_references = dec->base.max_references + 1;

	// aligned size of a single frame
	image_size = align(width, get_db_pitch_alignment(dec)) * height;
	image_size += image_size / 2;
	image_size = align(image_size, 1024);

	// picture width & height in 16 pixel units
	width_in_mb = width / VL_MACROBLOCK_WIDTH;
	height_in_mb = align(height / VL_MACROBLOCK_HEIGHT, 2);

	switch (u_reduce_video_profile(dec->base.profile)) {
	case PIPE_VIDEO_FORMAT_MPEG4_AVC: {
		if (!dec->use_legacy) {
			unsigned fs_in_mb = width_in_mb * height_in_mb;
			unsigned alignment = 64, num_dpb_buffer;

			if (dec->stream_type == RUVD_CODEC_H264_PERF)
				alignment = 256;
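			/*
			 * The per-level constants below are the H.264 MaxDpbMbs
			 * limits from Table A-1 of the spec; dividing by the
			 * frame size in macroblocks gives the worst case number
			 * of reference frames the stream may need.
			 */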
			switch (dec->base.level) {
			case 30:
				num_dpb_buffer = 8100 / fs_in_mb;
				break;
			case 31:
				num_dpb_buffer = 18000 / fs_in_mb;
				break;
			case 32:
				num_dpb_buffer = 20480 / fs_in_mb;
				break;
			case 41:
				num_dpb_buffer = 32768 / fs_in_mb;
				break;
			case 42:
				num_dpb_buffer = 34816 / fs_in_mb;
				break;
			case 50:
				num_dpb_buffer = 110400 / fs_in_mb;
				break;
			case 51:
				num_dpb_buffer = 184320 / fs_in_mb;
				break;
			default:
				num_dpb_buffer = 184320 / fs_in_mb;
				break;
			}
			num_dpb_buffer++;
			max_references = MAX2(MIN2(NUM_H264_REFS, num_dpb_buffer), max_references);
			dpb_size = image_size * max_references;
			if (dec->stream_type != RUVD_CODEC_H264_PERF) {
				dpb_size += max_references * align(width_in_mb * height_in_mb * 192, alignment);
				dpb_size += align(width_in_mb * height_in_mb * 32, alignment);
			}
		} else {
			// the firmware seems to always assume a minimum of ref frames
			max_references = MAX2(NUM_H264_REFS, max_references);
			// reference picture buffer
			dpb_size = image_size * max_references;
			if (dec->stream_type != RUVD_CODEC_H264_PERF) {
				// macroblock context buffer
				dpb_size += width_in_mb * height_in_mb * max_references * 192;
				// IT surface buffer
				dpb_size += width_in_mb * height_in_mb * 32;
			}
		}
		break;
	}

	case PIPE_VIDEO_FORMAT_VC1:
		// the firmware seems to always assume a minimum of ref frames
		max_references = MAX2(NUM_VC1_REFS, max_references);

		// reference picture buffer
		dpb_size = image_size * max_references;

		// CONTEXT_BUFFER
		dpb_size += width_in_mb * height_in_mb * 128;

		// IT surface buffer
		dpb_size += width_in_mb * 64;

		// DB surface buffer
		dpb_size += width_in_mb * 128;

		// BP
		dpb_size += align(MAX2(width_in_mb, height_in_mb) * 7 * 16, 64);
		break;

	case PIPE_VIDEO_FORMAT_MPEG12:
		// reference picture buffer, must be big enough for all frames
		dpb_size = image_size * NUM_MPEG2_REFS;
		break;

	case PIPE_VIDEO_FORMAT_MPEG4:
		// reference picture buffer
		dpb_size = image_size * max_references;

		// CM
		dpb_size += width_in_mb * height_in_mb * 64;

		// IT surface buffer
		dpb_size += align(width_in_mb * height_in_mb * 32, 64);

		dpb_size = MAX2(dpb_size, 30 * 1024 * 1024);
		break;

	case PIPE_VIDEO_FORMAT_JPEG:
		dpb_size = 0;
		break;

	default:
		// something is missing here
		assert(0);

		// at least use a sane default value
		dpb_size = 32 * 1024 * 1024;
		break;
	}
	return dpb_size;
}

/* free associated data in the video buffer callback */
static void ruvd_destroy_associated_data(void *data)
{
	/* NOOP, since we only use an intptr */
}

/* get h264 specific message bits */
static struct ruvd_h264 get_h264_msg(struct ruvd_decoder *dec, struct pipe_h264_picture_desc *pic)
{
	struct ruvd_h264 result;

	memset(&result, 0, sizeof(result));
	switch (pic->base.profile) {
	case PIPE_VIDEO_PROFILE_MPEG4_AVC_BASELINE:
	case PIPE_VIDEO_PROFILE_MPEG4_AVC_CONSTRAINED_BASELINE:
		result.profile = RUVD_H264_PROFILE_BASELINE;
		break;

	case PIPE_VIDEO_PROFILE_MPEG4_AVC_MAIN:
		result.profile = RUVD_H264_PROFILE_MAIN;
		break;

	case PIPE_VIDEO_PROFILE_MPEG4_AVC_HIGH:
		result.profile = RUVD_H264_PROFILE_HIGH;
		break;

	default:
		assert(0);
		break;
	}

	result.level = dec->base.level;

	result.sps_info_flags = 0;
	result.sps_info_flags |= pic->pps->sps->direct_8x8_inference_flag << 0;
	result.sps_info_flags |= pic->pps->sps->mb_adaptive_frame_field_flag << 1;
	result.sps_info_flags |= pic->pps->sps->frame_mbs_only_flag << 2;
	result.sps_info_flags |= pic->pps->sps->delta_pic_order_always_zero_flag << 3;

	result.bit_depth_luma_minus8 = pic->pps->sps->bit_depth_luma_minus8;
	result.bit_depth_chroma_minus8 = pic->pps->sps->bit_depth_chroma_minus8;
	result.log2_max_frame_num_minus4 = pic->pps->sps->log2_max_frame_num_minus4;
	result.pic_order_cnt_type = pic->pps->sps->pic_order_cnt_type;
	result.log2_max_pic_order_cnt_lsb_minus4 = pic->pps->sps->log2_max_pic_order_cnt_lsb_minus4;

	switch (dec->base.chroma_format) {
	case PIPE_VIDEO_CHROMA_FORMAT_NONE:
		/* TODO: assert? */
		break;
	case PIPE_VIDEO_CHROMA_FORMAT_400:
		result.chroma_format = 0;
		break;
	case PIPE_VIDEO_CHROMA_FORMAT_420:
		result.chroma_format = 1;
		break;
	case PIPE_VIDEO_CHROMA_FORMAT_422:
		result.chroma_format = 2;
		break;
	case PIPE_VIDEO_CHROMA_FORMAT_444:
		result.chroma_format = 3;
		break;
	}

	result.pps_info_flags = 0;
	result.pps_info_flags |= pic->pps->transform_8x8_mode_flag << 0;
	result.pps_info_flags |= pic->pps->redundant_pic_cnt_present_flag << 1;
	result.pps_info_flags |= pic->pps->constrained_intra_pred_flag << 2;
	result.pps_info_flags |= pic->pps->deblocking_filter_control_present_flag << 3;
	result.pps_info_flags |= pic->pps->weighted_bipred_idc << 4;
	result.pps_info_flags |= pic->pps->weighted_pred_flag << 6;
	result.pps_info_flags |= pic->pps->bottom_field_pic_order_in_frame_present_flag << 7;
	result.pps_info_flags |= pic->pps->entropy_coding_mode_flag << 8;

	result.num_slice_groups_minus1 = pic->pps->num_slice_groups_minus1;
	result.slice_group_map_type = pic->pps->slice_group_map_type;
	result.slice_group_change_rate_minus1 = pic->pps->slice_group_change_rate_minus1;
	result.pic_init_qp_minus26 = pic->pps->pic_init_qp_minus26;
	result.chroma_qp_index_offset = pic->pps->chroma_qp_index_offset;
	result.second_chroma_qp_index_offset = pic->pps->second_chroma_qp_index_offset;

	memcpy(result.scaling_list_4x4, pic->pps->ScalingList4x4, 6*16);
	memcpy(result.scaling_list_8x8, pic->pps->ScalingList8x8, 2*64);

	if (dec->stream_type == RUVD_CODEC_H264_PERF) {
		memcpy(dec->it, result.scaling_list_4x4, 6*16);
		memcpy((dec->it + 96), result.scaling_list_8x8, 2*64);
	}

	result.num_ref_frames = pic->num_ref_frames;

	result.num_ref_idx_l0_active_minus1 = pic->num_ref_idx_l0_active_minus1;
	result.num_ref_idx_l1_active_minus1 = pic->num_ref_idx_l1_active_minus1;

	result.frame_num = pic->frame_num;
	memcpy(result.frame_num_list, pic->frame_num_list, 4*16);
	result.curr_field_order_cnt_list[0] = pic->field_order_cnt[0];
	result.curr_field_order_cnt_list[1] = pic->field_order_cnt[1];
	memcpy(result.field_order_cnt_list, pic->field_order_cnt_list, 4*16*2);

	result.decoded_pic_idx = pic->frame_num;

	return result;
}

/* get vc1 specific message bits */
static struct ruvd_vc1 get_vc1_msg(struct pipe_vc1_picture_desc *pic)
{
	struct ruvd_vc1 result;

	memset(&result, 0, sizeof(result));

	switch (pic->base.profile) {
	case PIPE_VIDEO_PROFILE_VC1_SIMPLE:
		result.profile = RUVD_VC1_PROFILE_SIMPLE;
		result.level = 1;
		break;

	case PIPE_VIDEO_PROFILE_VC1_MAIN:
		result.profile = RUVD_VC1_PROFILE_MAIN;
		result.level = 2;
		break;

	case PIPE_VIDEO_PROFILE_VC1_ADVANCED:
		result.profile = RUVD_VC1_PROFILE_ADVANCED;
		result.level = 4;
		break;

	default:
		assert(0);
	}

	/* fields common for all profiles */
	result.sps_info_flags |= pic->postprocflag << 7;
	result.sps_info_flags |= pic->pulldown << 6;
	result.sps_info_flags |= pic->interlace << 5;
	result.sps_info_flags |= pic->tfcntrflag << 4;
	result.sps_info_flags |= pic->finterpflag << 3;
	result.sps_info_flags |= pic->psf << 1;

	result.pps_info_flags |= pic->range_mapy_flag << 31;
	result.pps_info_flags |= pic->range_mapy << 28;
	result.pps_info_flags |= pic->range_mapuv_flag << 27;
	result.pps_info_flags |= pic->range_mapuv << 24;
	result.pps_info_flags |= pic->multires << 21;
	result.pps_info_flags |= pic->maxbframes << 16;
	result.pps_info_flags |= pic->overlap << 11;
	result.pps_info_flags |= pic->quantizer << 9;
	result.pps_info_flags |= pic->panscan_flag << 7;
	result.pps_info_flags |= pic->refdist_flag << 6;
	result.pps_info_flags |= pic->vstransform << 0;

	/* some fields only apply to main/advanced profile */
	if (pic->base.profile != PIPE_VIDEO_PROFILE_VC1_SIMPLE) {
		result.pps_info_flags |= pic->syncmarker << 20;
		result.pps_info_flags |= pic->rangered << 19;
		result.pps_info_flags |= pic->loopfilter << 5;
		result.pps_info_flags |= pic->fastuvmc << 4;
		result.pps_info_flags |= pic->extended_mv << 3;
		result.pps_info_flags |= pic->extended_dmv << 8;
		result.pps_info_flags |= pic->dquant << 1;
	}

	result.chroma_format = 1;

#if 0
//(((unsigned int)(pPicParams->advance.reserved1))        << SPS_INFO_VC1_RESERVED_SHIFT)
uint32_t 	slice_count
uint8_t 	picture_type
uint8_t 	frame_coding_mode
uint8_t 	deblockEnable
uint8_t 	pquant
#endif

	return result;
}

/* extract the frame number from a referenced video buffer */
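/*
 * Reference surfaces carry the frame number they were decoded with as
 * associated data; clamp that number to the window of frames that can still
 * be resident so a stale or missing reference never indexes outside the DPB.
 */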
static uint32_t get_ref_pic_idx(struct ruvd_decoder *dec, struct pipe_video_buffer *ref)
{
	uint32_t min = MAX2(dec->frame_number, NUM_MPEG2_REFS) - NUM_MPEG2_REFS;
	uint32_t max = MAX2(dec->frame_number, 1) - 1;
	uintptr_t frame;

	/* seems to be the most sane fallback */
	if (!ref)
		return max;

	/* get the frame number from the associated data */
	frame = (uintptr_t)vl_video_buffer_get_associated_data(ref, &dec->base);

	/* limit the frame number to a valid range */
	return MAX2(MIN2(frame, max), min);
}

/* get mpeg2 specific msg bits */
static struct ruvd_mpeg2 get_mpeg2_msg(struct ruvd_decoder *dec,
				       struct pipe_mpeg12_picture_desc *pic)
{
	const int *zscan = pic->alternate_scan ? vl_zscan_alternate : vl_zscan_normal;
	struct ruvd_mpeg2 result;
	unsigned i;

	memset(&result, 0, sizeof(result));
	result.decoded_pic_idx = dec->frame_number;
	for (i = 0; i < 2; ++i)
		result.ref_pic_idx[i] = get_ref_pic_idx(dec, pic->ref[i]);

	result.load_intra_quantiser_matrix = 1;
	result.load_nonintra_quantiser_matrix = 1;

	for (i = 0; i < 64; ++i) {
		result.intra_quantiser_matrix[i] = pic->intra_matrix[zscan[i]];
		result.nonintra_quantiser_matrix[i] = pic->non_intra_matrix[zscan[i]];
	}

	result.profile_and_level_indication = 0;
	result.chroma_format = 0x1;

	result.picture_coding_type = pic->picture_coding_type;
	result.f_code[0][0] = pic->f_code[0][0] + 1;
	result.f_code[0][1] = pic->f_code[0][1] + 1;
	result.f_code[1][0] = pic->f_code[1][0] + 1;
	result.f_code[1][1] = pic->f_code[1][1] + 1;
	result.intra_dc_precision = pic->intra_dc_precision;
	result.pic_structure = pic->picture_structure;
	result.top_field_first = pic->top_field_first;
	result.frame_pred_frame_dct = pic->frame_pred_frame_dct;
	result.concealment_motion_vectors = pic->concealment_motion_vectors;
	result.q_scale_type = pic->q_scale_type;
	result.intra_vlc_format = pic->intra_vlc_format;
	result.alternate_scan = pic->alternate_scan;

	return result;
}

/* get mpeg4 specific msg bits */
static struct ruvd_mpeg4 get_mpeg4_msg(struct ruvd_decoder *dec,
				       struct pipe_mpeg4_picture_desc *pic)
{
	struct ruvd_mpeg4 result;
	unsigned i;

	memset(&result, 0, sizeof(result));
	result.decoded_pic_idx = dec->frame_number;
	for (i = 0; i < 2; ++i)
		result.ref_pic_idx[i] = get_ref_pic_idx(dec, pic->ref[i]);

	result.variant_type = 0;
	result.profile_and_level_indication = 0xF0; // ASP Level0

	result.video_object_layer_verid = 0x5; // advanced simple
	result.video_object_layer_shape = 0x0; // rectangular

	result.video_object_layer_width = dec->base.width;
	result.video_object_layer_height = dec->base.height;

	result.vop_time_increment_resolution = pic->vop_time_increment_resolution;

	result.flags |= pic->short_video_header << 0;
	//result.flags |= obmc_disable << 1;
	result.flags |= pic->interlaced << 2;
	result.flags |= 1 << 3; // load_intra_quant_mat
	result.flags |= 1 << 4; // load_nonintra_quant_mat
	result.flags |= pic->quarter_sample << 5;
	result.flags |= 1 << 6; // complexity_estimation_disable
	result.flags |= pic->resync_marker_disable << 7;
	//result.flags |= data_partitioned << 8;
	//result.flags |= reversible_vlc << 9;
	result.flags |= 0 << 10; // newpred_enable
	result.flags |= 0 << 11; // reduced_resolution_vop_enable
	//result.flags |= scalability << 12;
	//result.flags |= is_object_layer_identifier << 13;
	//result.flags |= fixed_vop_rate << 14;
	//result.flags |= newpred_segment_type << 15;

	result.quant_type = pic->quant_type;

	for (i = 0; i < 64; ++i) {
		result.intra_quant_mat[i] = pic->intra_matrix[vl_zscan_normal[i]];
		result.nonintra_quant_mat[i] = pic->non_intra_matrix[vl_zscan_normal[i]];
	}

	/*
	int32_t 	trd [2]
	int32_t 	trb [2]
	uint8_t 	vop_coding_type
	uint8_t 	vop_fcode_forward
	uint8_t 	vop_fcode_backward
	uint8_t 	rounding_control
	uint8_t 	alternate_vertical_scan_flag
	uint8_t 	top_field_first
	*/

	return result;
}

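/*
 * UVD consumes MJPEG as a plain JPEG stream, so rebuild the headers described
 * by the picture/slice parameters in front of the scan data: SOI, DQT, DHT,
 * an optional DRI, SOF0 and SOS are written into the bitstream buffer here;
 * the matching EOI marker is appended in ruvd_decode_bitstream().
 */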
static void get_mjpeg_slice_header(struct ruvd_decoder *dec, struct pipe_mjpeg_picture_desc *pic)
{
	int size = 0, saved_size, len_pos, i;
	uint16_t *bs;
	uint8_t *buf = dec->bs_ptr;

	/* SOI */
	buf[size++] = 0xff;
	buf[size++] = 0xd8;

	/* DQT */
	buf[size++] = 0xff;
	buf[size++] = 0xdb;

	len_pos = size++;
	size++;

	for (i = 0; i < 4; ++i) {
		if (pic->quantization_table.load_quantiser_table[i] == 0)
			continue;

		buf[size++] = i;
		memcpy((buf + size), &pic->quantization_table.quantiser_table[i], 64);
		size += 64;
	}

	bs = (uint16_t*)&buf[len_pos];
	*bs = util_bswap16(size - 4);

	saved_size = size;

	/* DHT */
	buf[size++] = 0xff;
	buf[size++] = 0xc4;

	len_pos = size++;
	size++;

	for (i = 0; i < 2; ++i) {
		if (pic->huffman_table.load_huffman_table[i] == 0)
			continue;

		buf[size++] = 0x00 | i;
		memcpy((buf + size), &pic->huffman_table.table[i].num_dc_codes, 16);
		size += 16;
		memcpy((buf + size), &pic->huffman_table.table[i].dc_values, 12);
		size += 12;
	}

	for (i = 0; i < 2; ++i) {
		if (pic->huffman_table.load_huffman_table[i] == 0)
			continue;

		buf[size++] = 0x10 | i;
		memcpy((buf + size), &pic->huffman_table.table[i].num_ac_codes, 16);
		size += 16;
		memcpy((buf + size), &pic->huffman_table.table[i].ac_values, 162);
		size += 162;
	}

	bs = (uint16_t*)&buf[len_pos];
	*bs = util_bswap16(size - saved_size - 2);

	saved_size = size;

	/* DRI */
	if (pic->slice_parameter.restart_interval) {
		buf[size++] = 0xff;
		buf[size++] = 0xdd;
		buf[size++] = 0x00;
		buf[size++] = 0x04;
		bs = (uint16_t*)&buf[size++];
		*bs = util_bswap16(pic->slice_parameter.restart_interval);
		saved_size = ++size;
	}

	/* SOF */
	buf[size++] = 0xff;
	buf[size++] = 0xc0;

	len_pos = size++;
	size++;

	buf[size++] = 0x08;

	bs = (uint16_t*)&buf[size++];
	*bs = util_bswap16(pic->picture_parameter.picture_height);
	size++;

	bs = (uint16_t*)&buf[size++];
	*bs = util_bswap16(pic->picture_parameter.picture_width);
	size++;

	buf[size++] = pic->picture_parameter.num_components;

	for (i = 0; i < pic->picture_parameter.num_components; ++i) {
		buf[size++] = pic->picture_parameter.components[i].component_id;
		buf[size++] = pic->picture_parameter.components[i].h_sampling_factor << 4 |
			pic->picture_parameter.components[i].v_sampling_factor;
		buf[size++] = pic->picture_parameter.components[i].quantiser_table_selector;
	}

	bs = (uint16_t*)&buf[len_pos];
	*bs = util_bswap16(size - saved_size - 2);

	saved_size = size;

	/* SOS */
	buf[size++] = 0xff;
	buf[size++] = 0xda;

	len_pos = size++;
	size++;

	buf[size++] = pic->slice_parameter.num_components;

	for (i = 0; i < pic->slice_parameter.num_components; ++i) {
		buf[size++] = pic->slice_parameter.components[i].component_selector;
		buf[size++] = pic->slice_parameter.components[i].dc_table_selector << 4 |
			pic->slice_parameter.components[i].ac_table_selector;
	}

	buf[size++] = 0x00;
	buf[size++] = 0x3f;
	buf[size++] = 0x00;

	bs = (uint16_t*)&buf[len_pos];
	*bs = util_bswap16(size - saved_size - 2);

	dec->bs_ptr += size;
	dec->bs_size += size;
}

/**
 * destroy this video decoder
 */
static void ruvd_destroy(struct pipe_video_codec *decoder)
{
	struct ruvd_decoder *dec = (struct ruvd_decoder*)decoder;
	unsigned i;

	assert(decoder);

	map_msg_fb_it_buf(dec);
	dec->msg->size = sizeof(*dec->msg);
	dec->msg->msg_type = RUVD_MSG_DESTROY;
	dec->msg->stream_handle = dec->stream_handle;
	send_msg_buf(dec);

	flush(dec, 0);

	dec->ws->cs_destroy(&dec->cs);

	for (i = 0; i < NUM_BUFFERS; ++i) {
		rvid_destroy_buffer(&dec->msg_fb_it_buffers[i]);
		rvid_destroy_buffer(&dec->bs_buffers[i]);
	}

	rvid_destroy_buffer(&dec->dpb);
	rvid_destroy_buffer(&dec->ctx);
	rvid_destroy_buffer(&dec->sessionctx);

	FREE(dec);
}

/**
 * start decoding of a new frame
 */
static void ruvd_begin_frame(struct pipe_video_codec *decoder,
			     struct pipe_video_buffer *target,
			     struct pipe_picture_desc *picture)
{
	struct ruvd_decoder *dec = (struct ruvd_decoder*)decoder;
	uintptr_t frame;

	assert(decoder);

	frame = ++dec->frame_number;
	vl_video_buffer_set_associated_data(target, decoder, (void *)frame,
					    &ruvd_destroy_associated_data);

	dec->bs_size = 0;
	dec->bs_ptr = dec->ws->buffer_map(dec->ws,
		dec->bs_buffers[dec->cur_buffer].res->buf,
		&dec->cs, PIPE_MAP_WRITE | RADEON_MAP_TEMPORARY);
}

/**
 * decode a macroblock
 */
static void ruvd_decode_macroblock(struct pipe_video_codec *decoder,
				   struct pipe_video_buffer *target,
				   struct pipe_picture_desc *picture,
				   const struct pipe_macroblock *macroblocks,
				   unsigned num_macroblocks)
{
	/* not supported (yet) */
	assert(0);
}

/**
 * decode a bitstream
 */
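/*
 * Bitstream data accumulates in the current bs_buffer; if the mapped buffer
 * runs out of space it is unmapped, grown with rvid_resize_buffer() and
 * remapped before copying continues.  For MJPEG the rebuilt headers go in
 * first and the EOI marker is appended at the end.
 */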
static void ruvd_decode_bitstream(struct pipe_video_codec *decoder,
				  struct pipe_video_buffer *target,
				  struct pipe_picture_desc *picture,
				  unsigned num_buffers,
				  const void * const *buffers,
				  const unsigned *sizes)
{
	struct ruvd_decoder *dec = (struct ruvd_decoder*)decoder;
	enum pipe_video_format format = u_reduce_video_profile(picture->profile);
	unsigned i;

	assert(decoder);

	if (!dec->bs_ptr)
		return;

	if (format == PIPE_VIDEO_FORMAT_JPEG)
		get_mjpeg_slice_header(dec, (struct pipe_mjpeg_picture_desc*)picture);

	for (i = 0; i < num_buffers; ++i) {
		struct rvid_buffer *buf = &dec->bs_buffers[dec->cur_buffer];
		unsigned new_size = dec->bs_size + sizes[i];

		if (format == PIPE_VIDEO_FORMAT_JPEG)
			new_size += 2; /* save for EOI */

		if (new_size > buf->res->buf->size) {
			dec->ws->buffer_unmap(dec->ws, buf->res->buf);
			dec->bs_ptr = NULL;
			if (!rvid_resize_buffer(dec->screen, &dec->cs, buf, new_size)) {
				RVID_ERR("Can't resize bitstream buffer!");
				return;
			}

			dec->bs_ptr = dec->ws->buffer_map(dec->ws, buf->res->buf, &dec->cs,
							  PIPE_MAP_WRITE |
							  RADEON_MAP_TEMPORARY);
			if (!dec->bs_ptr)
				return;

			dec->bs_ptr += dec->bs_size;
		}

		memcpy(dec->bs_ptr, buffers[i], sizes[i]);
		dec->bs_size += sizes[i];
		dec->bs_ptr += sizes[i];
	}

	if (format == PIPE_VIDEO_FORMAT_JPEG) {
		((uint8_t *)dec->bs_ptr)[0] = 0xff;	/* EOI */
		((uint8_t *)dec->bs_ptr)[1] = 0xd9;
		dec->bs_size += 2;
		dec->bs_ptr += 2;
	}
}

/**
 * end decoding of the current frame
 */
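/*
 * Submitting a frame means: pad the bitstream up to a 128 byte multiple,
 * fill out the decode message for the codec, send the buffer commands (DPB,
 * context, bitstream, decoding target, feedback and the optional IT buffer),
 * kick the engine via dec->reg.cntl and flush the IB asynchronously.
 */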
static void ruvd_end_frame(struct pipe_video_codec *decoder,
			   struct pipe_video_buffer *target,
			   struct pipe_picture_desc *picture)
{
	struct ruvd_decoder *dec = (struct ruvd_decoder*)decoder;
	struct pb_buffer *dt;
	struct rvid_buffer *msg_fb_it_buf, *bs_buf;
	unsigned bs_size;

	assert(decoder);

	if (!dec->bs_ptr)
		return;

	msg_fb_it_buf = &dec->msg_fb_it_buffers[dec->cur_buffer];
	bs_buf = &dec->bs_buffers[dec->cur_buffer];

	bs_size = align(dec->bs_size, 128);
	memset(dec->bs_ptr, 0, bs_size - dec->bs_size);
	dec->ws->buffer_unmap(dec->ws, bs_buf->res->buf);
	dec->bs_ptr = NULL;

	map_msg_fb_it_buf(dec);
	dec->msg->size = sizeof(*dec->msg);
	dec->msg->msg_type = RUVD_MSG_DECODE;
	dec->msg->stream_handle = dec->stream_handle;
	dec->msg->status_report_feedback_number = dec->frame_number;

	dec->msg->body.decode.stream_type = dec->stream_type;
	dec->msg->body.decode.decode_flags = 0x1;
	dec->msg->body.decode.width_in_samples = dec->base.width;
	dec->msg->body.decode.height_in_samples = dec->base.height;

	if ((picture->profile == PIPE_VIDEO_PROFILE_VC1_SIMPLE) ||
	    (picture->profile == PIPE_VIDEO_PROFILE_VC1_MAIN)) {
		dec->msg->body.decode.width_in_samples = align(dec->msg->body.decode.width_in_samples, 16) / 16;
		dec->msg->body.decode.height_in_samples = align(dec->msg->body.decode.height_in_samples, 16) / 16;
	}

	if (dec->dpb.res)
		dec->msg->body.decode.dpb_size = dec->dpb.res->buf->size;
	dec->msg->body.decode.bsd_size = bs_size;
	dec->msg->body.decode.db_pitch = align(dec->base.width, get_db_pitch_alignment(dec));

	dt = dec->set_dtb(dec->msg, (struct vl_video_buffer *)target);

	switch (u_reduce_video_profile(picture->profile)) {
	case PIPE_VIDEO_FORMAT_MPEG4_AVC:
		dec->msg->body.decode.codec.h264 = get_h264_msg(dec, (struct pipe_h264_picture_desc*)picture);
		break;

	case PIPE_VIDEO_FORMAT_VC1:
		dec->msg->body.decode.codec.vc1 = get_vc1_msg((struct pipe_vc1_picture_desc*)picture);
		break;

	case PIPE_VIDEO_FORMAT_MPEG12:
		dec->msg->body.decode.codec.mpeg2 = get_mpeg2_msg(dec, (struct pipe_mpeg12_picture_desc*)picture);
		break;

	case PIPE_VIDEO_FORMAT_MPEG4:
		dec->msg->body.decode.codec.mpeg4 = get_mpeg4_msg(dec, (struct pipe_mpeg4_picture_desc*)picture);
		break;

	case PIPE_VIDEO_FORMAT_JPEG:
		break;

	default:
		assert(0);
		return;
	}

	dec->msg->body.decode.db_surf_tile_config = dec->msg->body.decode.dt_surf_tile_config;
	dec->msg->body.decode.extension_support = 0x1;

	/* set at least the feedback buffer size */
	dec->fb[0] = dec->fb_size;

	send_msg_buf(dec);

	if (dec->dpb.res)
		send_cmd(dec, RUVD_CMD_DPB_BUFFER, dec->dpb.res->buf, 0,
			RADEON_USAGE_READWRITE, RADEON_DOMAIN_VRAM);

	if (dec->ctx.res)
		send_cmd(dec, RUVD_CMD_CONTEXT_BUFFER, dec->ctx.res->buf, 0,
			RADEON_USAGE_READWRITE, RADEON_DOMAIN_VRAM);
	send_cmd(dec, RUVD_CMD_BITSTREAM_BUFFER, bs_buf->res->buf,
		 0, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
	send_cmd(dec, RUVD_CMD_DECODING_TARGET_BUFFER, dt, 0,
		 RADEON_USAGE_WRITE, RADEON_DOMAIN_VRAM);
	send_cmd(dec, RUVD_CMD_FEEDBACK_BUFFER, msg_fb_it_buf->res->buf,
		 FB_BUFFER_OFFSET, RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT);
	if (have_it(dec))
		send_cmd(dec, RUVD_CMD_ITSCALING_TABLE_BUFFER, msg_fb_it_buf->res->buf,
			 FB_BUFFER_OFFSET + dec->fb_size, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
	set_reg(dec, dec->reg.cntl, 1);

	flush(dec, PIPE_FLUSH_ASYNC);
	next_buffer(dec);
}

/**
 * flush any outstanding command buffers to the hardware
 */
static void ruvd_flush(struct pipe_video_codec *decoder)
{
}

/**
 * create a UVD decoder
 */
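/*
 * Creation falls back to the generic shader based MPEG1/2 decoder on
 * pre-PALM chips, allocates the message/feedback and bitstream buffer rings
 * plus the DPB, and submits a RUVD_MSG_CREATE message so the firmware can
 * set up the session before the first frame is decoded.
 */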
struct pipe_video_codec *ruvd_create_decoder(struct pipe_context *context,
					     const struct pipe_video_codec *templ,
					     ruvd_set_dtb set_dtb)
{
	struct radeon_winsys* ws = ((struct r600_common_context *)context)->ws;
	struct r600_common_context *rctx = (struct r600_common_context*)context;
	unsigned dpb_size;
	unsigned width = templ->width, height = templ->height;
	unsigned bs_buf_size;
	struct radeon_info info;
	struct ruvd_decoder *dec;
	int r, i;

	ws->query_info(ws, &info, false, false);

	switch (u_reduce_video_profile(templ->profile)) {
	case PIPE_VIDEO_FORMAT_MPEG12:
		if (templ->entrypoint > PIPE_VIDEO_ENTRYPOINT_BITSTREAM || info.family < CHIP_PALM)
			return vl_create_mpeg12_decoder(context, templ);

		FALLTHROUGH;
	case PIPE_VIDEO_FORMAT_MPEG4:
		width = align(width, VL_MACROBLOCK_WIDTH);
		height = align(height, VL_MACROBLOCK_HEIGHT);
		break;
	case PIPE_VIDEO_FORMAT_MPEG4_AVC:
		width = align(width, VL_MACROBLOCK_WIDTH);
		height = align(height, VL_MACROBLOCK_HEIGHT);
		break;

	default:
		break;
	}

	dec = CALLOC_STRUCT(ruvd_decoder);

	if (!dec)
		return NULL;

	dec->use_legacy = true;

	dec->base = *templ;
	dec->base.context = context;
	dec->base.width = width;
	dec->base.height = height;

	dec->base.destroy = ruvd_destroy;
	dec->base.begin_frame = ruvd_begin_frame;
	dec->base.decode_macroblock = ruvd_decode_macroblock;
	dec->base.decode_bitstream = ruvd_decode_bitstream;
	dec->base.end_frame = ruvd_end_frame;
	dec->base.flush = ruvd_flush;

	dec->stream_type = profile2stream_type(dec, info.family);
	dec->set_dtb = set_dtb;
	dec->stream_handle = rvid_alloc_stream_handle();
	dec->screen = context->screen;
	dec->ws = ws;

	if (!ws->cs_create(&dec->cs, rctx->ctx, AMD_IP_UVD, NULL, NULL, false)) {
		RVID_ERR("Can't get command submission context.\n");
		goto error;
	}

	dec->fb_size = FB_BUFFER_SIZE;
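	/* Start with roughly 2 bytes of bitstream space per pixel (512 bits per
	 * 16x16 macroblock); ruvd_decode_bitstream() grows the buffer on demand. */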
	bs_buf_size = width * height * (512 / (16 * 16));
	for (i = 0; i < NUM_BUFFERS; ++i) {
		unsigned msg_fb_it_size = FB_BUFFER_OFFSET + dec->fb_size;
		STATIC_ASSERT(sizeof(struct ruvd_msg) <= FB_BUFFER_OFFSET);
		if (have_it(dec))
			msg_fb_it_size += IT_SCALING_TABLE_SIZE;
		if (!rvid_create_buffer(dec->screen, &dec->msg_fb_it_buffers[i],
					msg_fb_it_size, PIPE_USAGE_STAGING)) {
			RVID_ERR("Can't allocate message buffers.\n");
			goto error;
		}

		if (!rvid_create_buffer(dec->screen, &dec->bs_buffers[i],
					bs_buf_size, PIPE_USAGE_STAGING)) {
			RVID_ERR("Can't allocate bitstream buffers.\n");
			goto error;
		}

		rvid_clear_buffer(context, &dec->msg_fb_it_buffers[i]);
		rvid_clear_buffer(context, &dec->bs_buffers[i]);
	}

	dpb_size = calc_dpb_size(dec);
	if (dpb_size) {
		if (!rvid_create_buffer(dec->screen, &dec->dpb, dpb_size, PIPE_USAGE_DEFAULT)) {
			RVID_ERR("Can't allocate dpb.\n");
			goto error;
		}
		rvid_clear_buffer(context, &dec->dpb);
	}

	dec->reg.data0 = RUVD_GPCOM_VCPU_DATA0;
	dec->reg.data1 = RUVD_GPCOM_VCPU_DATA1;
	dec->reg.cmd = RUVD_GPCOM_VCPU_CMD;
	dec->reg.cntl = RUVD_ENGINE_CNTL;

	map_msg_fb_it_buf(dec);
	dec->msg->size = sizeof(*dec->msg);
	dec->msg->msg_type = RUVD_MSG_CREATE;
	dec->msg->stream_handle = dec->stream_handle;
	dec->msg->body.create.stream_type = dec->stream_type;
	dec->msg->body.create.width_in_samples = dec->base.width;
	dec->msg->body.create.height_in_samples = dec->base.height;
	dec->msg->body.create.dpb_size = dpb_size;
	send_msg_buf(dec);
	r = flush(dec, 0);
	if (r)
		goto error;

	next_buffer(dec);

	return &dec->base;

error:
	dec->ws->cs_destroy(&dec->cs);

	for (i = 0; i < NUM_BUFFERS; ++i) {
		rvid_destroy_buffer(&dec->msg_fb_it_buffers[i]);
		rvid_destroy_buffer(&dec->bs_buffers[i]);
	}

	rvid_destroy_buffer(&dec->dpb);
	rvid_destroy_buffer(&dec->ctx);
	rvid_destroy_buffer(&dec->sessionctx);

	FREE(dec);

	return NULL;
}

/* calculate top/bottom offset */
static unsigned texture_offset(struct radeon_surf *surface, unsigned layer)
{
	return (uint64_t)surface->u.legacy.level[0].offset_256B * 256 +
		layer * (uint64_t)surface->u.legacy.level[0].slice_size_dw * 4;
}

/* hw encode the aspect of macro tiles */
static unsigned macro_tile_aspect(unsigned macro_tile_aspect)
{
	switch (macro_tile_aspect) {
	default:
	case 1: macro_tile_aspect = 0;  break;
	case 2: macro_tile_aspect = 1;  break;
	case 4: macro_tile_aspect = 2;  break;
	case 8: macro_tile_aspect = 3;  break;
	}
	return macro_tile_aspect;
}

/* hw encode the bank width and height */
static unsigned bank_wh(unsigned bankwh)
{
	switch (bankwh) {
	default:
	case 1: bankwh = 0;     break;
	case 2: bankwh = 1;     break;
	case 4: bankwh = 2;     break;
	case 8: bankwh = 3;     break;
	}
	return bankwh;
}

/**
 * fill decoding target field from the luma and chroma surfaces
 */
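/*
 * The decoding target description mirrors the radeon_surf layout: pitch in
 * pixels, the tiling/array mode of mip level 0, per field luma/chroma
 * offsets, and the bank width/height and macro tile aspect ratio encoded as
 * log2 values in dt_surf_tile_config.
 */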
void ruvd_set_dt_surfaces(struct ruvd_msg *msg, struct radeon_surf *luma,
			  struct radeon_surf *chroma)
{
	msg->body.decode.dt_pitch = luma->u.legacy.level[0].nblk_x * luma->blk_w;
	switch (luma->u.legacy.level[0].mode) {
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		msg->body.decode.dt_tiling_mode = RUVD_TILE_LINEAR;
		msg->body.decode.dt_array_mode = RUVD_ARRAY_MODE_LINEAR;
		break;
	case RADEON_SURF_MODE_1D:
		msg->body.decode.dt_tiling_mode = RUVD_TILE_8X8;
		msg->body.decode.dt_array_mode = RUVD_ARRAY_MODE_1D_THIN;
		break;
	case RADEON_SURF_MODE_2D:
		msg->body.decode.dt_tiling_mode = RUVD_TILE_8X8;
		msg->body.decode.dt_array_mode = RUVD_ARRAY_MODE_2D_THIN;
		break;
	default:
		assert(0);
		break;
	}

	msg->body.decode.dt_luma_top_offset = texture_offset(luma, 0);
	if (chroma)
		msg->body.decode.dt_chroma_top_offset = texture_offset(chroma, 0);
	if (msg->body.decode.dt_field_mode) {
		msg->body.decode.dt_luma_bottom_offset = texture_offset(luma, 1);
		if (chroma)
			msg->body.decode.dt_chroma_bottom_offset = texture_offset(chroma, 1);
	} else {
		msg->body.decode.dt_luma_bottom_offset = msg->body.decode.dt_luma_top_offset;
		msg->body.decode.dt_chroma_bottom_offset = msg->body.decode.dt_chroma_top_offset;
	}

	if (chroma) {
		assert(luma->u.legacy.bankw == chroma->u.legacy.bankw);
		assert(luma->u.legacy.bankh == chroma->u.legacy.bankh);
		assert(luma->u.legacy.mtilea == chroma->u.legacy.mtilea);
	}

	msg->body.decode.dt_surf_tile_config |= RUVD_BANK_WIDTH(bank_wh(luma->u.legacy.bankw));
	msg->body.decode.dt_surf_tile_config |= RUVD_BANK_HEIGHT(bank_wh(luma->u.legacy.bankh));
	msg->body.decode.dt_surf_tile_config |= RUVD_MACRO_TILE_ASPECT_RATIO(macro_tile_aspect(luma->u.legacy.mtilea));
}