Lines Matching refs:enc
80 static struct amdgpu_vce_encode enc;
172 memset(&enc, 0, sizeof(struct amdgpu_vce_encode));
292 enc.width = vce_create[6];
293 enc.height = vce_create[7];
296 alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
297 resources[num_resources++] = enc.fb[0].handle;
306 ib_cpu[len + 8] = ALIGN(enc.width, align);
307 ib_cpu[len + 9] = ALIGN(enc.width, align);
316 ib_cpu[len + 2] = enc.fb[0].addr >> 32;
317 ib_cpu[len + 3] = enc.fb[0].addr;
323 free_resource(&enc.fb[0]);
353 static void amdgpu_cs_vce_encode_idr(struct amdgpu_vce_encode *enc)
358 unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
361 luma_offset = enc->vbuf.addr;
369 ib_cpu[len + 2] = enc->bs[0].addr >> 32;
370 ib_cpu[len + 3] = enc->bs[0].addr;
373 ib_cpu[len + 2] = enc->cpb.addr >> 32;
374 ib_cpu[len + 3] = enc->cpb.addr;
383 ib_cpu[len + 2] = enc->fb[0].addr >> 32;
384 ib_cpu[len + 3] = enc->fb[0].addr;
391 ib_cpu[len + 14] = ALIGN(enc->width, align);
392 ib_cpu[len + 15] = ALIGN(enc->width, align);
396 enc->ib_len = len;
397 if (!enc->two_instance) {
403 static void amdgpu_cs_vce_encode_p(struct amdgpu_vce_encode *enc)
408 unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
410 len = (enc->two_instance) ? enc->ib_len : 0;
411 luma_offset = enc->vbuf.addr;
414 if (!enc->two_instance) {
421 ib_cpu[len + 2] = enc->bs[1].addr >> 32;
422 ib_cpu[len + 3] = enc->bs[1].addr;
425 ib_cpu[len + 2] = enc->cpb.addr >> 32;
426 ib_cpu[len + 3] = enc->cpb.addr;
435 ib_cpu[len + 2] = enc->fb[1].addr >> 32;
436 ib_cpu[len + 3] = enc->fb[1].addr;
444 ib_cpu[len + 14] = ALIGN(enc->width, align);
445 ib_cpu[len + 15] = ALIGN(enc->width, align);
463 static void check_result(struct amdgpu_vce_encode *enc)
471 r = amdgpu_bo_cpu_map(enc->fb[i].handle, (void **)&enc->fb[i].ptr);
473 ptr = (uint32_t *)enc->fb[i].ptr;
475 r = amdgpu_bo_cpu_unmap(enc->fb[i].handle);
477 r = amdgpu_bo_cpu_map(enc->bs[i].handle, (void **)&enc->bs[i].ptr);
480 sum += enc->bs[i].ptr[j];
482 r = amdgpu_bo_cpu_unmap(enc->bs[i].handle);
493 vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
496 alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
497 resources[num_resources++] = enc.fb[0].handle;
498 alloc_resource(&enc.fb[1], 4096, AMDGPU_GEM_DOMAIN_GTT);
499 resources[num_resources++] = enc.fb[1].handle;
500 alloc_resource(&enc.bs[0], bs_size, AMDGPU_GEM_DOMAIN_GTT);
501 resources[num_resources++] = enc.bs[0].handle;
502 alloc_resource(&enc.bs[1], bs_size, AMDGPU_GEM_DOMAIN_GTT);
503 resources[num_resources++] = enc.bs[1].handle;
504 alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
505 resources[num_resources++] = enc.vbuf.handle;
506 alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
507 resources[num_resources++] = enc.cpb.handle;
510 r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
513 memset(enc.vbuf.ptr, 0, vbuf_size);
514 for (i = 0; i < enc.height; ++i) {
515 memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
516 enc.vbuf.ptr += ALIGN(enc.width, align);
518 for (i = 0; i < enc.height / 2; ++i) {
519 memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
520 enc.vbuf.ptr += ALIGN(enc.width, align);
523 r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
530 amdgpu_cs_vce_encode_idr(&enc);
531 amdgpu_cs_vce_encode_p(&enc);
532 check_result(&enc);
536 amdgpu_cs_vce_encode_idr(&enc);
537 amdgpu_cs_vce_encode_p(&enc);
538 check_result(&enc);
542 enc.two_instance = true;
545 amdgpu_cs_vce_encode_idr(&enc);
548 amdgpu_cs_vce_encode_p(&enc);
549 check_result(&enc);
554 amdgpu_cs_vce_encode_idr(&enc);
555 amdgpu_cs_vce_encode_p(&enc);
556 check_result(&enc);
559 free_resource(&enc.fb[0]);
560 free_resource(&enc.fb[1]);
561 free_resource(&enc.bs[0]);
562 free_resource(&enc.bs[1]);
563 free_resource(&enc.vbuf);
564 free_resource(&enc.cpb);
567 static void amdgpu_cs_vce_mv(struct amdgpu_vce_encode *enc)
572 unsigned luma_size = ALIGN(enc->width, align) * ALIGN(enc->height, 16);
575 luma_offset = enc->vbuf.addr;
577 mv_ref_luma_offset = enc->mvrefbuf.addr;
584 ib_cpu[len + 2] = enc->bs[0].addr >> 32;
585 ib_cpu[len + 3] = enc->bs[0].addr;
588 ib_cpu[len + 2] = enc->cpb.addr >> 32;
589 ib_cpu[len + 3] = enc->cpb.addr;
598 ib_cpu[len + 2] = enc->fb[0].addr >> 32;
599 ib_cpu[len + 3] = enc->fb[0].addr;
604 ib_cpu[len + 4] = ALIGN(enc->width, align);
605 ib_cpu[len + 5] = ALIGN(enc->width, align);
607 ib_cpu[len + 7] = enc->mvb.addr >> 32;
608 ib_cpu[len + 8] = enc->mvb.addr;
618 ib_cpu[len + 13] = ALIGN(enc->height, 16);
619 ib_cpu[len + 14] = ALIGN(enc->width, align);
620 ib_cpu[len + 15] = ALIGN(enc->width, align);
644 enc->ib_len = len;
649 static void check_mv_result(struct amdgpu_vce_encode *enc)
655 r = amdgpu_bo_cpu_map(enc->fb[0].handle, (void **)&enc->fb[0].ptr);
657 r = amdgpu_bo_cpu_unmap(enc->fb[0].handle);
659 r = amdgpu_bo_cpu_map(enc->mvb.handle, (void **)&enc->mvb.ptr);
661 for (j = 0, sum = 0; j < enc->mvbuf_size; ++j)
662 sum += enc->mvb.ptr[j];
664 r = amdgpu_bo_cpu_unmap(enc->mvb.handle);
674 vbuf_size = ALIGN(enc.width, align) * ALIGN(enc.height, 16) * 1.5;
675 enc.mvbuf_size = ALIGN(enc.width, 16) * ALIGN(enc.height, 16) / 8;
678 alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
679 resources[num_resources++] = enc.fb[0].handle;
680 alloc_resource(&enc.bs[0], bs_size, AMDGPU_GEM_DOMAIN_GTT);
681 resources[num_resources++] = enc.bs[0].handle;
682 alloc_resource(&enc.mvb, enc.mvbuf_size, AMDGPU_GEM_DOMAIN_GTT);
683 resources[num_resources++] = enc.mvb.handle;
684 alloc_resource(&enc.vbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
685 resources[num_resources++] = enc.vbuf.handle;
686 alloc_resource(&enc.mvrefbuf, vbuf_size, AMDGPU_GEM_DOMAIN_VRAM);
687 resources[num_resources++] = enc.mvrefbuf.handle;
688 alloc_resource(&enc.cpb, cpb_size, AMDGPU_GEM_DOMAIN_VRAM);
689 resources[num_resources++] = enc.cpb.handle;
692 r = amdgpu_bo_cpu_map(enc.vbuf.handle, (void **)&enc.vbuf.ptr);
695 memset(enc.vbuf.ptr, 0, vbuf_size);
696 for (i = 0; i < enc.height; ++i) {
697 memcpy(enc.vbuf.ptr, (frame + i * enc.width), enc.width);
698 enc.vbuf.ptr += ALIGN(enc.width, align);
700 for (i = 0; i < enc.height / 2; ++i) {
701 memcpy(enc.vbuf.ptr, ((frame + enc.height * enc.width) + i * enc.width), enc.width);
702 enc.vbuf.ptr += ALIGN(enc.width, align);
705 r = amdgpu_bo_cpu_unmap(enc.vbuf.handle);
708 r = amdgpu_bo_cpu_map(enc.mvrefbuf.handle, (void **)&enc.mvrefbuf.ptr);
711 memset(enc.mvrefbuf.ptr, 0, vbuf_size);
712 for (i = 0; i < enc.height; ++i) {
713 memcpy(enc.mvrefbuf.ptr, (frame + (enc.height - i - 1) * enc.width), enc.width);
714 enc.mvrefbuf.ptr += ALIGN(enc.width, align);
716 for (i = 0; i < enc.height / 2; ++i) {
717 memcpy(enc.mvrefbuf.ptr,
718 ((frame + enc.height * enc.width) + (enc.height / 2 - i - 1) * enc.width), enc.width);
719 enc.mvrefbuf.ptr += ALIGN(enc.width, align);
722 r = amdgpu_bo_cpu_unmap(enc.mvrefbuf.handle);
728 amdgpu_cs_vce_mv(&enc);
729 check_mv_result(&enc);
731 free_resource(&enc.fb[0]);
732 free_resource(&enc.bs[0]);
733 free_resource(&enc.vbuf);
734 free_resource(&enc.cpb);
735 free_resource(&enc.mvrefbuf);
736 free_resource(&enc.mvb);
744 alloc_resource(&enc.fb[0], 4096, AMDGPU_GEM_DOMAIN_GTT);
745 resources[num_resources++] = enc.fb[0].handle;
755 ib_cpu[len + 2] = enc.fb[0].addr >> 32;
756 ib_cpu[len + 3] = enc.fb[0].addr;
764 free_resource(&enc.fb[0]);
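
The listing only shows where enc is referenced, not how it is declared. As a rough guide to what the fields mean, below is a minimal sketch of the context struct and of the pitch-aligned NV12 upload that the copy loops at lines 514-520 and 696-702 perform. The per-buffer wrapper name, the exact field types, and the copy_nv12_frame helper are inferred from the accesses above, not copied from the test source.

/*
 * Reconstruction for orientation only: field names and types are inferred
 * from the accesses in the listing (handle/addr/ptr, fb[]/bs[] pairs, vbuf,
 * cpb, mvb, mvrefbuf) and are not taken from the actual test file.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "amdgpu.h"			/* libdrm amdgpu header, for the handle types */

#define ALIGN(v, a)	(((v) + (a) - 1) & ~((a) - 1))

struct amdgpu_vce_bo {			/* assumed per-buffer wrapper */
	amdgpu_bo_handle handle;	/* mapped via amdgpu_bo_cpu_map() */
	amdgpu_va_handle va_handle;
	uint64_t addr;			/* GPU VA written into the IB */
	uint64_t size;
	uint8_t *ptr;			/* CPU pointer while mapped */
};

struct amdgpu_vce_encode {
	unsigned width, height;		/* picture size, set from vce_create[6..7] */
	struct amdgpu_vce_bo vbuf;	/* input picture, pitch-aligned NV12 */
	struct amdgpu_vce_bo bs[2];	/* bitstream buffers, one per frame */
	struct amdgpu_vce_bo fb[2];	/* feedback buffers read in check_result() */
	struct amdgpu_vce_bo cpb;	/* coded picture buffer (reconstructed refs) */
	struct amdgpu_vce_bo mvrefbuf;	/* reference picture for the MV dump test */
	struct amdgpu_vce_bo mvb;	/* motion-vector output buffer */
	unsigned mvbuf_size;
	unsigned ib_len;		/* IB length reused by the two-instance path */
	bool two_instance;
};

/*
 * Pitch-aligned NV12 upload, mirroring the loops at lines 514-520 and
 * 696-702: luma rows first, then the interleaved chroma plane at half
 * height, each source row padded out to ALIGN(width, align) in the BO.
 */
static void copy_nv12_frame(uint8_t *dst, const uint8_t *frame,
			    unsigned width, unsigned height, unsigned align)
{
	unsigned pitch = ALIGN(width, align);
	unsigned i;

	for (i = 0; i < height; ++i) {		/* luma plane */
		memcpy(dst, frame + i * width, width);
		dst += pitch;
	}
	for (i = 0; i < height / 2; ++i) {	/* interleaved CbCr plane */
		memcpy(dst, frame + height * width + i * width, width);
		dst += pitch;
	}
}

Read against the listing, the create test only touches fb[0]; the full encode test additionally allocates fb[1], both bitstream buffers, vbuf and cpb; and the MV test swaps the second feedback/bitstream pair for mvrefbuf and mvb, which matches the alloc_resource()/free_resource() pairs visible above.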