/*
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Simple virtio-mmio GPU driver, without hardware accelerator.
 * Uses only synchronous request/response, no IRQ.
 */
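/*
 * Example (an illustration, not from this repo): with QEMU's virtio-mmio
 * transport the device is typically enabled with something like
 *     qemu-system-arm -M virt -device virtio-gpu-device ...
 * The exact command line depends on the board configuration.
 */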

#include "osal.h"
#include "osal_io.h"
#include "hdf_device_desc.h"
#include "fb.h"
#include "los_vm_phys.h"
#include "los_vm_iomap.h"
#include "virtmmio.h"

#define VIRTIO_GPU_F_EDID   (1 << 1)

#define VIRTQ_CONTROL_QSZ   4
#define VIRTQ_CURSOR_QSZ    2
#define NORMAL_CMD_ENTRIES  2

#define FB_WIDTH_DFT        800
#define FB_HEIGHT_DFT       480
#define GPU_DFT_RATE        (1000 / 30)    /* ms, 30Hz */
#define PIXEL_BYTES         4

#define RESOURCEID_FB       1

enum VirtgpuCtrlType {
    /* 2d commands */
    VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
    VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
    VIRTIO_GPU_CMD_RESOURCE_UNREF,
    VIRTIO_GPU_CMD_SET_SCANOUT,
    VIRTIO_GPU_CMD_RESOURCE_FLUSH,
    VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
    VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
    VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
    VIRTIO_GPU_CMD_GET_CAPSET_INFO,
    VIRTIO_GPU_CMD_GET_CAPSET,
    VIRTIO_GPU_CMD_GET_EDID,
    /* cursor commands */
    VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
    VIRTIO_GPU_CMD_MOVE_CURSOR,
    /* success responses */
    VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
    VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
    VIRTIO_GPU_RESP_OK_CAPSET_INFO,
    VIRTIO_GPU_RESP_OK_CAPSET,
    VIRTIO_GPU_RESP_OK_EDID,
    /* error responses */
    VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
    VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
    VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
    VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
    VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
    VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};

enum VirtgpuFormats {
    VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1,
    VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM,
    VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM,
    VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM,

    VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67,
    VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM,

    VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121,
    VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
};

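/*
 * Per the virtio-gpu spec, every request and response begins with this
 * header. When a request sets VIRTIO_GPU_FLAG_FENCE, the device echoes the
 * flag and fenceId back in the response; NotifyAndWaitResponse spins on that.
 */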
struct VirtgpuCtrlHdr {
    uint32_t type;
#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
    uint32_t flags;
    uint64_t fenceId;
    uint32_t ctxId;
    uint32_t padding;
};

struct VirtgpuRect {
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
};

struct VirtgpuResourceFlush {
    struct VirtgpuCtrlHdr hdr;
    struct VirtgpuRect r;
    uint32_t resourceId;
    uint32_t padding;
};

struct VirtgpuTransferToHost2D {
    struct VirtgpuCtrlHdr hdr;
    struct VirtgpuRect r;
    uint64_t offset;
    uint32_t resourceId;
    uint32_t padding;
};

struct Virtgpu {
    struct VirtmmioDev      dev;
    OSAL_DECLARE_TIMER(timer);          /* refresh timer */

    struct VirtgpuRect      screen;
    uint8_t                 *fb;        /* frame buffer */
    bool                    edid;

    /*
     * Request/response buffers for normal operations (timer refresh).
     * We do not wait for their completion, so they must be static memory.
     * When a new operation is issued, the previous one must already be done.
     * The response is shared and ignored.
     *
     * control queue 4 descs: 0-trans_req 1-trans_resp 2-flush_req 3-flush_resp
     *                        0-... (30Hz is enough to avoid overwrite)
     */
    struct VirtgpuResourceFlush     flushReq;
    struct VirtgpuTransferToHost2D  transReq;
    struct VirtgpuCtrlHdr           resp;
};
static struct Virtgpu *g_virtGpu;   /* the fb module needs this data; kept global for simplicity */

static const char *ErrString(int err)
{
    switch (err) {
        case VIRTIO_GPU_RESP_ERR_UNSPEC: return "unspec";
        case VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY: return "out of memory";
        case VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID: return "invalid scanout ID";
        case VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID: return "invalid resource ID";
        case VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID: return "invalid context ID";
        case VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER: return "invalid parameter";
        default: break;
    }
    return "unknown error";
}

static bool Feature0(uint32_t features, uint32_t *supported, void *dev)
{
    struct Virtgpu *gpu = dev;

    if (features & VIRTIO_GPU_F_EDID) {
        *supported |= VIRTIO_GPU_F_EDID;
        gpu->edid = true;
    }

    return true;
}

static bool Feature1(uint32_t features, uint32_t *supported, void *dev)
{
    (void)dev;
    if (features & VIRTIO_F_VERSION_1) {
        *supported |= VIRTIO_F_VERSION_1;
    } else {
        HDF_LOGE("[%s]virtio-gpu has no VERSION_1 feature", __func__);
        return false;
    }

    return true;
}

static bool NotifyAndWaitResponse(unsigned queue, struct Virtq *q, const void *req, volatile void *resp)
{
    const struct VirtgpuCtrlHdr *a = req;
    volatile struct VirtgpuCtrlHdr *b = resp;

    /* always use desc[0], desc[1] (and desc[2]) for request-wait-response */
    q->avail->ring[q->avail->index % q->qsz] = 0;
    DSB;
    q->avail->index++;
    OSAL_WRITEL(queue, g_virtGpu->dev.base + VIRTMMIO_REG_QUEUENOTIFY);

    /* spin until the device consumes the request and, if fenced, echoes the fence ID back */
    while ((q->last == q->used->index) ||
           ((a->flags & VIRTIO_GPU_FLAG_FENCE) && (a->fenceId != b->fenceId))) {
        DSB;
    }
    q->last++;

    if ((b->type < VIRTIO_GPU_RESP_OK_NODATA) || (b->type > VIRTIO_GPU_RESP_OK_EDID)) {
        HDF_LOGE("[%s]virtio-gpu command=0x%x error=0x%x: %s", __func__, a->type, b->type, ErrString(b->type));
        return false;
    }

    return true;
}

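/*
 * RequestResponse builds a two-descriptor chain (a sketch of the layout):
 *
 *   desc[0]: request,  device-readable, VIRTQ_DESC_F_NEXT -> desc[1]
 *   desc[1]: response, device-writable (VIRTQ_DESC_F_WRITE)
 *
 * RequestDataResponse below inserts one extra device-readable data
 * descriptor between the request and the response.
 */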
static bool RequestResponse(unsigned queue, const void *req, size_t reqSize, volatile void *resp, size_t respSize)
{
    struct Virtq *q = &g_virtGpu->dev.vq[queue];
    uint16_t idx = 0;

    /* NOTE: These buffers must be physically contiguous. They come from the kernel stack, so they are. */
    q->desc[idx].pAddr = VMM_TO_DMA_ADDR((VADDR_T)req);
    q->desc[idx].len = reqSize;
    q->desc[idx].flag = VIRTQ_DESC_F_NEXT;
    q->desc[idx].next = idx + 1;
    idx++;
    q->desc[idx].pAddr = VMM_TO_DMA_ADDR((VADDR_T)resp);
    q->desc[idx].len = respSize;
    q->desc[idx].flag = VIRTQ_DESC_F_WRITE;

    return NotifyAndWaitResponse(queue, q, req, resp);
}

static bool RequestDataResponse(const void *req, size_t reqSize, const void *data,
                                size_t dataSize, volatile void *resp, size_t respSize)
{
    struct Virtq *q = &g_virtGpu->dev.vq[0];
    uint16_t idx = 0;

    q->desc[idx].pAddr = VMM_TO_DMA_ADDR((VADDR_T)req);
    q->desc[idx].len = reqSize;
    q->desc[idx].flag = VIRTQ_DESC_F_NEXT;
    q->desc[idx].next = idx + 1;
    idx++;
    q->desc[idx].pAddr = VMM_TO_DMA_ADDR((VADDR_T)data);
    q->desc[idx].len = dataSize;
    q->desc[idx].flag = VIRTQ_DESC_F_NEXT;
    q->desc[idx].next = idx + 1;
    idx++;
    q->desc[idx].pAddr = VMM_TO_DMA_ADDR((VADDR_T)resp);
    q->desc[idx].len = respSize;
    q->desc[idx].flag = VIRTQ_DESC_F_WRITE;

    return NotifyAndWaitResponse(0, q, req, resp);
}

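/*
 * Each normal command occupies NORMAL_CMD_ENTRIES = 2 of the
 * VIRTQ_CONTROL_QSZ = 4 control-queue descriptors, so request heads alternate
 * between desc[0] and desc[2]. The full-queue check below drops a refresh
 * request instead of blocking when the device falls behind.
 */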
/* For normal display refresh, do not wait for the response */
static void RequestNoResponse(unsigned queue, const void *req, size_t reqSize, bool notify)
{
    struct Virtq *q = &g_virtGpu->dev.vq[queue];
    uint16_t head = q->last % q->qsz;   /* `last` records the next writable desc entry for requests */

    /* the queue is full (QEMU is busy), give up this request */
    if (abs(q->avail->index - (volatile uint16_t)q->used->index) >= VIRTQ_CONTROL_QSZ) {
        return;
    }

    /* the other desc fields were initialized by PopulateVirtQ */
    q->desc[head].pAddr = VMM_TO_DMA_ADDR((VADDR_T)req);
    q->desc[head].len = reqSize;
    q->last += NORMAL_CMD_ENTRIES;

    q->avail->ring[q->avail->index % q->qsz] = head;
    DSB;
    q->avail->index++;

    if (notify) {
        OSAL_WRITEL(queue, g_virtGpu->dev.base + VIRTMMIO_REG_QUEUENOTIFY);
    }
}

#define VIRTIO_GPU_MAX_SCANOUTS 16
struct VirtgpuRespDisplayInfo {
    struct VirtgpuCtrlHdr hdr;
    struct {
        struct VirtgpuRect r;
        uint32_t enabled;
        uint32_t flags;
    } pmodes[VIRTIO_GPU_MAX_SCANOUTS];
};
static void CMDGetDisplayInfo(void)
{
    struct VirtgpuCtrlHdr req = {
        .type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO
    };
    struct VirtgpuRespDisplayInfo resp = { 0 };

    if (!RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp))) {
        goto DEFAULT;
    }

    if (resp.pmodes[0].enabled) {
        g_virtGpu->screen = resp.pmodes[0].r;
        return;
    } else {
        HDF_LOGE("[%s]scanout 0 not enabled", __func__);
    }

DEFAULT:
    g_virtGpu->screen.x = g_virtGpu->screen.y = 0;
    g_virtGpu->screen.width = FB_WIDTH_DFT;
    g_virtGpu->screen.height = FB_HEIGHT_DFT;
}

/* reserved for future use */
struct VirtgpuGetEdid {
    struct VirtgpuCtrlHdr hdr;
    uint32_t scanout;
    uint32_t padding;
};
struct VirtgpuRespEdid {
    struct VirtgpuCtrlHdr hdr;
    uint32_t size;
    uint32_t padding;
    uint8_t edid[1024];
};
static void CMDGetEdid(void)
{
    struct VirtgpuGetEdid req = {
        .hdr.type = VIRTIO_GPU_CMD_GET_EDID
    };
    struct VirtgpuRespEdid resp = { 0 };

    if (!RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp))) {
        goto DEFAULT;
    }

DEFAULT:
    return;
}

struct VirtgpuResourceCreate2D {
    struct VirtgpuCtrlHdr hdr;
    uint32_t resourceId;
    uint32_t format;
    uint32_t width;
    uint32_t height;
};
static bool CMDResourceCreate2D(uint32_t resourceId)
{
    struct VirtgpuResourceCreate2D req = {
        .hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
        .resourceId = resourceId,
        .format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, /* sRGB, byte order: RGBARGBA... */
        .width = (resourceId == RESOURCEID_FB) ? g_virtGpu->screen.width : 0,
        .height = (resourceId == RESOURCEID_FB) ? g_virtGpu->screen.height : 0
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp));
}

struct VirtgpuSetScanout {
    struct VirtgpuCtrlHdr hdr;
    struct VirtgpuRect r;
    uint32_t scanoutId;
    uint32_t resourceId;
};
static bool CMDSetScanout(const struct VirtgpuRect *r)
{
    struct VirtgpuSetScanout req = {
        .hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT,
        .r = *r,
        .resourceId = RESOURCEID_FB
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp));
}

static bool CMDTransferToHost(uint32_t resourceId, const struct VirtgpuRect *r)
{
    struct VirtgpuTransferToHost2D req = {
        .hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
        .hdr.flags = VIRTIO_GPU_FLAG_FENCE,
        .hdr.fenceId = r->x + r->y + r->width + r->height,
        .r = *r,
        .resourceId = resourceId,
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp));
}

static bool CMDResourceFlush(void)
{
    struct VirtgpuResourceFlush req = {
        .hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH,
        .r = g_virtGpu->screen,
        .resourceId = RESOURCEID_FB,
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp));
}

struct VirtgpuResourceAttachBacking {
    struct VirtgpuCtrlHdr hdr;
    uint32_t resourceId;
    uint32_t nrEntries;
};
struct VirtgpuMemEntry {
    uint64_t addr;
    uint32_t length;
    uint32_t padding;
};                                  /* vaddr's physical pages must be contiguous */
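/*
 * Attach the guest pages backing a resource. The command carries a
 * scatter-gather list; nrEntries = 1 suffices here because the framebuffer
 * is physically contiguous.
 */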
static bool CMDResourceAttachBacking(uint32_t resourceId, uint64_t vaddr, uint32_t len)
{
    struct VirtgpuResourceAttachBacking req = {
        .hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
        .resourceId = resourceId,
        .nrEntries = 1
    };
    struct VirtgpuMemEntry data = {
        .addr = VMM_TO_DMA_ADDR(vaddr),
        .length = len,
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestDataResponse(&req, sizeof(req), &data, sizeof(data), &resp, sizeof(resp));
}

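/*
 * Periodic refresh: queue the transfer-to-host request without ringing the
 * doorbell, then queue the flush and notify once, so the device sees both
 * commands with a single kick.
 */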
static void NormOpsRefresh(uintptr_t arg)
{
    (void)arg;
    RequestNoResponse(0, &g_virtGpu->transReq, sizeof(g_virtGpu->transReq), false);
    RequestNoResponse(0, &g_virtGpu->flushReq, sizeof(g_virtGpu->flushReq), true);
}

/* round the framebuffer size up to whole pages so user space can mmap it */
static inline size_t VirtgpuFbPageSize(void)
{
    return ALIGN(g_virtGpu->screen.width * g_virtGpu->screen.height * PIXEL_BYTES, PAGE_SIZE);
}

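/*
 * Pre-populate the static parts of the descriptor tables (cf. the layout
 * comment in struct Virtgpu):
 *
 *   desc[i]    : request slot, VIRTQ_DESC_F_NEXT -> desc[i + 1]
 *                (pAddr/len filled in per request by RequestNoResponse)
 *   desc[i + 1]: shared, ignored response, VIRTQ_DESC_F_WRITE
 */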
static void PopulateVirtQ(void)
{
    struct Virtq *q = NULL;
    int i, n;
    uint16_t qsz;

    for (n = 0; n < VIRTQ_NUM; n++) {
        if (n) {
            qsz = VIRTQ_CURSOR_QSZ;
        } else {
            qsz = VIRTQ_CONTROL_QSZ;
        }
        q = &g_virtGpu->dev.vq[n];

        for (i = 0; i < qsz; i += NORMAL_CMD_ENTRIES) {
            q->desc[i].flag = VIRTQ_DESC_F_NEXT;
            q->desc[i].next = i + 1;
            q->desc[i + 1].pAddr = VMM_TO_DMA_ADDR((VADDR_T)&g_virtGpu->resp);
            q->desc[i + 1].len = sizeof(g_virtGpu->resp);
            q->desc[i + 1].flag = VIRTQ_DESC_F_WRITE;
        }
        /* repurpose `last` to record our next writable desc index */
        q->last = 0;
    }

    g_virtGpu->transReq.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
    g_virtGpu->transReq.r = g_virtGpu->screen;
    g_virtGpu->transReq.resourceId = RESOURCEID_FB;

    g_virtGpu->flushReq.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
    g_virtGpu->flushReq.r = g_virtGpu->screen;
    g_virtGpu->flushReq.resourceId = RESOURCEID_FB;
}

static bool VirtgpuBeginNormDisplay(void)
{
    int32_t ret;

    if (!CMDTransferToHost(RESOURCEID_FB, &g_virtGpu->screen)) {
        return false;
    }
    if (!CMDResourceFlush()) {
        return false;
    }

    /* now fix the queue entries so normal OPs avoid redundant setup */
    PopulateVirtQ();

    if ((ret = OsalTimerStartLoop(&g_virtGpu->timer)) != HDF_SUCCESS) {
        HDF_LOGE("[%s]start timer failed: %d\n", __func__, ret);
        return false;
    }
    return true;
}

/* unified DeInit for InitDev, HDF and fb */
static void VirtgpuDeInit(struct Virtgpu *gpu)
{
    if (gpu->timer.realTimer) {
        OsalTimerDelete(&gpu->timer);
    }
    if (gpu->fb) {
        LOS_PhysPagesFreeContiguous(gpu->fb, VirtgpuFbPageSize() / PAGE_SIZE);
    }
    LOS_DmaMemFree(gpu);
    g_virtGpu = NULL;
}

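/*
 * Bring-up follows the usual virtio-mmio sequence: discover the device,
 * negotiate features, configure the virt queues, then complete initialization
 * (VritmmioInitEnd), after which the queues are live. The device struct and
 * both queues share one physically contiguous DMA allocation.
 */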
static struct Virtgpu *VirtgpuInitDev(void)
{
    struct Virtgpu *gpu = NULL;
    VADDR_T base;
    uint16_t qsz[VIRTQ_NUM];
    int32_t ret, len;

    /* NOTE: For simplicity, allocate all of this from physically contiguous memory. */
    len = sizeof(struct Virtgpu) + VirtqSize(VIRTQ_CONTROL_QSZ) + VirtqSize(VIRTQ_CURSOR_QSZ);
    gpu = LOS_DmaMemAlloc(NULL, len, sizeof(void *), DMA_CACHE);
    if (gpu == NULL) {
        HDF_LOGE("[%s]alloc gpu memory failed", __func__);
        return NULL;
    }

    if (!VirtmmioDiscover(VIRTMMIO_DEVICE_ID_GPU, &gpu->dev)) {
        goto ERR_OUT;
    }

    VirtmmioInitBegin(&gpu->dev);

    if (!VirtmmioNegotiate(&gpu->dev, Feature0, Feature1, gpu)) {
        goto ERR_OUT1;
    }

    base = ALIGN((VADDR_T)gpu + sizeof(struct Virtgpu), VIRTQ_ALIGN_DESC);
    qsz[0] = VIRTQ_CONTROL_QSZ;
    qsz[1] = VIRTQ_CURSOR_QSZ;
    if (VirtmmioConfigQueue(&gpu->dev, base, qsz, VIRTQ_NUM) == 0) {
        goto ERR_OUT1;
    }

    /* the framebuffer can be modified at any time, so we need a full-screen refresh timer */
    ret = OsalTimerCreate(&gpu->timer, GPU_DFT_RATE, NormOpsRefresh, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("[%s]create timer failed: %d", __func__, ret);
        goto ERR_OUT1;
    }

    for (int i = 0; i < VIRTQ_NUM; i++) {   /* hint to the device that we do not use IRQs */
        gpu->dev.vq[i].avail->flag = VIRTQ_AVAIL_F_NO_INTERRUPT;
    }

    VritmmioInitEnd(&gpu->dev);             /* now the virt queues can be used */
    return gpu;

ERR_OUT1:
    VirtmmioInitFailed(&gpu->dev);
ERR_OUT:
    VirtgpuDeInit(gpu);
    return NULL;
}

static int32_t HdfVirtgpuInit(struct HdfDeviceObject *device)
{
    int32_t ret;

    if (device == NULL) {
        HDF_LOGE("[%s]device is null", __func__);
        return HDF_ERR_INVALID_PARAM;
    }

    g_virtGpu = VirtgpuInitDev();
    if (g_virtGpu == NULL) {
        return HDF_FAILURE;
    }
    device->priv = g_virtGpu;

    /* frame buffer resources are initialized here, using the virt queue mechanism */
    if ((ret = fb_register(0, 0)) != 0) {
        HDF_LOGE("[%s]framebuffer register failed: %d", __func__, ret);
        return HDF_FAILURE;
    }

    if (!VirtgpuBeginNormDisplay()) {
        return HDF_FAILURE;
    }

    return HDF_SUCCESS;
}

static void HdfVirtgpuRelease(struct HdfDeviceObject *deviceObject)
{
    if (deviceObject && deviceObject->priv) {
        VirtgpuDeInit(deviceObject->priv);
    }
}

struct HdfDriverEntry g_virtGpuEntry = {
    .moduleVersion = 1,
    .moduleName = "HDF_VIRTIO_GPU",
    .Init = HdfVirtgpuInit,
    .Release = HdfVirtgpuRelease,
};

HDF_INIT(g_virtGpuEntry);


/*
 * video/fb.h interface implementation
 */
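/*
 * The functions below implement the NuttX-style framebuffer interface from
 * fb.h; fb_register() is expected to call up_fbinitialize() and
 * up_fbgetvplane() to wire this driver up as a /dev/fb device.
 */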

static bool VirtgpuInitResourceHelper(uint32_t resourceId)
{
    uint64_t va;
    uint32_t len, w, h;

    if (!CMDResourceCreate2D(resourceId)) {
        return false;
    }

    if (resourceId == RESOURCEID_FB) {
        va = (uint64_t)g_virtGpu->fb;
        w = g_virtGpu->screen.width;
        h = g_virtGpu->screen.height;
    } else {
        HDF_LOGE("[%s]unsupported resource ID: %u", __func__, resourceId);
        return false;
    }
    len = w * h * PIXEL_BYTES;
    if (!CMDResourceAttachBacking(resourceId, va, len)) {
        return false;
    }

    if (resourceId == RESOURCEID_FB) {
        struct VirtgpuRect r = { 0, 0, w, h };
        return CMDSetScanout(&r);
    }
    return true;
}

static bool VirtgpuInitResource(void)
{
    /* The framebuffer must be physically contiguous; fb_register will zero the buffer */
    g_virtGpu->fb = LOS_PhysPagesAllocContiguous(VirtgpuFbPageSize() / PAGE_SIZE);
    if (g_virtGpu->fb == NULL) {
        HDF_LOGE("[%s]alloc framebuffer memory failed", __func__);
        return false;
    }
    if (!VirtgpuInitResourceHelper(RESOURCEID_FB)) {
        return false;
    }

    return true;
}

int up_fbinitialize(int display)
{
    if (display != 0) {
        return -1;
    }

    CMDGetDisplayInfo();
    if (g_virtGpu->edid) {
        CMDGetEdid();
    }

    if (!VirtgpuInitResource()) {
        return -1;
    }

    return 0;
}

static int FbGetVideoInfo(struct fb_vtable_s *vtable, struct fb_videoinfo_s *vinfo)
{
    (void)vtable;
    vinfo->fmt = FB_FMT_RGB32;  /* sRGB */
    vinfo->xres = g_virtGpu->screen.width;
    vinfo->yres = g_virtGpu->screen.height;
    vinfo->nplanes = 1;
    return 0;
}

#define BYTE_BITS   8
static int FbGetPlaneInfo(struct fb_vtable_s *vtable, int planeno, struct fb_planeinfo_s *pinfo)
{
    if (planeno != 0) {
        return -1;
    }
    (void)vtable;

    pinfo->fbmem = g_virtGpu->fb;
    pinfo->stride = g_virtGpu->screen.width * PIXEL_BYTES;
    pinfo->fblen = pinfo->stride * g_virtGpu->screen.height;
    pinfo->display = 0;
    pinfo->bpp = PIXEL_BYTES * BYTE_BITS;
    return 0;
}

#ifdef CONFIG_FB_OVERLAY
static int FbGetOverlayInfo(struct fb_vtable_s *v, int overlayno, struct fb_overlayinfo_s *info)
{
    (void)v;
    if (overlayno != 0) {
        return -1;
    }

    info->fbmem = g_virtGpu->fb;
    info->memphys = (void *)VMM_TO_DMA_ADDR((VADDR_T)g_virtGpu->fb);
    info->stride = g_virtGpu->screen.width * PIXEL_BYTES;
    info->fblen = info->stride * g_virtGpu->screen.height;
    info->overlay = 0;
    info->bpp = PIXEL_BYTES * BYTE_BITS;
    info->accl = 0;
    return 0;
}
#endif

/* we expect the window manager to deal with concurrent access */
static int FbOpen(struct fb_vtable_s *vtable)
{
    (void)vtable;
    return 0;
}

static int FbRelease(struct fb_vtable_s *vtable)
{
    (void)vtable;
    return 0;
}

static ssize_t FbMmap(struct fb_vtable_s *vtable, LosVmMapRegion *region)
{
    int n;

    (void)vtable;
    if ((region->range.size + (region->pgOff << PAGE_SHIFT)) > VirtgpuFbPageSize()) {
        HDF_LOGE("[%s]mmap size + pgOff exceed framebuffer size", __func__);
        return -1;
    }
    if (region->regionFlags & VM_MAP_REGION_FLAG_PERM_EXECUTE) {
        HDF_LOGE("[%s]cannot set execute flag", __func__);
        return -1;
    }

    region->regionFlags |= VM_MAP_REGION_FLAG_UNCACHED;
    n = LOS_ArchMmuMap(&region->space->archMmu, region->range.base,
                        VMM_TO_DMA_ADDR((VADDR_T)g_virtGpu->fb + (region->pgOff << PAGE_SHIFT)),
                        region->range.size >> PAGE_SHIFT, region->regionFlags);
    if (n != (region->range.size >> PAGE_SHIFT)) {
        HDF_LOGE("[%s]mmu map error: %d", __func__, n);
        return -1;
    }

    return 0;
}
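/*
 * A minimal user-space sketch (an assumption for illustration: the fb driver
 * is registered as /dev/fb0, and len/xres come from the plane/video info):
 *
 *   int fd = open("/dev/fb0", O_RDWR);
 *   uint32_t *fb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *   fb[y * xres + x] = 0xFF0000FF;   // little-endian R8G8B8A8: opaque red
 *
 * No explicit flush is needed; the refresh timer transfers and flushes the
 * whole screen every GPU_DFT_RATE ms.
 */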

/* stub to satisfy the video/fb.h interface */
static int FbDummy(struct fb_vtable_s *v, int *s)
{
    (void)v;
    (void)s;
    HDF_LOGE("[%s]unsupported method", __func__);
    return -1;
}

static struct fb_vtable_s g_virtGpuFbOps = {
    .getvideoinfo = FbGetVideoInfo,
    .getplaneinfo = FbGetPlaneInfo,
    .fb_open = FbOpen,
    .fb_release = FbRelease,
#ifdef CONFIG_FB_CMAP
    .getcmap = (int (*)(struct fb_vtable_s *, struct fb_cmap_s *))FbDummy,
    .putcmap = (int (*)(struct fb_vtable_s *, const struct fb_cmap_s *))FbDummy,
#endif
#ifdef CONFIG_FB_OVERLAY
    .getoverlayinfo = FbGetOverlayInfo,
    .settransp = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
    .setchromakey = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
    .setcolor = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
    .setblank = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
    .setarea = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
# ifdef CONFIG_FB_OVERLAY_BLIT
    .blit = (int (*)(struct fb_vtable_s *, const struct fb_overlayblit_s *))FbDummy,
    .blend = (int (*)(struct fb_vtable_s *, const struct fb_overlayblend_s *))FbDummy,
# endif
    .fb_pan_display = (int (*)(struct fb_vtable_s *, struct fb_overlayinfo_s *))FbDummy,
#endif
    .fb_mmap = FbMmap
};

struct fb_vtable_s *up_fbgetvplane(int display, int vplane)
{
    if ((display != 0) || (vplane != 0)) {
        return NULL;
    }
    return &g_virtGpuFbOps;
}

void up_fbuninitialize(int display)
{
    if (display != 0) {
        return;
    }

    if (g_virtGpu) {
        VirtgpuDeInit(g_virtGpu);
    }
}

uint32_t VirtgpuGetXres(void)
{
    return g_virtGpu->screen.width;
}

uint32_t VirtgpuGetYres(void)
{
    return g_virtGpu->screen.height;
}