/*
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Simple virtio-mmio GPU driver, without hardware accelerator.
 * Uses only synchronous request/response, no IRQ.
 */
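
/*
 * Per the virtio-gpu spec, bringing up a 2D scanout takes this command
 * sequence on the control queue (all implemented as CMD* helpers below):
 *
 *   GET_DISPLAY_INFO -> RESOURCE_CREATE_2D -> RESOURCE_ATTACH_BACKING
 *   -> SET_SCANOUT, then per frame: TRANSFER_TO_HOST_2D -> RESOURCE_FLUSH
 */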

#include "osal.h"
#include "osal_io.h"
#include "hdf_device_desc.h"
#include "securec.h"
#include "los_compiler.h"
#include "los_memory.h"
#include "fb.h"
#include "virtmmio.h"

#define VIRTIO_GPU_F_EDID   (1 << 1)

#define VIRTQ_CONTROL_QSZ   4
#define VIRTQ_CURSOR_QSZ    2
#define NORMAL_CMD_ENTRIES  2

#define FB_WIDTH_DFT        800
#define FB_HEIGHT_DFT       480
#define GPU_DFT_RATE        (1000 / 30)    /* ms, 30Hz */
#define PIXEL_BYTES         4

#define RESOURCEID_FB       1

enum VirtgpuCtrlType {
    /* 2D commands */
    VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100,
    VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
    VIRTIO_GPU_CMD_RESOURCE_UNREF,
    VIRTIO_GPU_CMD_SET_SCANOUT,
    VIRTIO_GPU_CMD_RESOURCE_FLUSH,
    VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
    VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
    VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
    VIRTIO_GPU_CMD_GET_CAPSET_INFO,
    VIRTIO_GPU_CMD_GET_CAPSET,
    VIRTIO_GPU_CMD_GET_EDID,
    /* cursor commands */
    VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
    VIRTIO_GPU_CMD_MOVE_CURSOR,
    /* success responses */
    VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
    VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
    VIRTIO_GPU_RESP_OK_CAPSET_INFO,
    VIRTIO_GPU_RESP_OK_CAPSET,
    VIRTIO_GPU_RESP_OK_EDID,
    /* error responses */
    VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
    VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY,
    VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID,
    VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID,
    VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID,
    VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
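
/*
 * Type codes are grouped by range: 0x01xx 2D commands, 0x03xx cursor
 * commands, 0x11xx success responses, 0x12xx error responses.
 * NotifyAndWaitResponse() below relies on this grouping when it range-checks
 * a response type against [VIRTIO_GPU_RESP_OK_NODATA, VIRTIO_GPU_RESP_OK_EDID].
 */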

enum VirtgpuFormats {
    VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1,
    VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM,
    VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM,
    VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM,

    VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67,
    VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM,

    VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121,
    VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134,
};

struct VirtgpuCtrlHdr {
    uint32_t type;
#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
    uint32_t flags;
    uint64_t fenceId;
    uint32_t ctxId;
    uint32_t padding;
};

struct VirtgpuRect {
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
};

struct VirtgpuResourceFlush {
    struct VirtgpuCtrlHdr hdr;
    struct VirtgpuRect r;
    uint32_t resourceId;
    uint32_t padding;
};

struct VirtgpuTransferToHost2D {
    struct VirtgpuCtrlHdr hdr;
    struct VirtgpuRect r;
    uint64_t offset;
    uint32_t resourceId;
    uint32_t padding;
};

struct Virtgpu {
    struct VirtmmioDev      dev;
    OSAL_DECLARE_TIMER(timer);          /* refresh timer */

    struct VirtgpuRect      screen;
    uint8_t                 *fb;        /* frame buffer */
    bool                    edid;
    /*
     * Request/response buffers for normal operations (timer refresh).
     * We do not wait for their completion, so they must be static memory.
     * When a new operation is issued, the previous one must already be done.
     * The response is shared and ignored.
     *
     * control queue 4 descs: 0-trans_req 1-trans_resp 2-flush_req 3-flush_resp
     *                        0-... (30Hz is enough to avoid overwrite)
     */
    struct VirtgpuResourceFlush     flushReq;
    struct VirtgpuTransferToHost2D  transReq;
    struct VirtgpuCtrlHdr           resp;
};
static struct Virtgpu *g_virtGpu;   /* the fb module needs this data; global for simplicity */

static const char *ErrString(int err)
{
    switch (err) {
        case VIRTIO_GPU_RESP_ERR_UNSPEC: return "unspec";
        case VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY: return "out of memory";
        case VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID: return "invalid scanout ID";
        case VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID: return "invalid resource ID";
        case VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID: return "invalid context ID";
        case VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER: return "invalid parameter";
        default: break;
    }
    return "unknown error";
}

static bool Feature0(uint32_t features, uint32_t *supported, void *dev)
{
    struct Virtgpu *gpu = dev;

    if (features & VIRTIO_GPU_F_EDID) {
        *supported |= VIRTIO_GPU_F_EDID;
        gpu->edid = true;
    }

    return true;
}

static bool Feature1(uint32_t features, uint32_t *supported, void *dev)
{
    (void)dev;
    if (features & VIRTIO_F_VERSION_1) {
        *supported |= VIRTIO_F_VERSION_1;
    } else {
        HDF_LOGE("[%s]virtio-gpu has no VERSION_1 feature", __func__);
        return false;
    }

    return true;
}

static bool NotifyAndWaitResponse(unsigned queue, struct Virtq *q, const void *req, volatile void *resp)
{
    const struct VirtgpuCtrlHdr *a = req;
    volatile struct VirtgpuCtrlHdr *b = resp;

    /* always use desc[0] [1] ([2]) for request-wait-response */
    q->avail->ring[q->avail->index % q->qsz] = 0;
    DSB;
    q->avail->index++;
    OSAL_WRITEL(queue, g_virtGpu->dev.base + VIRTMMIO_REG_QUEUENOTIFY);

    /* spin for response */
    while ((q->last == q->used->index) ||
           ((a->flags == VIRTIO_GPU_FLAG_FENCE) && (a->fenceId != b->fenceId))) {
        DSB;
    }
    q->last++;

    if ((b->type < VIRTIO_GPU_RESP_OK_NODATA) || (b->type > VIRTIO_GPU_RESP_OK_EDID)) {
        HDF_LOGE("[%s]virtio-gpu command=0x%x error=0x%x: %s", __func__, a->type, b->type, ErrString(b->type));
        return false;
    }

    return true;
}

static bool RequestResponse(unsigned queue, const void *req, size_t reqSize, volatile void *resp, size_t respSize)
{
    struct Virtq *q = &g_virtGpu->dev.vq[queue];
    uint16_t idx = 0;

    /* NOTE: These buffers must be physically continuous. They come from the kernel stack, so they are. */
    q->desc[idx].pAddr = u32_to_u64(VMM_TO_DMA_ADDR((VADDR_T)req));
    q->desc[idx].len = reqSize;
    q->desc[idx].flag = VIRTQ_DESC_F_NEXT;
    q->desc[idx].next = idx + 1;
    idx++;
    q->desc[idx].pAddr = u32_to_u64(VMM_TO_DMA_ADDR((VADDR_T)resp));
    q->desc[idx].len = respSize;
    q->desc[idx].flag = VIRTQ_DESC_F_WRITE;

    return NotifyAndWaitResponse(queue, q, req, resp);
}
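
/*
 * A sketch of the two-entry descriptor chain built above, as the device
 * sees it:
 *
 *   desc[0]: pAddr=req   len=reqSize   flag=NEXT   next=1   (driver-read)
 *   desc[1]: pAddr=resp  len=respSize  flag=WRITE           (device-write)
 *
 * RequestDataResponse() below is the same pattern with one extra read-only
 * data descriptor spliced in between.
 */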

static bool RequestDataResponse(const void *req, size_t reqSize, const void *data,
                                size_t dataSize, volatile void *resp, size_t respSize)
{
    struct Virtq *q = &g_virtGpu->dev.vq[0];
    uint16_t idx = 0;

    q->desc[idx].pAddr = u32_to_u64(VMM_TO_DMA_ADDR((VADDR_T)req));
    q->desc[idx].len = reqSize;
    q->desc[idx].flag = VIRTQ_DESC_F_NEXT;
    q->desc[idx].next = idx + 1;
    idx++;
    q->desc[idx].pAddr = u32_to_u64(VMM_TO_DMA_ADDR((VADDR_T)data));
    q->desc[idx].len = dataSize;
    q->desc[idx].flag = VIRTQ_DESC_F_NEXT;
    q->desc[idx].next = idx + 1;
    idx++;
    q->desc[idx].pAddr = u32_to_u64(VMM_TO_DMA_ADDR((VADDR_T)resp));
    q->desc[idx].len = respSize;
    q->desc[idx].flag = VIRTQ_DESC_F_WRITE;

    return NotifyAndWaitResponse(0, q, req, resp);
}

/* For normal display refresh, do not wait for the response */
static void RequestNoResponse(unsigned queue, const void *req, size_t reqSize, bool notify)
{
    struct Virtq *q = &g_virtGpu->dev.vq[queue];
    uint16_t head = q->last % q->qsz;   /* `last` records the next writable desc entry for requests */

    /* the queue is full (QEMU is busy), so give up this request */
    if (abs(q->avail->index - (volatile uint16_t)q->used->index) >= VIRTQ_CONTROL_QSZ) {
        return;
    }

    /* other fields were initialized by PopulateVirtQ */
    q->desc[head].pAddr = u32_to_u64(VMM_TO_DMA_ADDR((VADDR_T)req));
    q->desc[head].len = reqSize;
    q->last += NORMAL_CMD_ENTRIES;

    q->avail->ring[q->avail->index % q->qsz] = head;
    DSB;
    q->avail->index++;

    if (notify) {
        OSAL_WRITEL(queue, g_virtGpu->dev.base + VIRTMMIO_REG_QUEUENOTIFY);
    }
}

#define VIRTIO_GPU_MAX_SCANOUTS 16
struct VirtgpuRespDisplayInfo {
    struct VirtgpuCtrlHdr hdr;
    struct {
        struct VirtgpuRect r;
        uint32_t enabled;
        uint32_t flags;
    } pmodes[VIRTIO_GPU_MAX_SCANOUTS];
};
static void CMDGetDisplayInfo(void)
{
    struct VirtgpuCtrlHdr req = {
        .type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO
    };
    struct VirtgpuRespDisplayInfo resp = { 0 };

    if (!RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp))) {
        goto DEFAULT;
    }

    if (resp.pmodes[0].enabled) {
        g_virtGpu->screen = resp.pmodes[0].r;
        return;
    } else {
        HDF_LOGE("[%s]scanout 0 not enabled", __func__);
    }

DEFAULT:
    g_virtGpu->screen.x = g_virtGpu->screen.y = 0;
    g_virtGpu->screen.width = FB_WIDTH_DFT;
    g_virtGpu->screen.height = FB_HEIGHT_DFT;
}

/* reserved for future use */
struct VirtgpuGetEdid {
    struct VirtgpuCtrlHdr hdr;
    uint32_t scanout;
    uint32_t padding;
};
struct VirtgpuRespEdid {
    struct VirtgpuCtrlHdr hdr;
    uint32_t size;
    uint32_t padding;
    uint8_t edid[1024];
};
static void CMDGetEdid(void)
{
    struct VirtgpuGetEdid req = {
        .hdr.type = VIRTIO_GPU_CMD_GET_EDID
    };
    struct VirtgpuRespEdid resp = { 0 };

    if (!RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp))) {
        goto DEFAULT;
    }

DEFAULT:
    return;
}

struct VirtgpuResourceCreate2D {
    struct VirtgpuCtrlHdr hdr;
    uint32_t resourceId;
    uint32_t format;
    uint32_t width;
    uint32_t height;
};
static bool CMDResourceCreate2D(uint32_t resourceId)
{
    struct VirtgpuResourceCreate2D req = {
        .hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
        .resourceId = resourceId,
        .format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM, /* sRGB, byte order: RGBARGBA... */
        .width = (resourceId == RESOURCEID_FB) ? g_virtGpu->screen.width : 0,
        .height = (resourceId == RESOURCEID_FB) ? g_virtGpu->screen.height : 0
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp));
}

struct VirtgpuSetScanout {
    struct VirtgpuCtrlHdr hdr;
    struct VirtgpuRect r;
    uint32_t scanoutId;
    uint32_t resourceId;
};
static bool CMDSetScanout(const struct VirtgpuRect *r)
{
    struct VirtgpuSetScanout req = {
        .hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT,
        .r = *r,
        .resourceId = RESOURCEID_FB
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp));
}

static bool CMDTransferToHost(uint32_t resourceId, const struct VirtgpuRect *r)
{
    struct VirtgpuTransferToHost2D req = {
        .hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
        .hdr.flags = VIRTIO_GPU_FLAG_FENCE,
        .hdr.fenceId = r->x + r->y + r->width + r->height,
        .r = *r,
        .resourceId = resourceId,
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp));
}

static bool CMDResourceFlush(void)
{
    struct VirtgpuResourceFlush req = {
        .hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH,
        .r = g_virtGpu->screen,
        .resourceId = RESOURCEID_FB,
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestResponse(0, &req, sizeof(req), &resp, sizeof(resp));
}

struct VirtgpuResourceAttachBacking {
    struct VirtgpuCtrlHdr hdr;
    uint32_t resourceId;
    uint32_t nrEntries;
};
struct VirtgpuMemEntry {
    uint64_t addr;
    uint32_t length;
    uint32_t padding;
};

/* the physical memory behind vaddr must be continuous */
static bool CMDResourceAttachBacking(uint32_t resourceId, uint64_t vaddr, uint32_t len)
{
    struct VirtgpuResourceAttachBacking req = {
        .hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
        .resourceId = resourceId,
        .nrEntries = 1
    };
    struct VirtgpuMemEntry data = {
        .addr = VMM_TO_DMA_ADDR(vaddr),
        .length = len,
    };
    struct VirtgpuCtrlHdr resp = { 0 };

    return RequestDataResponse(&req, sizeof(req), &data, sizeof(data), &resp, sizeof(resp));
}

static void NormOpsRefresh(uintptr_t arg)
{
    (void)arg;
    RequestNoResponse(0, &g_virtGpu->transReq, sizeof(g_virtGpu->transReq), false);
    RequestNoResponse(0, &g_virtGpu->flushReq, sizeof(g_virtGpu->flushReq), true);
}

/* round up to page size to fit user-space mmap */
static inline size_t VirtgpuFbPageSize(void)
{
    return ALIGN(g_virtGpu->screen.width * g_virtGpu->screen.height * PIXEL_BYTES, PAGE_SIZE);
}
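
/*
 * At the 800x480 default this is 800 * 480 * 4 = 1536000 bytes, which is
 * already exactly 375 pages (with 4KiB pages), so the rounding only matters
 * for other display modes.
 */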

static void PopulateVirtQ(void)
{
    struct Virtq *q = NULL;
    int i, n;
    uint16_t qsz;

    for (n = 0; n < VIRTQ_NUM; n++) {
        if (n) {
            qsz = VIRTQ_CURSOR_QSZ;
        } else {
            qsz = VIRTQ_CONTROL_QSZ;
        }
        q = &g_virtGpu->dev.vq[n];

        for (i = 0; i < qsz; i += NORMAL_CMD_ENTRIES) {
            q->desc[i].flag = VIRTQ_DESC_F_NEXT;
            q->desc[i].next = i + 1;
            q->desc[i + 1].pAddr = u32_to_u64(VMM_TO_DMA_ADDR((VADDR_T)&g_virtGpu->resp));
            q->desc[i + 1].len = sizeof(g_virtGpu->resp);
            q->desc[i + 1].flag = VIRTQ_DESC_F_WRITE;
        }
        /* repurpose `last` to record our next writable desc index */
        q->last = 0;
    }

    g_virtGpu->transReq.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
    g_virtGpu->transReq.r = g_virtGpu->screen;
    g_virtGpu->transReq.resourceId = RESOURCEID_FB;

    g_virtGpu->flushReq.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
    g_virtGpu->flushReq.r = g_virtGpu->screen;
    g_virtGpu->flushReq.resourceId = RESOURCEID_FB;
}
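
/*
 * Steady-state control queue after PopulateVirtQ(), a sketch:
 *
 *   desc[0]: transReq -> desc[1]: resp (WRITE)
 *   desc[2]: flushReq -> desc[3]: resp (WRITE)
 *
 * Each NormOpsRefresh() tick only patches pAddr/len of an even descriptor
 * and publishes it in the avail ring; the shared response header is never
 * inspected, per the comment in struct Virtgpu.
 */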

static bool VirtgpuBeginNormDisplay(void)
{
    int32_t ret;

    if (!CMDTransferToHost(RESOURCEID_FB, &g_virtGpu->screen)) {
        return false;
    }
    if (!CMDResourceFlush()) {
        return false;
    }

    /* now we can pre-fill the queue entries to avoid redundant setup during normal OPs */
    PopulateVirtQ();

    if ((ret = OsalTimerStartLoop(&g_virtGpu->timer)) != HDF_SUCCESS) {
        HDF_LOGE("[%s]start timer failed: %d\n", __func__, ret);
        return false;
    }

    return true;
}

/* unified DeInit for InitDev, HDF and fb */
static void VirtgpuDeInit(struct Virtgpu *gpu)
{
    if (gpu->timer.realTimer) {
        OsalTimerDelete(&gpu->timer);
    }
    if (gpu->fb) {
        LOS_MemFree(OS_SYS_MEM_ADDR, gpu->fb);
    }
    LOS_MemFree(OS_SYS_MEM_ADDR, gpu);
    g_virtGpu = NULL;
}

static struct Virtgpu *VirtgpuInitDev(void)
{
    struct Virtgpu *gpu = NULL;
    VADDR_T base;
    uint16_t qsz[VIRTQ_NUM];
    int32_t ret, len;

    /* NOTE: For simplicity, allocate all of this data from physically continuous memory. */
    len = sizeof(struct Virtgpu) + VirtqSize(VIRTQ_CONTROL_QSZ) + VirtqSize(VIRTQ_CURSOR_QSZ);
    gpu = LOS_MemAlloc(OS_SYS_MEM_ADDR, len * sizeof(void *));
    if (gpu != NULL) {
        (void)memset_s(gpu, len * sizeof(void *), 0, len * sizeof(void *));
    } else {
        HDF_LOGE("[%s]alloc gpu memory failed\n", __func__);
        return NULL;
    }

    if (!VirtmmioDiscover(VIRTMMIO_DEVICE_ID_GPU, &gpu->dev)) {
        HDF_LOGE("[%s]VirtmmioDiscover failed\n", __func__);
        goto ERR_OUT;
    }

    VirtmmioInitBegin(&gpu->dev);

    if (!VirtmmioNegotiate(&gpu->dev, Feature0, Feature1, gpu)) {
        HDF_LOGE("[%s]VirtmmioNegotiate failed\n", __func__);
        goto ERR_OUT1;
    }

    base = ALIGN((VADDR_T)gpu + sizeof(struct Virtgpu), VIRTQ_ALIGN_DESC);
    qsz[0] = VIRTQ_CONTROL_QSZ;
    qsz[1] = VIRTQ_CURSOR_QSZ;
    if (VirtmmioConfigQueue(&gpu->dev, base, qsz, VIRTQ_NUM) == 0) {
        HDF_LOGE("[%s]VirtmmioConfigQueue failed\n", __func__);
        goto ERR_OUT1;
    }

    /* the framebuffer can be modified at any time, so we need a full-screen refresh timer */
    ret = OsalTimerCreate(&gpu->timer, GPU_DFT_RATE, NormOpsRefresh, 0);
    if (ret != HDF_SUCCESS) {
        HDF_LOGE("[%s]create timer failed: %d\n", __func__, ret);
        goto ERR_OUT1;
    }

    for (int i = 0; i < VIRTQ_NUM; i++) {   /* hint that the device need not send IRQs */
        gpu->dev.vq[i].avail->flag = VIRTQ_AVAIL_F_NO_INTERRUPT;
    }

    VritmmioInitEnd(&gpu->dev);             /* now the virt queues can be used */
    return gpu;

ERR_OUT1:
    VirtmmioInitFailed(&gpu->dev);
ERR_OUT:
    VirtgpuDeInit(gpu);
    return NULL;
}

static int32_t HdfVirtgpuInit(struct HdfDeviceObject *device)
{
    int32_t ret;
    HDF_LOGI("HdfVirtgpuInit begin!\n");

    if (device == NULL) {
        HDF_LOGE("[%s]device is null", __func__);
        return HDF_ERR_INVALID_PARAM;
    }

    g_virtGpu = VirtgpuInitDev();
    if (g_virtGpu == NULL) {
        return HDF_FAILURE;
    }
    device->priv = g_virtGpu;

    /* frame buffer resources are initialized here, using the virt queue mechanism */
    if ((ret = fb_register(0, 0)) != 0) {
        HDF_LOGE("[%s]framebuffer register failed: %d", __func__, ret);
        return HDF_FAILURE;
    }

    if (!VirtgpuBeginNormDisplay()) {
        return HDF_FAILURE;
    }

    HDF_LOGI("HdfVirtgpuInit end!\n");

    return HDF_SUCCESS;
}
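
/*
 * Note the ordering above: fb_register() is expected to call back into
 * up_fbinitialize() (bottom of this file), which queries the display info
 * and creates/attaches the 2D resource, so everything is in place before
 * VirtgpuBeginNormDisplay() starts the refresh timer.
 */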

static void HdfVirtgpuRelease(struct HdfDeviceObject *deviceObject)
{
    if (deviceObject) {
        if (deviceObject->priv) {
            fb_unregister(0);
        }
    }
}

struct HdfDriverEntry g_virtGpuEntry = {
    .moduleVersion = 1,
    .moduleName = "HDF_VIRTIO_GPU",
    .Init = HdfVirtgpuInit,
    .Release = HdfVirtgpuRelease,
};

HDF_INIT(g_virtGpuEntry);


/*
 * video/fb.h interface implementation
 */

static bool VirtgpuInitResourceHelper(uint32_t resourceId)
{
    uint64_t va;
    uint32_t len, w, h;

    if (!CMDResourceCreate2D(resourceId)) {
        return false;
    }

    if (resourceId == RESOURCEID_FB) {
        va = u32_to_u64((uint32_t)g_virtGpu->fb);
        w = g_virtGpu->screen.width;
        h = g_virtGpu->screen.height;
    } else {
        HDF_LOGE("[%s]unsupported resource ID: %u", __func__, resourceId);
        return false;
    }
    len = w * h * PIXEL_BYTES;
    if (!CMDResourceAttachBacking(resourceId, va, len)) {
        return false;
    }

    if (resourceId == RESOURCEID_FB) {
        struct VirtgpuRect r = { 0, 0, w, h };
        return CMDSetScanout(&r);
    }
    return true;
}

static bool VirtgpuInitResource(void)
{
    /* The framebuffer must be physically continuous. fb_register will zero the buffer. */
    g_virtGpu->fb = LOS_MemAlloc(OS_SYS_MEM_ADDR, VirtgpuFbPageSize());
    if (g_virtGpu->fb != NULL) {
        (void)memset_s(g_virtGpu->fb, VirtgpuFbPageSize(), 0, VirtgpuFbPageSize());
    } else {
        HDF_LOGE("[%s]alloc framebuffer memory failed", __func__);
        return false;
    }
    if (!VirtgpuInitResourceHelper(RESOURCEID_FB)) {
        return false;
    }

    return true;
}

int up_fbinitialize(int display)
{
    if (display != 0) {
        return -1;
    }

    CMDGetDisplayInfo();
    if (g_virtGpu->edid) {
        CMDGetEdid();
    }

    if (!VirtgpuInitResource()) {
        return -1;
    }

    return 0;
}

static int FbGetVideoInfo(struct fb_vtable_s *vtable, struct fb_videoinfo_s *vinfo)
{
    (void)vtable;
    vinfo->fmt = FB_FMT_RGB32;  /* sRGB */
    vinfo->xres = g_virtGpu->screen.width;
    vinfo->yres = g_virtGpu->screen.height;
    vinfo->nplanes = 1;
    return 0;
}

#define BYTE_BITS   8
static int FbGetPlaneInfo(struct fb_vtable_s *vtable, int planeno, struct fb_planeinfo_s *pinfo)
{
    if (planeno != 0) {
        return -1;
    }
    (void)vtable;

    pinfo->fbmem = g_virtGpu->fb;
    pinfo->stride = g_virtGpu->screen.width * PIXEL_BYTES;
    pinfo->fblen = pinfo->stride * g_virtGpu->screen.height;
    pinfo->display = 0;
    pinfo->bpp = PIXEL_BYTES * BYTE_BITS;
    return 0;
}

#ifdef CONFIG_FB_OVERLAY
static int FbGetOverlayInfo(struct fb_vtable_s *v, int overlayno, struct fb_overlayinfo_s *info)
{
    (void)v;
    if (overlayno != 0) {
        return -1;
    }

    info->fbmem = g_virtGpu->fb;
    info->memphys = (void *)VMM_TO_DMA_ADDR((VADDR_T)g_virtGpu->fb);
    info->stride = g_virtGpu->screen.width * PIXEL_BYTES;
    info->fblen = info->stride * g_virtGpu->screen.height;
    info->overlay = 0;
    info->bpp = PIXEL_BYTES * BYTE_BITS;
    info->accl = 0;
    return 0;
}
#endif

/* expect the window manager to deal with concurrent access */
static int FbOpen(struct fb_vtable_s *vtable)
{
    (void)vtable;
    return 0;
}

static int FbRelease(struct fb_vtable_s *vtable)
{
    (void)vtable;
    return 0;
}

/* placeholder to satisfy the video/fb.h interface */
static int FbDummy(struct fb_vtable_s *v, int *s)
{
    (void)v;
    (void)s;
    HDF_LOGE("[%s]unsupported method", __func__);
    return -1;
}

static struct fb_vtable_s g_virtGpuFbOps = {
    .getvideoinfo = FbGetVideoInfo,
    .getplaneinfo = FbGetPlaneInfo,
    .fb_open = FbOpen,
    .fb_release = FbRelease,
#ifdef CONFIG_FB_CMAP
    .getcmap = (int (*)(struct fb_vtable_s *, struct fb_cmap_s *))FbDummy,
    .putcmap = (int (*)(struct fb_vtable_s *, const struct fb_cmap_s *))FbDummy,
#endif
#ifdef CONFIG_FB_OVERLAY
    .getoverlayinfo = FbGetOverlayInfo,
    .settransp = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
    .setchromakey = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
    .setcolor = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
    .setblank = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
    .setarea = (int (*)(struct fb_vtable_s *, const struct fb_overlayinfo_s *))FbDummy,
# ifdef CONFIG_FB_OVERLAY_BLIT
    .blit = (int (*)(struct fb_vtable_s *, const struct fb_overlayblit_s *))FbDummy,
    .blend = (int (*)(struct fb_vtable_s *, const struct fb_overlayblend_s *))FbDummy,
# endif
    .fb_pan_display = (int (*)(struct fb_vtable_s *, struct fb_overlayinfo_s *))FbDummy,
#endif
};

struct fb_vtable_s *up_fbgetvplane(int display, int vplane)
{
    if ((display != 0) || (vplane != 0)) {
        return NULL;
    }
    return &g_virtGpuFbOps;
}

void up_fbuninitialize(int display)
{
    if (display != 0) {
        return;
    }

    if (g_virtGpu) {
        VirtgpuDeInit(g_virtGpu);
    }
}