/*
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* enable lwip 'netif_add' API */
#define __LWIP__

#include "los_reg.h"
#include "los_compiler.h"
#include "los_debug.h"
#include "los_interrupt.h"

#define IFNAMSIZ  IF_NAMESIZE

#include "los_task.h"
#include "los_sched.h"
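/*
 * The LOS_Spin* operations used later in this file are emulated with the
 * global task lock plus interrupt save/restore, and SPIN_LOCK_S is defined
 * as a stub type (see the macros and struct below).
 */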
VOID LOS_TaskLockSave(UINT32 *intSave)
{
    *intSave = LOS_IntLock();
    g_losTaskLock++;
}

VOID LOS_TaskUnlockRestore(UINT32 intSave)
{
    if (g_losTaskLock > 0) {
        g_losTaskLock--;
        if (g_losTaskLock == 0) {
            LOS_IntRestore(intSave);
            LOS_Schedule();
            return;
        }
    }

    LOS_IntRestore(intSave);
}

#define LOS_SpinLock(lock) LOS_TaskLock()
#define LOS_SpinUnlock(lock) LOS_TaskUnlock()
#define LOS_SpinLockSave(lock, intSave)  LOS_TaskLockSave(intSave)
#define LOS_SpinUnlockRestore(lock, intSave)  LOS_TaskUnlockRestore(intSave)

#include "stddef.h"
typedef struct Spinlock {
    size_t      rawLock;
#ifdef LOSCFG_KERNEL_SMP
    UINT32      cpuid;
    VOID        *owner;
    const CHAR  *name;
#endif
} SPIN_LOCK_S;

/* the kernel changed the lwip 'netif->client_data' size, so this header must come before the lwip headers */
#include "netinet/if_ether.h"

#include "lwip/opt.h"
#include "lwip/netif.h"
#include "lwip/etharp.h"
#include "lwip/tcpip.h"
#include "lwip/mem.h"
#include "virtmmio.h"

#define VIRTIO_NET_F_MTU                    (1 << 3)
#define VIRTIO_NET_F_MAC                    (1 << 5)
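/* beginning of the virtio-net device configuration space layout (per the virtio spec) */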
struct VirtnetConfig {
    uint8_t mac[6];
    uint16_t status;
    uint16_t maxVirtqPairs;
    uint16_t mtu;
};

#define VIRTMMIO_NETIF_NAME                 "virtnet"
#define VIRTMMIO_NETIF_NICK                 "vn0"
#define VIRTMMIO_NETIF_DFT_IP               "10.0.2.15"
#define VIRTMMIO_NETIF_DFT_GW               "10.0.2.2"
#define VIRTMMIO_NETIF_DFT_MASK             "255.255.255.0"
#define VIRTMMIO_NETIF_DFT_RXQSZ            16
#define VIRTMMIO_NETIF_DFT_TXQSZ            32

/* This struct is actually ignored by this simple driver */
struct VirtnetHdr {
    uint8_t flag;
    uint8_t gsoType;
    uint16_t hdrLen;
    uint16_t gsoSize;
    uint16_t csumStart;
    uint16_t csumOffset;
    uint16_t numBuffers;
};

/*
 * We use two queues, one each for Tx and Rx. No dynamic memory is allocated or
 * freed on the Tx/Rx path: output pbufs are put directly into the queue and freed
 * by tcpip_thread once the device has used them; input uses fixed-size buffers
 * placed just after the queues and released by the application when consumed.
 *
 * Tx/Rx queues memory layout:
 *                         Rx queue                                Tx queue             Rx buffers
 * +-----------------+------------------+------------------++------+-------+------++----------------------+
 * | desc: 16B align | avail: 2B align  | used: 4B align   || desc | avail | used || 4B align             |
 * | 16*(Queue Size) | 4+2*(Queue Size) | 4+8*(Queue Size) ||      |       |      || 1528*(Rx Queue Size) |
 * +-----------------+------------------+------------------++------+-------+------++----------------------+
 */
#define VIRTQ_NUM_NET       2
#define VIRTQ_RXBUF_ALIGN   4
#define VIRTQ_RXBUF_SIZE    ALIGN(sizeof(struct VirtnetHdr) + ETH_FRAME_LEN, VIRTQ_RXBUF_ALIGN)

struct RbufRecord {
    struct pbuf_custom  cbuf;
    struct VirtNetif    *nic;
    uint16_t            id;     /* index to Rx vq[0].desc[] */
};

struct TbufRecord {
    struct pbuf         *head;  /* first pbuf address of this pbuf chain */
    uint16_t            count;  /* occupied desc entries, including VirtnetHdr */
    uint16_t            tail;   /* tail pbuf's index to Tx vq[1].desc[] */
};

struct VirtNetif {
    struct VirtmmioDev  dev;

    struct RbufRecord   *rbufRec;
    SPIN_LOCK_S         recvLock;

    uint16_t            tFreeHdr;   /* head of Tx free desc entries list */
    uint16_t            tFreeNum;
    struct TbufRecord   *tbufRec;
    SPIN_LOCK_S         transLock;

    struct VirtnetHdr   vnHdr;
};

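/*
 * Feature negotiation callbacks for VirtmmioNegotiate: Feature0 handles the
 * low 32 feature bits (device MTU and MAC address), Feature1 the high bits
 * (VIRTIO_F_VERSION_1 is mandatory for this driver).
 */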
static bool Feature0(uint32_t features, uint32_t *supported, void *dev)
{
    struct netif *netif = dev;
    struct VirtNetif *nic = netif->state;
    struct VirtnetConfig *conf = (struct VirtnetConfig *)(nic->dev.base + VIRTMMIO_REG_CONFIG);
    int i;

    if (features & VIRTIO_NET_F_MTU) {
        if (conf->mtu > ETH_DATA_LEN) {
            PRINT_ERR("unsupported backend net MTU: %u\n", conf->mtu);
            return false;
        }
        netif->mtu = conf->mtu;
        *supported |= VIRTIO_NET_F_MTU;
    } else {
        netif->mtu = ETH_DATA_LEN;
    }

    LOS_ASSERT(features & VIRTIO_NET_F_MAC);
    for (i = 0; i < ETHARP_HWADDR_LEN; i++) {
        netif->hwaddr[i] = conf->mac[i];
    }
    netif->hwaddr_len = ETHARP_HWADDR_LEN;
    *supported |= VIRTIO_NET_F_MAC;

    return true;
}

static bool Feature1(uint32_t features, uint32_t *supported, void *dev)
{
    if (features & VIRTIO_F_VERSION_1) {
        *supported |= VIRTIO_F_VERSION_1;
    } else {
        PRINT_ERR("net device has no VERSION_1 feature\n");
        return false;
    }

    return true;
}

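/* chain every Tx descriptor into a free list and allocate the matching bookkeeping array */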
static err_t InitTxFreelist(struct VirtNetif *nic)
{
    int i;

    nic->tbufRec = malloc(sizeof(struct TbufRecord) * nic->dev.vq[1].qsz);
    if (nic->tbufRec == NULL) {
        PRINT_ERR("alloc nic->tbufRec memory failed\n");
        return ERR_MEM;
    }

    for (i = 0; i < nic->dev.vq[1].qsz - 1; i++) {
        nic->dev.vq[1].desc[i].flag = VIRTQ_DESC_F_NEXT;
        nic->dev.vq[1].desc[i].next = i + 1;
    }
    nic->tFreeHdr = 0;
    nic->tFreeNum = nic->dev.vq[1].qsz;

    return ERR_OK;
}

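/* the device has finished with a Tx chain: return its descriptors to the free list and free the pbuf chain */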
static void FreeTxEntry(struct VirtNetif *nic, uint16_t head)
{
    uint16_t count, idx, tail;
    struct pbuf *phead = NULL;
    struct Virtq *q = &nic->dev.vq[1];

    idx = q->desc[head].next;
    phead = nic->tbufRec[idx].head;
    count = nic->tbufRec[idx].count;
    tail = nic->tbufRec[idx].tail;

    LOS_SpinLock(&nic->transLock);
    if (nic->tFreeNum > 0) {
        q->desc[tail].next = nic->tFreeHdr;
        q->desc[tail].flag = VIRTQ_DESC_F_NEXT;
    }
    nic->tFreeNum += count;
    nic->tFreeHdr = head;
    LOS_SpinUnlock(&nic->transLock);

    pbuf_free_callback(phead);
}

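/*
 * Custom pbuf free callback: when lwIP is done with an Rx buffer, put its
 * descriptor back on the Rx avail ring and notify the device if notifications
 * are enabled.
 */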
static void ReleaseRxEntry(struct pbuf *p)
{
    struct RbufRecord *pc = (struct RbufRecord *)p;
    struct VirtNetif *nic = pc->nic;
    uint32_t intSave;

    LOS_SpinLockSave(&nic->recvLock, &intSave);
    nic->dev.vq[0].avail->ring[nic->dev.vq[0].avail->index % nic->dev.vq[0].qsz] = pc->id;
    DSB;
    nic->dev.vq[0].avail->index++;
    LOS_SpinUnlockRestore(&nic->recvLock, intSave);

    if (nic->dev.vq[0].used->flag != VIRTQ_USED_F_NO_NOTIFY) {
        FENCE_WRITE_UINT32(0, nic->dev.base + VIRTMMIO_REG_QUEUENOTIFY);
    }
}

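/* point every Rx descriptor at its fixed buffer (carved out right after the queues) and bind it to its RbufRecord */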
static err_t ConfigRxBuffer(struct VirtNetif *nic, VADDR_T buf)
{
    uint32_t i;
    PADDR_T paddr;
    struct Virtq *q = &nic->dev.vq[0];

    nic->rbufRec = calloc(q->qsz, sizeof(struct RbufRecord));
    if (nic->rbufRec == NULL) {
        PRINT_ERR("alloc nic->rbufRec memory failed\n");
        return ERR_MEM;
    }

    paddr = VMM_TO_DMA_ADDR(buf);

    for (i = 0; i < q->qsz; i++) {
        q->desc[i].pAddr = u32_to_u64(paddr);
        q->desc[i].len = sizeof(struct VirtnetHdr) + ETH_FRAME_LEN;
        q->desc[i].flag = VIRTQ_DESC_F_WRITE;
        paddr += VIRTQ_RXBUF_SIZE;

        q->avail->ring[i] = i;

        nic->rbufRec[i].cbuf.custom_free_function = ReleaseRxEntry;
        nic->rbufRec[i].nic = nic;
        nic->rbufRec[i].id = i;
    }

    return ERR_OK;
}

static err_t ConfigQueue(struct VirtNetif *nic)
{
    VADDR_T buf, pad;
    void *base = NULL;
    err_t ret;
    size_t size;
    uint16_t qsz[VIRTQ_NUM_NET];

    /*
     * lwip requires (packet address - ETH_PAD_SIZE) to be 4-byte aligned.
     * We pad before the first Rx buf to satisfy this. Rx buf = VirtnetHdr + packet,
     * so (buf base + pad + VirtnetHdr - ETH_PAD_SIZE) must be 4-byte aligned.
     * When allocating memory, VIRTQ_RXBUF_ALIGN - 1 is enough for the padding.
     */
    qsz[0] = VIRTMMIO_NETIF_DFT_RXQSZ;
    qsz[1] = VIRTMMIO_NETIF_DFT_TXQSZ;
    size = VirtqSize(qsz[0]) + VirtqSize(qsz[1]) + VIRTQ_RXBUF_ALIGN - 1 + qsz[0] * VIRTQ_RXBUF_SIZE;

    base = calloc(1, size);
    if (base == NULL) {
        PRINT_ERR("alloc queues memory failed\n");
        return ERR_MEM;
    }

    buf = VirtmmioConfigQueue(&nic->dev, (VADDR_T)base, qsz, VIRTQ_NUM_NET);
    if (buf == 0) {
        return ERR_IF;
    }

    pad = (buf + sizeof(struct VirtnetHdr) - ETH_PAD_SIZE) % VIRTQ_RXBUF_ALIGN;
    if (pad) {
        pad = VIRTQ_RXBUF_ALIGN - pad;
    }
    buf += pad;
    if ((ret = ConfigRxBuffer(nic, buf)) != ERR_OK) {
        return ret;
    }

    if ((ret = InitTxFreelist(nic)) != ERR_OK) {
        return ret;
    }

    return ERR_OK;
}

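/* take 'count' descriptors off the Tx free list, yielding until enough are available; returns the head index of the detached chain */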
static uint16_t GetTxFreeEntry(struct VirtNetif *nic, uint16_t count)
{
    uint32_t intSave;
    uint16_t head, tail, idx;

RETRY:
    LOS_SpinLockSave(&nic->transLock, &intSave);
    if (count > nic->tFreeNum) {
        LOS_SpinUnlockRestore(&nic->transLock, intSave);
        LOS_TaskYield();
        goto RETRY;
    }

    nic->tFreeNum -= count;
    head = nic->tFreeHdr;
    idx = head;
    while (count--) {
        tail = idx;
        idx = nic->dev.vq[1].desc[idx].next;
    }
    nic->tFreeHdr = idx;   /* may be invalid if empty, but tFreeNum must be valid: 0 */
    LOS_SpinUnlockRestore(&nic->transLock, intSave);
    nic->dev.vq[1].desc[tail].flag &= ~VIRTQ_DESC_F_NEXT;

    return head;
}

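/*
 * lwIP linkoutput hook: build a descriptor chain of VirtnetHdr followed by each
 * pbuf in the chain, put it on the Tx avail ring and kick the device. The pbuf
 * chain is referenced here and freed later in FreeTxEntry.
 */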
static err_t LowLevelOutput(struct netif *netif, struct pbuf *p)
{
    uint16_t add, idx, head, tmp;
    struct pbuf *q = NULL;
    struct VirtNetif *nic = netif->state;
    struct Virtq *trans = &nic->dev.vq[1];

#if ETH_PAD_SIZE
    pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif

    /* plus 1 for VirtnetHdr */
    add = pbuf_clen(p) + 1;
    if (add > trans->qsz) {
        PRINT_ERR("packet pbuf_clen %u larger than supported %u\n", add - 1, trans->qsz - 1);
        return ERR_IF;
    }

    head = GetTxFreeEntry(nic, add);
    trans->desc[head].pAddr = u32_to_u64(VMM_TO_DMA_ADDR((PADDR_T)&nic->vnHdr));
    trans->desc[head].len = sizeof(struct VirtnetHdr);
    idx = trans->desc[head].next;
    tmp = head;
    q = p;
    while (q != NULL) {
        tmp = trans->desc[tmp].next;
        trans->desc[tmp].pAddr = u32_to_u64(VMM_TO_DMA_ADDR((PADDR_T)q->payload));
        trans->desc[tmp].len = q->len;
        q = q->next;
    }

    nic->tbufRec[idx].head = p;
    nic->tbufRec[idx].count = add;
    nic->tbufRec[idx].tail = tmp;
    pbuf_ref(p);

    trans->avail->ring[trans->avail->index % trans->qsz] = head;
    DSB;
    trans->avail->index++;
    FENCE_WRITE_UINT32(1, nic->dev.base + VIRTMMIO_REG_QUEUENOTIFY);

#if ETH_PAD_SIZE
    pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif

    return ERR_OK;
}

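/* wrap a received buffer into a custom pbuf (zero copy), with the payload pointing just past the VirtnetHdr */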
static struct pbuf *LowLevelInput(const struct netif *netif, const struct VirtqUsedElem *e)
{
    struct VirtNetif *nic = netif->state;
    struct pbuf *p = NULL;
    uint16_t len;
    VADDR_T payload;

    payload = DMA_TO_VMM_ADDR(nic->dev.vq[0].desc[e->id].pAddr) + sizeof(struct VirtnetHdr);
#if ETH_PAD_SIZE
    payload -= ETH_PAD_SIZE;
#endif
    pbuf_alloced_custom(PBUF_RAW, ETH_FRAME_LEN, PBUF_ROM | PBUF_ALLOC_FLAG_RX,
                        &nic->rbufRec[e->id].cbuf, (void *)payload, ETH_FRAME_LEN);

    len = e->len - sizeof(struct VirtnetHdr);
    LOS_ASSERT(len <= ETH_FRAME_LEN);
#if ETH_PAD_SIZE
    len += ETH_PAD_SIZE;
#endif

    p = &nic->rbufRec[e->id].cbuf.pbuf;
    p->len = len;
    p->tot_len = p->len;
    return p;
}

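/*
 * Drain the Rx used ring and hand each packet to lwIP. Used-buffer interrupts
 * are suppressed while draining, with a recheck to avoid missing late arrivals.
 */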
static void VirtnetRxHandle(struct netif *netif)
{
    struct VirtNetif *nic = netif->state;
    struct Virtq *q = &nic->dev.vq[0];
    struct pbuf *buf = NULL;
    struct VirtqUsedElem *e = NULL;

    q->avail->flag = VIRTQ_AVAIL_F_NO_INTERRUPT;
    while (1) {
        if (q->last == q->used->index) {
            q->avail->flag = 0;
            /* recheck in case a new buffer came in between seeing the empty ring and re-enabling the interrupt */
            DSB;
            if (q->last == q->used->index) {
                break;
            }
            q->avail->flag = VIRTQ_AVAIL_F_NO_INTERRUPT;
        }

        DSB;
        e = &q->used->ring[q->last % q->qsz];
        buf = LowLevelInput(netif, e);
        if (netif->input(buf, netif) != ERR_OK) {
            LWIP_DEBUGF(NETIF_DEBUG, ("IP input error\n"));
            ReleaseRxEntry(buf);
        }

        q->last++;
    }
}

static void VirtnetTxHandle(struct VirtNetif *nic)
{
    struct Virtq *q = &nic->dev.vq[1];
    struct VirtqUsedElem *e = NULL;

    /* no empty-ring recheck here, unlike VirtnetRxHandle */
    q->avail->flag = VIRTQ_AVAIL_F_NO_INTERRUPT;
    while (q->last != q->used->index) {
        DSB;
        e = &q->used->ring[q->last % q->qsz];
        FreeTxEntry(nic, e->id);
        q->last++;
    }
    q->avail->flag = 0;
}

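/* virtio-mmio interrupt handler: handle Rx and Tx completions, then acknowledge the used-buffer notification */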
static void VirtnetIRQhandle(void *param)
{
    struct netif *netif = (struct netif *)param;
    struct VirtNetif *nic = netif->state;

    if (!(GET_UINT32(nic->dev.base + VIRTMMIO_REG_INTERRUPTSTATUS) & VIRTMMIO_IRQ_NOTIFY_USED)) {
        return;
    }

    VirtnetRxHandle(netif);

    VirtnetTxHandle(nic);

    FENCE_WRITE_UINT32(VIRTMMIO_IRQ_NOTIFY_USED, nic->dev.base + VIRTMMIO_REG_INTERRUPTACK);
}

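/*
 * virtio-mmio bring-up sequence: discover the device, negotiate features,
 * configure the queues and register the IRQ, then expose the Rx buffers.
 */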
static err_t LowLevelInit(struct netif *netif)
{
    struct VirtNetif *nic = netif->state;
    int ret;

    if (!VirtmmioDiscover(VIRTMMIO_DEVICE_ID_NET, &nic->dev)) {
        return ERR_IF;
    }

    VirtmmioInitBegin(&nic->dev);

    if (!VirtmmioNegotiate(&nic->dev, Feature0, Feature1, netif)) {
        ret = ERR_IF;
        goto ERR_OUT;
    }

    if ((ret = ConfigQueue(nic)) != ERR_OK) {
        goto ERR_OUT;
    }

    if (!VirtmmioRegisterIRQ(&nic->dev, (HWI_PROC_FUNC)VirtnetIRQhandle, netif, VIRTMMIO_NETIF_NAME)) {
        ret = ERR_IF;
        goto ERR_OUT;
    }

    VritmmioInitEnd(&nic->dev);

    /* everything is ready, now expose the receive buffers to the device */
    nic->dev.vq[0].avail->index += nic->dev.vq[0].qsz;
    FENCE_WRITE_UINT32(0, nic->dev.base + VIRTMMIO_REG_QUEUENOTIFY);
    return ERR_OK;

ERR_OUT:
    VirtmmioInitFailed(&nic->dev);
    return ret;
}

static err_t EthernetIfInit(struct netif *netif)
{
    struct VirtNetif *nic = NULL;

    LWIP_ASSERT("netif != NULL", (netif != NULL));

    nic = mem_calloc(1, sizeof(struct VirtNetif));
    if (nic == NULL) {
        PRINT_ERR("alloc nic memory failed\n");
        return ERR_MEM;
    }
    netif->state = nic;

#if LWIP_NETIF_HOSTNAME
    netif->hostname = VIRTMMIO_NETIF_NAME;
#endif

    strncpy_s(netif->name, sizeof(netif->name), VIRTMMIO_NETIF_NICK, sizeof(netif->name));
    strncpy_s(netif->full_name, sizeof(netif->full_name), VIRTMMIO_NETIF_NICK, sizeof(netif->full_name));

    netif->output = etharp_output;
    netif->linkoutput = LowLevelOutput;

    netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_LINK_UP;

    return LowLevelInit(netif);
}

static void VirtnetDeInit(struct netif *netif)
{
    struct VirtNetif *nic = netif->state;

    if (nic && (nic->dev.irq & ~_IRQ_MASK)) {
        LOS_HwiDelete(nic->dev.irq, NULL);
    }
    if (nic && nic->rbufRec) {
        free(nic->rbufRec);
    }
    if (nic && nic->tbufRec) {
        free(nic->tbufRec);
    }
    if (nic && nic->dev.vq[0].desc) {
        free(nic->dev.vq[0].desc);
    }
    if (nic) {
        mem_free(nic);
    }
    mem_free(netif);
}

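/*
 * Public entry point: allocate a netif, assign the default address, netmask and
 * gateway (QEMU user-mode networking defaults) and register it with lwIP.
 */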
struct netif *VirtnetInit(void)
{
    ip4_addr_t ip, mask, gw;
    struct netif *netif = NULL;

    netif = mem_calloc(1, sizeof(struct netif));
    if (netif == NULL) {
        PRINT_ERR("alloc netif memory failed\n");
        return NULL;
    }

    ip.addr = ipaddr_addr(VIRTMMIO_NETIF_DFT_IP);
    mask.addr = ipaddr_addr(VIRTMMIO_NETIF_DFT_MASK);
    gw.addr = ipaddr_addr(VIRTMMIO_NETIF_DFT_GW);
    if (netif_add(netif, &ip, &mask, &gw, netif->state,
                    EthernetIfInit, tcpip_input) == NULL) {
        PRINT_ERR("add virtio-mmio net device failed\n");
        VirtnetDeInit(netif);
        return NULL;
    }

    return netif;
}