1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2023 Huawei Device Co., Ltd.
4 *
5 * Description: Bluetooth virtual network device used in
6 * the NewIP over Bluetooth communication scenario.
7 *
8 * Author: Yang Yanjun <yangyanjun@huawei.com>
9 *
10 * Data: 2023-03-14
11 */
12
13 #define pr_fmt(fmt) "newip-bt: [%s:%d] " fmt, __func__, __LINE__
14
15 #include "btdev.h"
16
17 #define ndev_name(vnet) bt_virnet_get_ndev_name(vnet) /* btn1/2/3/4/... */
18 #define cdev_name(vnet) bt_virnet_get_cdev_name(vnet) /* dev/btdev1/2/3/4/... */
19
20 /* /sys/module/btdev/parameters/btdev_debug */
21 bool g_btdev_debug;
22 module_param_named(btdev_debug, g_btdev_debug, bool, 0644);
23
24 #define btdev_dbg(fmt, ...) \
25 do { \
26 if (g_btdev_debug) \
27 pr_crit(fmt, ##__VA_ARGS__); \
28 } while (0)
29
30 #define btdev_dbg_err(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
31
32 static struct bt_drv *g_bt_drv;
33
bt_seq_show(struct seq_file *m, void *v)34 static int bt_seq_show(struct seq_file *m, void *v)
35 {
36 struct bt_virnet *vnet = NULL;
37
38 if (unlikely(!g_bt_drv)) {
39 btdev_dbg_err("invalid bt_drv");
40 return -EINVAL;
41 }
42
43 seq_printf(m, "Total device: %d (bitmap: 0x%X) Ring size: %d\n",
44 bt_get_total_device(g_bt_drv), g_bt_drv->bitmap,
45 BT_RING_BUFFER_SIZE);
46
47 list_for_each_entry(vnet, &g_bt_drv->devices_table->head, virnet_entry) {
48 seq_printf(m, "dev: %12s, interface: %7s, state: %12s, MTU: %4d\n",
49 cdev_name(vnet), ndev_name(vnet),
50 bt_virnet_get_state_rep(vnet), vnet->ndev->mtu);
51 seq_printf(m, "ring head: %4d, ring tail: %4d, packets num: %4d\n",
52 vnet->tx_ring->head, vnet->tx_ring->tail,
53 bt_virnet_get_ring_packets(vnet));
54 }
55
56 return OK;
57 }
58
bt_proc_open(struct inode *inode, struct file *file)59 static int bt_proc_open(struct inode *inode, struct file *file)
60 {
61 if (unlikely(!inode) || unlikely(!file)) {
62 btdev_dbg_err("invalid parameter");
63 return -EINVAL;
64 }
65
66 return single_open(file, bt_seq_show, PDE_DATA(inode));
67 }
68
69 static struct proc_ops g_bt_proc_fops = {
70 .proc_open = bt_proc_open,
71 .proc_read = seq_read,
72 .proc_lseek = seq_lseek,
73 .proc_release = single_release};
74
__bt_virnet_open(struct file *filp, struct bt_virnet *vnet)75 static int __bt_virnet_open(struct file *filp, struct bt_virnet *vnet)
76 {
77 struct net_device *ndev;
78
79 if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
80 /* Check whether xx_open_limit is equal to 0 after subtracting 1.
81 * If so, return true
82 */
83 if (unlikely(!atomic_dec_and_test(&vnet->io_file->read_open_limit)))
84 goto read_twice_already;
85 } else if ((filp->f_flags & O_ACCMODE) == O_WRONLY) {
86 if (unlikely(!atomic_dec_and_test(&vnet->io_file->write_open_limit)))
87 goto write_twice_already;
88 } else if ((filp->f_flags & O_ACCMODE) == O_RDWR) {
89 if (unlikely(!atomic_dec_and_test(&vnet->io_file->read_open_limit)))
90 goto read_twice_already;
91 if (unlikely(!atomic_dec_and_test(&vnet->io_file->write_open_limit)))
92 goto write_twice_already;
93 }
94
95 /* Set xx_open_limit to 0 when the file is first opened */
96 rtnl_lock();
97 ndev = vnet->ndev;
98 if (unlikely(!(ndev->flags & IFF_UP))) {
99 int ret = dev_change_flags(ndev, ndev->flags | IFF_UP, NULL);
100
101 if (unlikely(ret < 0)) {
102 rtnl_unlock();
103 btdev_dbg_err("%s dev change flags failed, ret=%d", cdev_name(vnet), ret);
104 return -EBUSY;
105 }
106 }
107 rtnl_unlock();
108
109 set_state(vnet, BT_VIRNET_STATE_CONNECTED);
110 filp->private_data = vnet;
111 btdev_dbg("%s has been opened", cdev_name(vnet));
112 return OK;
113
114 /* If the file is not opened for the first time, an error occurs
115 * and xx_open_limit is restored to the open state. (set to 0)
116 */
117 read_twice_already:
118 atomic_inc(&vnet->io_file->read_open_limit);
119 btdev_dbg_err("%s has been opened for read twice already", cdev_name(vnet));
120 return -EBUSY;
121
122 write_twice_already:
123 atomic_inc(&vnet->io_file->write_open_limit);
124 btdev_dbg_err("%s has been opened for write twice already", cdev_name(vnet));
125 return -EBUSY;
126 }
127
bt_io_file_open(struct inode *node, struct file *filp)128 static int bt_io_file_open(struct inode *node, struct file *filp)
129 {
130 struct bt_virnet *vnet = NULL;
131
132 if (unlikely(!node) || unlikely(!filp)) {
133 btdev_dbg_err("invalid parameter");
134 return -EINVAL;
135 }
136
137 list_for_each_entry(vnet, &g_bt_drv->devices_table->head, virnet_entry) {
138 if (bt_virnet_get_cdev(vnet) == node->i_cdev)
139 return __bt_virnet_open(filp, vnet);
140 }
141 return -EIO;
142 }
143
bt_io_file_release(struct inode *node, struct file *filp)144 static int bt_io_file_release(struct inode *node, struct file *filp)
145 {
146 struct bt_virnet *vnet = NULL;
147
148 if (unlikely(!filp) || unlikely(!filp->private_data)) {
149 btdev_dbg_err("invalid parameter");
150 return -EINVAL;
151 }
152
153 vnet = filp->private_data;
154 btdev_dbg("%s has been released", cdev_name(vnet));
155
156 /* Set xx_open_limit to 1 when the file is closed */
157 if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
158 atomic_inc(&vnet->io_file->read_open_limit);
159 } else if ((filp->f_flags & O_ACCMODE) == O_WRONLY) {
160 atomic_inc(&vnet->io_file->write_open_limit);
161 } else if ((filp->f_flags & O_ACCMODE) == O_RDWR) {
162 atomic_inc(&vnet->io_file->read_open_limit);
163 atomic_inc(&vnet->io_file->write_open_limit);
164 }
165
166 set_state(vnet, BT_VIRNET_STATE_DISCONNECTED);
167
168 return OK;
169 }
170
bt_io_file_read(struct file *filp, char __user *buffer, size_t size, loff_t *off)171 static ssize_t bt_io_file_read(struct file *filp,
172 char __user *buffer,
173 size_t size, loff_t *off)
174 {
175 struct bt_virnet *vnet = NULL;
176 ssize_t out_sz;
177 struct sk_buff *skb = NULL;
178
179 if (unlikely(!filp) || unlikely(!buffer) || unlikely(!filp->private_data)) {
180 btdev_dbg_err("invalid parameter");
181 return -EINVAL;
182 }
183
184 vnet = filp->private_data;
185 while (unlikely(bt_ring_is_empty(vnet->tx_ring))) {
186 if (filp->f_flags & O_NONBLOCK)
187 return -EAGAIN;
188
189 if (wait_event_interruptible(vnet->rx_queue, !bt_ring_is_empty(vnet->tx_ring)))
190 return -ERESTARTSYS;
191 }
192
193 skb = bt_ring_current(vnet->tx_ring);
194 if (unlikely(!skb)) {
195 btdev_dbg_err("%s invalid skb", cdev_name(vnet));
196 return -EINVAL;
197 }
198 out_sz = skb->len > MACADDR_LEN ? (skb->len - MACADDR_LEN) : 0;
199 if (unlikely(out_sz > size) || unlikely(out_sz == 0)) {
200 /* Obtain the skb pointer from the ring buf and ask whether the user-state buf
201 * length can store data in the skb. If the user-state buf length is not enough,
202 * the skb cannot be released at this time, because the skb is still unchained
203 * on the ring buf.
204 */
205 btdev_dbg_err("%s usr-buf too small, skb-len=%ld, usr-buf-len=%ld, skb-len=%u",
206 cdev_name(vnet), (long)out_sz, (long)size, skb->len);
207 return -EINVAL;
208 }
209
210 bt_ring_consume(vnet->tx_ring);
211 if (copy_to_user(buffer, skb->data + MACADDR_LEN, out_sz)) {
212 /* The skb pointer is obtained from the ring buf and the skb has been unchained
213 * from the ring buf. In this case, the skb needs to be released when the skb data
214 * fails to be copied to the user mode.
215 */
216 btdev_dbg_err("%s copy to user failed", cdev_name(vnet));
217 dev_kfree_skb(skb);
218 return -EIO;
219 }
220 dev_kfree_skb(skb);
221
222 btdev_dbg("read %ld data from %s", (long)out_sz, cdev_name(vnet));
223 if (unlikely(netif_queue_stopped(vnet->ndev))) {
224 btdev_dbg("consume data: wake the queue");
225 netif_wake_queue(vnet->ndev);
226 }
227
228 return out_sz;
229 }
230
bt_io_file_write(struct file *filp, const char __user *buffer, size_t size, loff_t *off)231 static ssize_t bt_io_file_write(struct file *filp,
232 const char __user *buffer,
233 size_t size, loff_t *off)
234 {
235 struct bt_virnet *vnet = NULL;
236 struct sk_buff *skb = NULL;
237 int ret;
238 int len;
239 ssize_t in_sz;
240
241 if (unlikely(!filp) || unlikely(!buffer) || unlikely(!filp->private_data)) {
242 btdev_dbg_err("invalid parameter");
243 return -EINVAL;
244 }
245
246 vnet = filp->private_data;
247 in_sz = size + MACADDR_LEN;
248
249 /* Ethernet head length: DMAC(6B) + SMAC(6B) + eth-type(2B) */
250 skb = netdev_alloc_skb(bt_virnet_get_ndev(vnet), in_sz + NEWIP_TYPE_SIZE);
251 if (unlikely(!skb))
252 return -ENOMEM;
253
254 skb_reserve(skb, NEWIP_TYPE_SIZE);
255 skb_put(skb, in_sz);
256
257 memset(skb->data, 0, MACADDR_LEN);
258 if (copy_from_user(skb->data + MACADDR_LEN, buffer, size)) {
259 btdev_dbg_err("%s copy from user failed", cdev_name(vnet));
260 dev_kfree_skb(skb);
261 return -EIO;
262 }
263
264 len = skb->len;
265 skb->dev = bt_virnet_get_ndev(vnet);
266 skb->protocol = eth_type_trans(skb, bt_virnet_get_ndev(vnet));
267 ret = netif_rx_ni(skb);
268
269 if (ret == NET_RX_SUCCESS) {
270 btdev_dbg("write %lu bytes data to %s", size, cdev_name(vnet));
271 vnet->ndev->stats.rx_packets++;
272 vnet->ndev->stats.rx_bytes += len;
273 } else {
274 btdev_dbg_err("failed to write %lu bytes data to %s", size, cdev_name(vnet));
275 vnet->ndev->stats.rx_errors++;
276 vnet->ndev->stats.rx_dropped++;
277 }
278
279 return size;
280 }
281
bt_virnet_change_mtu(struct net_device *dev, int mtu)282 static int bt_virnet_change_mtu(struct net_device *dev, int mtu)
283 {
284 if (unlikely(!dev) || unlikely(mtu < 0) || unlikely(mtu > BT_MAX_MTU)) {
285 btdev_dbg_err("invalid parameter");
286 return -EINVAL;
287 }
288 btdev_dbg("change %s mtu %u to %u", dev->name, dev->mtu, mtu);
289 dev->mtu = mtu;
290 return OK;
291 }
292
bt_set_mtu(struct net_device *dev, int mtu)293 static int bt_set_mtu(struct net_device *dev, int mtu)
294 {
295 int err = OK;
296
297 if (unlikely(mtu < 0) || unlikely(mtu > BT_MAX_MTU)) {
298 btdev_dbg_err("invalid parameter");
299 return -EINVAL;
300 }
301
302 rtnl_lock();
303 err = dev_set_mtu(dev, mtu);
304 rtnl_unlock();
305 if (err < 0)
306 btdev_dbg_err("failed to set %s mtu to %d, err=%d", dev->name, mtu, err);
307 else
308 btdev_dbg("set %s mtu to %d", dev->name, mtu);
309
310 return err;
311 }
312
bt_cmd_enable_virnet(struct bt_virnet *vnet, unsigned long arg)313 static int bt_cmd_enable_virnet(struct bt_virnet *vnet, unsigned long arg)
314 {
315 int ret;
316
317 if (unlikely(vnet->state != BT_VIRNET_STATE_DISABLED)) {
318 btdev_dbg_err("%s enable can only be set at disabled state", cdev_name(vnet));
319 return -EINVAL; // enable failed
320 }
321
322 rtnl_lock();
323 ret = dev_change_flags(vnet->ndev, vnet->ndev->flags | IFF_UP, NULL);
324 rtnl_unlock();
325 if (unlikely(ret < 0)) {
326 btdev_dbg_err("%s dev change flags failed, ret=%d", cdev_name(vnet), ret);
327 return -EIO;
328 }
329
330 btdev_dbg("%s has been enabled", cdev_name(vnet));
331 set_state(vnet, BT_VIRNET_STATE_CONNECTED);
332 return OK;
333 }
334
bt_cmd_disable_virnet(struct bt_virnet *vnet, unsigned long arg)335 static int bt_cmd_disable_virnet(struct bt_virnet *vnet, unsigned long arg)
336 {
337 int ret;
338
339 if (unlikely(vnet->state != BT_VIRNET_STATE_CONNECTED)) {
340 btdev_dbg_err("%s disable can only be set at connected state", cdev_name(vnet));
341 return -EINVAL;
342 }
343
344 rtnl_lock();
345 ret = dev_change_flags(vnet->ndev, vnet->ndev->flags & ~IFF_UP, NULL);
346 rtnl_unlock();
347 if (unlikely(ret < 0)) {
348 btdev_dbg_err("%s dev change flags failed, ret=%d", cdev_name(vnet), ret);
349 return -EIO;
350 }
351
352 btdev_dbg("%s has been disabled", cdev_name(vnet));
353 set_state(vnet, BT_VIRNET_STATE_DISABLED);
354 return OK;
355 }
356
bt_cmd_change_mtu(struct bt_virnet *vnet, unsigned long arg)357 static int bt_cmd_change_mtu(struct bt_virnet *vnet, unsigned long arg)
358 {
359 int mtu;
360 int ret;
361
362 if (unlikely(get_user(mtu, (int __user *)arg))) {
363 btdev_dbg_err("%s get user failed", ndev_name(vnet));
364 return -EIO;
365 }
366
367 ret = bt_set_mtu(vnet->ndev, mtu);
368 if (unlikely(ret < 0)) {
369 btdev_dbg_err("%s changed mtu to %d failed", ndev_name(vnet), mtu);
370 return -EIO;
371 }
372
373 btdev_dbg("%s changed mtu to %d", ndev_name(vnet), mtu);
374 return OK;
375 }
376
bt_cmd_peek_packet(struct bt_virnet *vnet, unsigned long arg)377 static int bt_cmd_peek_packet(struct bt_virnet *vnet, unsigned long arg)
378 {
379 u32 len;
380 struct sk_buff *skb;
381
382 if (unlikely(bt_ring_is_empty(vnet->tx_ring))) {
383 btdev_dbg_err("%s ring is empty", ndev_name(vnet));
384 return -EAGAIN;
385 }
386
387 /* The user state retrieves the data length from the ring buf, rather than
388 * unchain the skb from the ring buf, so there is no need to release the skb
389 */
390 skb = bt_ring_current(vnet->tx_ring);
391 if (unlikely(!skb)) {
392 btdev_dbg_err("%s invalid skb", ndev_name(vnet));
393 return -EINVAL;
394 }
395
396 len = skb->len - MACADDR_LEN;
397 if (unlikely(put_user(len, (int __user *)arg))) {
398 btdev_dbg_err("%s put_user failed", ndev_name(vnet));
399 return -EIO;
400 }
401
402 btdev_dbg("%s get packet len is %u", ndev_name(vnet), len);
403 return OK;
404 }
405
bt_io_file_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)406 static long bt_io_file_ioctl(struct file *filep,
407 unsigned int cmd,
408 unsigned long arg)
409 {
410 long ret;
411 struct bt_virnet *vnet = NULL;
412
413 if (unlikely(!filep) || unlikely(!filep->private_data)) {
414 btdev_dbg_err("invalid parameter");
415 return -EINVAL;
416 }
417 vnet = filep->private_data;
418 switch (cmd) {
419 case BT_IOC_CHANGE_MTU:
420 ret = bt_cmd_change_mtu(vnet, arg);
421 break;
422 case BT_IOC_ENABLE:
423 ret = bt_cmd_enable_virnet(vnet, arg);
424 break;
425 case BT_IOC_DISABLE:
426 ret = bt_cmd_disable_virnet(vnet, arg);
427 break;
428 case BT_IOC_PEEK_PACKET:
429 ret = bt_cmd_peek_packet(vnet, arg);
430 break;
431 default:
432 btdev_dbg_err("not a valid cmd(%u)", cmd);
433 return -ENOIOCTLCMD;
434 }
435
436 return ret;
437 }
438
/* poll() hook: readable when the tx ring has data, writable when it has room.
 *
 * Fix: poll returns an unsigned event mask, so returning -EINVAL was
 * wrong (it reads as a bogus mask); report POLLERR on bad parameters.
 */
static unsigned int bt_io_file_poll(struct file *filp, poll_table *wait)
{
	struct bt_virnet *vnet = NULL;
	unsigned int mask = 0;

	if (unlikely(!filp) || unlikely(!wait) || unlikely(!filp->private_data)) {
		btdev_dbg_err("invalid parameter");
		return POLLERR;
	}
	vnet = filp->private_data;
	poll_wait(filp, &vnet->rx_queue, wait);

	if (!bt_ring_is_empty(vnet->tx_ring)) // readable
		mask |= POLLIN | POLLRDNORM;

	if (!bt_ring_is_full(vnet->tx_ring)) // writable
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
459
460 static const struct file_operations g_bt_io_file_ops = {
461 .owner = THIS_MODULE,
462 .open = bt_io_file_open,
463 .release = bt_io_file_release,
464 .read = bt_io_file_read,
465 .write = bt_io_file_write,
466 .poll = bt_io_file_poll,
467 .unlocked_ioctl = bt_io_file_ioctl,
468 .compat_ioctl = bt_io_file_ioctl};
469
bt_mng_file_open(struct inode *node, struct file *filp)470 static int bt_mng_file_open(struct inode *node, struct file *filp)
471 {
472 if (unlikely(!filp)) {
473 btdev_dbg_err("bt mng file open: invalid filp");
474 return -EINVAL;
475 }
476
477 /* Check whether open_limit is equal to 0 after subtracting 1. If so, return true */
478 if (unlikely(!atomic_dec_and_test(&g_bt_drv->mng_file->open_limit))) {
479 /* If the file is not opened for the first time, an error occurs
480 * and open_limit is restored to the open state. (set to 0)
481 */
482 atomic_inc(&g_bt_drv->mng_file->open_limit);
483 btdev_dbg_err("file %s has been opened already",
484 g_bt_drv->mng_file->bt_cdev->dev_filename);
485 return -EBUSY;
486 }
487
488 /* open_limit becomes 0 after the file is first opened */
489 filp->private_data = g_bt_drv;
490
491 btdev_dbg("%s has been opened", g_bt_drv->mng_file->bt_cdev->dev_filename);
492 return OK;
493 }
494
bt_mng_file_release(struct inode *node, struct file *filp)495 static int bt_mng_file_release(struct inode *node, struct file *filp)
496 {
497 struct bt_drv *drv = NULL;
498
499 if (unlikely(!filp) || unlikely(!filp->private_data)) {
500 btdev_dbg_err("invalid parameter");
501 return -EINVAL;
502 }
503 drv = filp->private_data;
504
505 /* Set open_limit to 1 when the file is closed */
506 atomic_inc(&drv->mng_file->open_limit);
507
508 btdev_dbg("%s has been released", g_bt_drv->mng_file->bt_cdev->dev_filename);
509 return OK;
510 }
511
bt_cmd_create_virnet(struct bt_drv *bt_mng, unsigned long arg)512 static int bt_cmd_create_virnet(struct bt_drv *bt_mng, unsigned long arg)
513 {
514 int id;
515 int ret;
516 struct bt_virnet *vnet = NULL;
517 struct bt_uioc_args vp;
518 unsigned long size;
519
520 mutex_lock(&bt_mng->bitmap_lock);
521 id = bt_get_unused_id(bt_mng->bitmap);
522 if ((unlikely(bt_mng->devices_table->num >= BT_VIRNET_MAX_NUM)) ||
523 (unlikely(id < 0))) {
524 btdev_dbg_err("reach the limit of max virnets");
525 goto virnet_create_failed;
526 }
527 vnet = bt_virnet_create(bt_mng, id);
528 if (unlikely(!vnet)) {
529 btdev_dbg_err("bt virnet create failed");
530 goto virnet_create_failed;
531 }
532
533 ret = bt_table_add_device(bt_mng->devices_table, vnet);
534 if (unlikely(ret < 0)) {
535 btdev_dbg_err("bt table add device failed: ret=%d", ret);
536 goto add_device_failed;
537 }
538
539 bt_set_bit(&bt_mng->bitmap, id);
540 mutex_unlock(&bt_mng->bitmap_lock);
541
542 memcpy(vp.ifa_name, ndev_name(vnet), sizeof(vp.ifa_name));
543 memcpy(vp.cfile_name, cdev_name(vnet), sizeof(vp.cfile_name));
544
545 mdelay(DELAY_100_MS);
546
547 size = copy_to_user((void __user *)arg, &vp, sizeof(struct bt_uioc_args));
548 if (unlikely(size)) {
549 btdev_dbg_err("copy_to_user failed: left size=%lu", size);
550 goto copy_to_user_failed;
551 }
552
553 btdev_dbg("%s has been created", ndev_name(vnet));
554 return OK;
555
556 copy_to_user_failed:
557 mutex_lock(&bt_mng->bitmap_lock);
558 bt_table_remove_device(bt_mng->devices_table, vnet);
559 bt_clear_bit(&bt_mng->bitmap, id);
560
561 add_device_failed:
562 bt_virnet_destroy(vnet);
563
564 virnet_create_failed:
565 mutex_unlock(&bt_mng->bitmap_lock);
566 return -EIO;
567 }
568
bt_cmd_delete_virnet(struct bt_drv *bt_mng, unsigned long arg)569 static int bt_cmd_delete_virnet(struct bt_drv *bt_mng, unsigned long arg)
570 {
571 int err;
572 struct bt_virnet *vnet = NULL;
573 struct bt_uioc_args vp;
574 unsigned long size;
575 dev_t number;
576
577 size = copy_from_user(&vp, (void __user *)arg,
578 sizeof(struct bt_uioc_args));
579 if (unlikely(size)) {
580 btdev_dbg_err("copy_from_user failed: left size=%lu", size);
581 return -EIO;
582 }
583
584 vnet = bt_table_find(bt_mng->devices_table, vp.ifa_name);
585 if (unlikely(!vnet)) {
586 btdev_dbg_err("virnet: %s cannot be found in bt table", vp.ifa_name);
587 return -EIO; // not found
588 }
589
590 btdev_dbg("%s has been deleted", ndev_name(vnet));
591 mutex_lock(&bt_mng->bitmap_lock);
592 err = bt_virnet_get_cdev_number(vnet, &number);
593 if (likely(!err))
594 bt_clear_bit(&bt_mng->bitmap, (u32)MINOR(number));
595 bt_table_remove_device(bt_mng->devices_table, vnet);
596 bt_virnet_destroy(vnet);
597 mutex_unlock(&bt_mng->bitmap_lock);
598 return OK;
599 }
600
bt_cmd_query_all_virnets(struct bt_drv *bt_mng, unsigned long arg)601 static int bt_cmd_query_all_virnets(struct bt_drv *bt_mng, unsigned long arg)
602 {
603 if (unlikely(put_user(bt_mng->bitmap, (u32 *)arg))) {
604 btdev_dbg_err("put_user failed");
605 return -EIO;
606 }
607 return OK;
608 }
609
/* BT_IOC_DELETE_ALL: tear down every virnet device in one call. */
static int bt_cmd_delete_all_virnets(struct bt_drv *bt_mng, unsigned long arg)
{
	return bt_table_delete_all(bt_mng);
}
614
bt_mng_file_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)615 static long bt_mng_file_ioctl(struct file *filep,
616 unsigned int cmd,
617 unsigned long arg)
618 {
619 int ret;
620 struct bt_drv *bt_mng = NULL;
621
622 if (unlikely(!filep) || unlikely(!filep->private_data)) {
623 btdev_dbg_err("invalid parameter");
624 return -EINVAL;
625 }
626 bt_mng = filep->private_data;
627
628 switch (cmd) {
629 case BT_IOC_CREATE:
630 ret = bt_cmd_create_virnet(bt_mng, arg);
631 break;
632 case BT_IOC_DELETE:
633 ret = bt_cmd_delete_virnet(bt_mng, arg);
634 break;
635 case BT_IOC_QUERY_ALL:
636 ret = bt_cmd_query_all_virnets(bt_mng, arg);
637 break;
638 case BT_IOC_DELETE_ALL:
639 ret = bt_cmd_delete_all_virnets(bt_mng, arg);
640 break;
641 default:
642 btdev_dbg_err("not a valid cmd(%u)", cmd);
643 return -ENOIOCTLCMD;
644 }
645 return ret;
646 }
647
648 static const struct file_operations g_bt_mng_file_ops = {
649 .owner = THIS_MODULE,
650 .open = bt_mng_file_open,
651 .release = bt_mng_file_release,
652 .unlocked_ioctl = bt_mng_file_ioctl,
653 .compat_ioctl = bt_mng_file_ioctl};
654
bt_virnet_xmit(struct sk_buff *skb, struct net_device *dev)655 static netdev_tx_t bt_virnet_xmit(struct sk_buff *skb,
656 struct net_device *dev)
657 {
658 int ret;
659 struct bt_virnet *vnet = NULL;
660
661 if (unlikely(!skb) || unlikely(!dev)) {
662 btdev_dbg_err("invalid parameter");
663 return -EINVAL;
664 }
665
666 vnet = bt_table_find(g_bt_drv->devices_table, dev->name);
667 if (unlikely(!vnet)) {
668 btdev_dbg_err("bt_table_find %s failed", ndev_name(vnet));
669 return -EINVAL;
670 }
671
672 ret = bt_virnet_produce_data(vnet, (void *)skb);
673 if (unlikely(ret < 0)) {
674 btdev_dbg("%s produce data failed: ring is full, need to stop queue",
675 ndev_name(vnet));
676 netif_stop_queue(vnet->ndev);
677 return NETDEV_TX_BUSY;
678 }
679
680 vnet->ndev->stats.tx_packets++;
681 vnet->ndev->stats.tx_bytes += skb->len;
682
683 btdev_dbg("%s send success, skb-len=%u", ndev_name(vnet), skb->len);
684 return NETDEV_TX_OK;
685 }
686
687 static const struct net_device_ops g_bt_virnet_ops = {
688 .ndo_start_xmit = bt_virnet_xmit,
689 .ndo_change_mtu = bt_virnet_change_mtu};
690
bt_table_init(void)691 static struct bt_table *bt_table_init(void)
692 {
693 struct bt_table *tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
694
695 if (unlikely(!tbl)) {
696 btdev_dbg_err("alloc failed");
697 return NULL;
698 }
699
700 INIT_LIST_HEAD(&tbl->head);
701 mutex_init(&tbl->tbl_lock);
702 tbl->num = 0;
703 return tbl;
704 }
705
bt_table_add_device(struct bt_table *tbl, struct bt_virnet *vn)706 static int bt_table_add_device(struct bt_table *tbl, struct bt_virnet *vn)
707 {
708 struct bt_virnet *vnet = NULL;
709
710 if (unlikely(!tbl)) {
711 btdev_dbg_err("invalid parameter");
712 return -EINVAL;
713 }
714
715 vnet = bt_table_find(tbl, ndev_name(vn));
716 if (unlikely(vnet)) {
717 btdev_dbg_err("found duplicated device %s", ndev_name(vn));
718 return -ENOIOCTLCMD; // duplicated
719 }
720
721 btdev_dbg("%s has been added", ndev_name(vn));
722 mutex_lock(&tbl->tbl_lock);
723 list_add_tail(&vn->virnet_entry, &tbl->head);
724 if (tbl->num < UINT32_MAX)
725 ++tbl->num;
726 mutex_unlock(&tbl->tbl_lock);
727
728 return OK;
729 }
730
bt_table_remove_device(struct bt_table *tbl, struct bt_virnet *vn)731 static void bt_table_remove_device(struct bt_table *tbl, struct bt_virnet *vn)
732 {
733 if (unlikely(!tbl))
734 return;
735
736 btdev_dbg("%s has been removed", ndev_name(vn));
737 mutex_lock(&tbl->tbl_lock);
738 list_del(&vn->virnet_entry);
739 if (tbl->num)
740 --tbl->num;
741 mutex_unlock(&tbl->tbl_lock);
742 }
743
bt_table_find(struct bt_table *tbl, const char *ifa_name)744 static struct bt_virnet *bt_table_find(struct bt_table *tbl, const char *ifa_name)
745 {
746 struct bt_virnet *vnet = NULL;
747
748 if (unlikely(!tbl) || unlikely(!ifa_name)) {
749 btdev_dbg_err("invalid parameter");
750 return NULL;
751 }
752
753 list_for_each_entry(vnet, &tbl->head, virnet_entry) {
754 if (!strcmp(ndev_name(vnet), ifa_name))
755 return vnet;
756 }
757
758 return NULL;
759 }
760
__bt_table_delete_all(struct bt_drv *drv)761 static void __bt_table_delete_all(struct bt_drv *drv)
762 {
763 dev_t number;
764 struct bt_virnet *vnet = NULL;
765 struct bt_virnet *tmp_vnet = NULL;
766
767 if (unlikely(!g_bt_drv->devices_table))
768 return;
769
770 list_for_each_entry_safe(vnet,
771 tmp_vnet,
772 &drv->devices_table->head,
773 virnet_entry) {
774 int err = bt_virnet_get_cdev_number(vnet, &number);
775
776 if (likely(!err))
777 bt_clear_bit(&drv->bitmap, (u32)MINOR(number));
778 list_del(&vnet->virnet_entry);
779 btdev_dbg("%s has been deleted", ndev_name(vnet));
780 bt_virnet_destroy(vnet);
781 }
782 drv->devices_table->num = 0;
783 }
784
bt_table_delete_all(struct bt_drv *drv)785 static int bt_table_delete_all(struct bt_drv *drv)
786 {
787 if (unlikely(!drv->devices_table))
788 return -EINVAL;
789
790 mutex_lock(&drv->bitmap_lock);
791 mutex_lock(&drv->devices_table->tbl_lock);
792
793 __bt_table_delete_all(drv);
794
795 mutex_unlock(&drv->devices_table->tbl_lock);
796 mutex_unlock(&drv->bitmap_lock);
797 return OK;
798 }
799
bt_table_destroy(struct bt_drv *drv)800 static void bt_table_destroy(struct bt_drv *drv)
801 {
802 __bt_table_delete_all(drv);
803 kfree(drv->devices_table);
804 drv->devices_table = NULL;
805 }
806
__bt_ring_create(int size)807 static struct bt_ring *__bt_ring_create(int size)
808 {
809 struct bt_ring *ring;
810
811 if (unlikely(size < 0))
812 return NULL;
813
814 ring = kmalloc(sizeof(*ring), GFP_KERNEL);
815 if (unlikely(!ring)) {
816 btdev_dbg_err("ring alloc failed");
817 return NULL;
818 }
819
820 ring->head = 0;
821 ring->tail = 0;
822 ring->data = kmalloc_array(size, sizeof(void *), GFP_KERNEL);
823 if (unlikely(!ring->data)) {
824 btdev_dbg_err("ring data allocfailed");
825 kfree(ring);
826 return NULL;
827 }
828 ring->size = size;
829
830 return ring;
831 }
832
bt_ring_create(void)833 static struct bt_ring *bt_ring_create(void)
834 {
835 return __bt_ring_create(BT_RING_BUFFER_SIZE);
836 }
837
bt_ring_is_empty(const struct bt_ring *ring)838 static int bt_ring_is_empty(const struct bt_ring *ring)
839 {
840 if (unlikely(!ring))
841 return TRUE;
842
843 return ring->head == ring->tail;
844 }
845
bt_ring_is_full(const struct bt_ring *ring)846 static int bt_ring_is_full(const struct bt_ring *ring)
847 {
848 if (unlikely(!ring))
849 return TRUE;
850
851 return (ring->head + 1) % ring->size == ring->tail;
852 }
853
bt_ring_produce(struct bt_ring *ring, void *data)854 static void bt_ring_produce(struct bt_ring *ring, void *data)
855 {
856 smp_mb(); // Make sure the read and write order is correct
857 if (likely(ring->head < ring->size)) {
858 ring->data[ring->head] = data;
859 ring->head = (ring->head + 1) % ring->size;
860 }
861 smp_wmb(); // Make sure the write order is correct
862 }
863
bt_ring_current(struct bt_ring *ring)864 static void *bt_ring_current(struct bt_ring *ring)
865 {
866 void *data = NULL;
867
868 if (unlikely(!ring) || unlikely(ring->tail > ring->size))
869 return data;
870
871 data = ring->data[ring->tail];
872 return data;
873 }
874
bt_ring_consume(struct bt_ring *ring)875 static void bt_ring_consume(struct bt_ring *ring)
876 {
877 if (unlikely(!ring))
878 return;
879
880 smp_rmb(); // Make sure the read order is correct
881 ring->tail = (ring->tail + 1) % ring->size;
882 smp_mb(); // Make sure the read and write order is correct
883 }
884
bt_ring_destroy(struct bt_ring *ring)885 static void bt_ring_destroy(struct bt_ring *ring)
886 {
887 if (unlikely(!ring))
888 return;
889
890 kfree(ring->data);
891 kfree(ring);
892 }
893
bt_virnet_produce_data(struct bt_virnet *dev, void *data)894 static int bt_virnet_produce_data(struct bt_virnet *dev, void *data)
895 {
896 if (unlikely(bt_ring_is_full(dev->tx_ring))) {
897 btdev_dbg("ring is full");
898 return -ENFILE;
899 }
900
901 /* There is a memory barrier inside the function */
902 bt_ring_produce(dev->tx_ring, data);
903 wake_up(&dev->rx_queue);
904 return OK;
905 }
906
/**
 * Register the whole char-device region (major `major`, `count` minors)
 * under the name "bt".
 */
static int bt_cdev_region_init(int major, int count)
{
	return register_chrdev_region(MKDEV(major, 0), count, "bt");
}
914
bt_dev_class_create(void)915 static struct class *bt_dev_class_create(void)
916 {
917 struct class *cls = class_create(THIS_MODULE, "bt");
918
919 if (IS_ERR(cls)) {
920 btdev_dbg_err("create struct class failed");
921 return NULL;
922 }
923 return cls;
924 }
925
/* Tear down the device class (NULL-safe). */
static void bt_dev_class_destroy(struct class *cls)
{
	if (unlikely(!cls))
		return;

	class_destroy(cls);
}
933
bt_cdev_device_destroy(struct bt_cdev *dev)934 static void bt_cdev_device_destroy(struct bt_cdev *dev)
935 {
936 device_destroy(dev->bt_class, dev->cdev->dev);
937 }
938
/* Create the /dev node for minor `id` and record its path in
 * dev->dev_filename (e.g. /dev/btdev1). Destroys the node again if
 * formatting the path fails.
 */
static int bt_cdev_device_create(struct bt_cdev *dev,
				 struct class *cls,
				 u32 id)
{
	struct device *device = NULL;
	dev_t devno = MKDEV(BT_DEV_MAJOR, id);
	int written;

	if (unlikely(!cls)) {
		btdev_dbg_err("not a valid class");
		return -EINVAL;
	}

	dev->bt_class = cls;
	device = device_create(cls, NULL, devno, NULL, "%s%u", BT_DEV_NAME_PREFIX, id);
	if (IS_ERR(device)) {
		btdev_dbg_err("create device failed, id=%d", id);
		return -EIO;
	}

	written = snprintf(dev->dev_filename, sizeof(dev->dev_filename),
			   "%s%u", BT_DEV_PATH_PREFIX, id);
	if (written < 0) {
		btdev_dbg_err("snprintf failed, id=%d", id);
		bt_cdev_device_destroy(dev);
		return -EFAULT;
	}

	btdev_dbg("%s has been created", dev->dev_filename);
	return OK;
}
969
/* Allocate, initialize and register one char device (cdev + /dev node)
 * for minor `id` with the given file operations. Unwinds via the goto
 * chain on failure and returns NULL.
 */
static struct bt_cdev *bt_cdev_create(const struct file_operations *ops,
				      u32 id)
{
	int err;
	int minor = id;
	struct bt_cdev *dev = NULL;
	struct cdev *chrdev = NULL;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (unlikely(!dev)) {
		btdev_dbg_err("dev alloc failed, id=%d", id);
		goto dev_alloc_failed;
	}

	chrdev = cdev_alloc();
	if (unlikely(!chrdev)) {
		btdev_dbg_err("cdev alloc failed, id=%d", id);
		goto cdev_alloc_failed;
	}

	cdev_init(chrdev, ops);
	dev->cdev = chrdev;

	err = cdev_add(chrdev, MKDEV(BT_DEV_MAJOR, minor), 1);
	if (unlikely(err < 0)) {
		btdev_dbg_err("cdev add failed, id=%d", id);
		goto cdev_add_failed;
	}

	if (unlikely(bt_cdev_device_create(dev, g_bt_drv->bt_class, minor) < 0)) {
		btdev_dbg_err("bt cdev device create failed, id=%d", id);
		goto cdev_device_create_failed;
	}

	return dev;

cdev_device_create_failed:
cdev_add_failed:
	cdev_del(chrdev);

cdev_alloc_failed:
	kfree(dev);

dev_alloc_failed:
	return NULL;
}
1016
1017 /**
1018 * delete one char device
1019 */
bt_cdev_delete(struct bt_cdev *bt_cdev)1020 static void bt_cdev_delete(struct bt_cdev *bt_cdev)
1021 {
1022 dev_t devno;
1023
1024 if (likely(bt_cdev)) {
1025 devno = bt_cdev->cdev->dev;
1026
1027 /* BT_DEV_PATH_PREFIX + ID --> /dev/btdev1 */
1028 unregister_chrdev(MAJOR(devno), bt_cdev->dev_filename + strlen(BT_DEV_PATH_PREFIX));
1029 bt_cdev_device_destroy(bt_cdev);
1030
1031 cdev_del(bt_cdev->cdev);
1032 } else {
1033 btdev_dbg_err("cdev is null");
1034 }
1035 }
1036
1037 /**
1038 * create and add data char device
1039 */
bt_create_io_file(u32 id)1040 static struct bt_io_file *bt_create_io_file(u32 id)
1041 {
1042 struct bt_io_file *file = kmalloc(sizeof(*file), GFP_KERNEL);
1043
1044 if (unlikely(!file)) {
1045 btdev_dbg_err("file alloc failed, id=%d", id);
1046 return NULL;
1047 }
1048 file->bt_cdev = bt_cdev_create(&g_bt_io_file_ops, id);
1049 if (unlikely(!file->bt_cdev)) {
1050 btdev_dbg_err("create cdev failed, id=%d", id);
1051 kfree(file);
1052 return NULL;
1053 }
1054 atomic_set(&file->read_open_limit, 1);
1055 atomic_set(&file->write_open_limit, 1);
1056 return file;
1057 }
1058
bt_create_io_files(void)1059 static struct bt_io_file **bt_create_io_files(void)
1060 {
1061 int i;
1062 struct bt_io_file **all_files = kmalloc(BT_VIRNET_MAX_NUM * sizeof(struct bt_io_file *),
1063 GFP_KERNEL);
1064
1065 if (unlikely(!all_files)) {
1066 btdev_dbg_err("all_files alloc failed");
1067 return NULL;
1068 }
1069 for (i = 0; i < BT_VIRNET_MAX_NUM; ++i)
1070 all_files[i] = bt_create_io_file(i + 1);
1071
1072 return all_files;
1073 }
1074
bt_delete_io_file(struct bt_io_file *file)1075 static void bt_delete_io_file(struct bt_io_file *file)
1076 {
1077 if (unlikely(!file))
1078 return;
1079
1080 bt_cdev_delete(file->bt_cdev);
1081 kfree(file);
1082 }
1083
bt_delete_io_files(struct bt_drv *bt_mng)1084 static void bt_delete_io_files(struct bt_drv *bt_mng)
1085 {
1086 int i;
1087
1088 for (i = 0; i < BT_VIRNET_MAX_NUM; ++i)
1089 bt_delete_io_file(bt_mng->io_files[i]);
1090
1091 kfree(bt_mng->io_files);
1092 bt_mng->io_files = NULL;
1093 }
1094
1095 /**
1096 * create and add management char device
1097 */
bt_create_mng_file(int id)1098 static struct bt_mng_file *bt_create_mng_file(int id)
1099 {
1100 struct bt_mng_file *file = kmalloc(sizeof(*file), GFP_KERNEL);
1101
1102 if (unlikely(!file)) {
1103 btdev_dbg_err("file alloc failed");
1104 return NULL;
1105 }
1106
1107 file->bt_cdev = bt_cdev_create(&g_bt_mng_file_ops, id);
1108 if (unlikely(!file->bt_cdev)) {
1109 btdev_dbg_err("create cdev failed");
1110 kfree(file);
1111 return NULL;
1112 }
1113
1114 atomic_set(&file->open_limit, 1);
1115
1116 btdev_dbg("mng file has been created");
1117 return file;
1118 }
1119
bt_delete_mng_file(struct bt_mng_file *file)1120 static void bt_delete_mng_file(struct bt_mng_file *file)
1121 {
1122 if (unlikely(!file))
1123 return;
1124
1125 bt_cdev_delete(file->bt_cdev);
1126 kfree(file);
1127 }
1128
1129 /**
1130 * unregister the region
1131 */
bt_cdev_region_destroy(int major, int count)1132 static void bt_cdev_region_destroy(int major, int count)
1133 {
1134 return unregister_chrdev_region(MKDEV(major, 0), count);
1135 }
1136
1137 /**
1138 * create one net device
1139 */
bt_net_device_create(u32 id)1140 static struct net_device *bt_net_device_create(u32 id)
1141 {
1142 struct net_device *ndev = NULL;
1143 int err;
1144 char ifa_name[IFNAMSIZ];
1145
1146 if (unlikely(id < 0) || unlikely(id > BT_VIRNET_MAX_NUM)) {
1147 btdev_dbg_err("invalid id");
1148 return NULL;
1149 }
1150 err = snprintf(ifa_name, sizeof(ifa_name), "%s%d", BT_VIRNET_NAME_PREFIX, id);
1151 if (err < 0) {
1152 btdev_dbg_err("snprintf failed, id=%d", id);
1153 return NULL;
1154 }
1155 ndev = alloc_netdev(0, ifa_name, NET_NAME_UNKNOWN, ether_setup);
1156 if (unlikely(!ndev)) {
1157 btdev_dbg_err("%s ndev alloc failed", ifa_name);
1158 return NULL;
1159 }
1160
1161 ndev->netdev_ops = &g_bt_virnet_ops;
1162 ndev->flags |= IFF_NOARP;
1163 ndev->flags &= ~IFF_BROADCAST & ~IFF_MULTICAST;
1164 ndev->min_mtu = 1;
1165 ndev->max_mtu = ETH_MAX_MTU;
1166
1167 err = register_netdev(ndev);
1168 if (unlikely(err)) {
1169 btdev_dbg_err("%s register netdev failed", ifa_name);
1170 free_netdev(ndev);
1171 return NULL;
1172 }
1173
1174 btdev_dbg("%s has been created", ifa_name);
1175 return ndev;
1176 }
1177
1178 /**
1179 * destroy one net device
1180 */
bt_net_device_destroy(struct net_device *dev)1181 static void bt_net_device_destroy(struct net_device *dev)
1182 {
1183 btdev_dbg("%s has been destroyed", dev->name);
1184 unregister_netdev(dev);
1185 free_netdev(dev);
1186 }
1187
bt_get_io_file(struct bt_drv *drv, int id)1188 static struct bt_io_file *bt_get_io_file(struct bt_drv *drv, int id)
1189 {
1190 if (id >= 1 && id <= BT_VIRNET_MAX_NUM)
1191 return drv->io_files[id - 1];
1192
1193 return NULL;
1194 }
1195
1196 /**
1197 * create an virtual net_device
1198 */
bt_virnet_create(struct bt_drv *bt_mng, u32 id)1199 static struct bt_virnet *bt_virnet_create(struct bt_drv *bt_mng, u32 id)
1200 {
1201 struct bt_virnet *vnet = kmalloc(sizeof(*vnet), GFP_KERNEL);
1202
1203 if (unlikely(!vnet)) {
1204 btdev_dbg_err("invalid parameter");
1205 goto out_of_memory;
1206 }
1207
1208 vnet->tx_ring = bt_ring_create();
1209 if (unlikely(!vnet->tx_ring)) {
1210 btdev_dbg_err("create ring failed");
1211 goto bt_ring_create_failed;
1212 }
1213
1214 vnet->ndev = bt_net_device_create(id);
1215 if (unlikely(!vnet->ndev)) {
1216 btdev_dbg_err("create net device failed");
1217 goto net_device_create_failed;
1218 }
1219
1220 vnet->io_file = bt_get_io_file(bt_mng, id);
1221 if (unlikely(!vnet->io_file)) {
1222 btdev_dbg_err("create cdev failed");
1223 goto get_io_file_failed;
1224 }
1225
1226 init_waitqueue_head(&vnet->rx_queue);
1227
1228 set_state(vnet, BT_VIRNET_STATE_CREATED);
1229 btdev_dbg("%s has been created", cdev_name(vnet));
1230 return vnet;
1231
1232 get_io_file_failed:
1233 bt_net_device_destroy(vnet->ndev);
1234
1235 net_device_create_failed:
1236 bt_ring_destroy(vnet->tx_ring);
1237
1238 bt_ring_create_failed:
1239 kfree(vnet);
1240
1241 out_of_memory:
1242 return NULL;
1243 }
1244
bt_virnet_destroy(struct bt_virnet *vnet)1245 static void bt_virnet_destroy(struct bt_virnet *vnet)
1246 {
1247 btdev_dbg("%s has been destroyed", ndev_name(vnet));
1248 bt_ring_destroy(vnet->tx_ring);
1249 bt_net_device_destroy(vnet->ndev);
1250
1251 set_state(vnet, BT_VIRNET_STATE_DELETED);
1252
1253 kfree(vnet);
1254 }
1255
/**
 * Module exit: undo everything bt_module_init set up.
 *
 * Teardown order mirrors creation in reverse: the device table is
 * destroyed before the io files (virnets in the table hold io_file
 * pointers obtained from bt_get_io_file), then the management file
 * and the device class. The chrdev region and the proc entry are
 * removed last, after the driver state itself is freed.
 */
static void __exit bt_module_release(void)
{
	if (likely(g_bt_drv)) {
		bt_table_destroy(g_bt_drv);
		bt_delete_io_files(g_bt_drv);
		bt_delete_mng_file(g_bt_drv->mng_file);
		bt_dev_class_destroy(g_bt_drv->bt_class);

		kfree(g_bt_drv);
		g_bt_drv = NULL;	/* defend against a stray second release */
	}

	bt_cdev_region_destroy(BT_DEV_MAJOR, BT_VIRNET_MAX_NUM);
	remove_proc_entry("bt_info_proc", NULL);
	btdev_dbg("success");
}
1272
/**
 * First stage of module init: allocate the global driver state and
 * its building blocks (chrdev region, device table, device class,
 * per-slot io files), and initialize the id bitmap + lock.
 *
 * Returns 0 on success, negative errno on failure. On failure the
 * goto chain unwinds everything acquired so far in reverse order
 * and resets g_bt_drv to NULL.
 */
static int __bt_module_base_init(void)
{
	int ret = 0;

	g_bt_drv = kmalloc(sizeof(*g_bt_drv), GFP_KERNEL);
	if (unlikely(!g_bt_drv)) {
		btdev_dbg_err("bt_drv alloc failed");
		ret = -ENOMEM;
		goto btdrv_alloc_failed;
	}

	/* reserve the char-device numbers (major BT_DEV_MAJOR) up front */
	if (unlikely(bt_cdev_region_init(BT_DEV_MAJOR, BT_VIRNET_MAX_NUM) < 0)) {
		btdev_dbg_err("bt cdev region init failed");
		ret = -EFAULT;
		goto cdev_region_fail;
	}

	g_bt_drv->devices_table = bt_table_init();
	if (unlikely(!g_bt_drv->devices_table)) {
		btdev_dbg_err("bt table init failed");
		ret = -ENOMEM;
		goto table_init_fail;
	}

	g_bt_drv->bt_class = bt_dev_class_create();
	if (unlikely(!g_bt_drv->bt_class)) {
		btdev_dbg_err("class create failed");
		ret = -ENOMEM;
		goto class_create_fail;
	}

	g_bt_drv->io_files = bt_create_io_files();
	if (unlikely(!g_bt_drv->io_files)) {
		btdev_dbg_err("bt create io files failed");
		ret = -ENOMEM;
		goto io_files_create_fail;
	}

	/* bitmap tracks allocated device ids; bit 0 is taken later for mng */
	mutex_init(&g_bt_drv->bitmap_lock);
	g_bt_drv->bitmap = 0;
	return ret;

io_files_create_fail:
	bt_dev_class_destroy(g_bt_drv->bt_class);

class_create_fail:
	bt_table_destroy(g_bt_drv);

table_init_fail:
	bt_cdev_region_destroy(BT_DEV_MAJOR, BT_VIRNET_MAX_NUM);

cdev_region_fail:
	kfree(g_bt_drv);
	g_bt_drv = NULL;

btdrv_alloc_failed:
	return ret;
}
1331
/**
 * Second stage of module init: create the management char device
 * (id 0, claimed in the bitmap under bitmap_lock) and the
 * /proc/bt_info_proc entry.
 *
 * Returns 0 on success, negative errno on failure. On failure this
 * stage tears down not only what it created itself but also the
 * whole base-init state, leaving g_bt_drv NULL — so bt_module_init
 * needs no cleanup of its own.
 */
static int __bt_module_dev_create(void)
{
	int mid = 0;
	struct proc_dir_entry *entry = NULL;
	int ret = 0;

	mutex_lock(&g_bt_drv->bitmap_lock);
	g_bt_drv->mng_file = bt_create_mng_file(mid);
	if (unlikely(!g_bt_drv->mng_file)) {
		btdev_dbg_err("bt create mng file failed");
		ret = -ENOMEM;
		mutex_unlock(&g_bt_drv->bitmap_lock);
		goto mng_file_create_fail;
	}
	/* mark id 0 as used so data devices never collide with mng */
	bt_set_bit(&g_bt_drv->bitmap, mid);
	mutex_unlock(&g_bt_drv->bitmap_lock);

	entry = proc_create_data("bt_info_proc", 0, NULL, &g_bt_proc_fops, NULL);
	if (unlikely(!entry)) {
		btdev_dbg_err("create proc data failed");
		ret = -ENOMEM;
		goto proc_create_fail;
	}

	return ret;
proc_create_fail:
	bt_delete_mng_file(g_bt_drv->mng_file);

	/* fall through: undo everything __bt_module_base_init built */
mng_file_create_fail:
	bt_delete_io_files(g_bt_drv);
	bt_dev_class_destroy(g_bt_drv->bt_class);
	bt_table_destroy(g_bt_drv);
	bt_cdev_region_destroy(BT_DEV_MAJOR, BT_VIRNET_MAX_NUM);
	kfree(g_bt_drv);
	g_bt_drv = NULL;

	return ret;
}
1370
1371 /**
1372 * module init function
1373 */
bt_module_init(void)1374 static int __init bt_module_init(void)
1375 {
1376 int ret;
1377
1378 ret = __bt_module_base_init();
1379 if (ret < 0)
1380 return ret;
1381
1382 return __bt_module_dev_create();
1383 }
1384
1385 module_init(bt_module_init);
1386 module_exit(bt_module_release);
1387 MODULE_LICENSE("GPL");
1388