Lines Matching defs:cdev

18 /* cdev list lock */
23 struct cxgbit_device *cdev;
25 cdev = container_of(kref, struct cxgbit_device, kref);
27 cxgbi_ppm_release(cdev2ppm(cdev));
28 kfree(cdev);
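
The release path at lines 23-28 is the standard kref idiom: recover the containing device with container_of(), release per-device state (the DDP page-pod manager via cxgbi_ppm_release()), then free it. A minimal sketch of that idiom follows; the stand-in struct layout, the flag-bit values, and the release/put function names are assumptions (cxgbit_put_cdev does appear later in the listing, at lines 129 and 726).

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Stand-in for the real structure: only members named in the listing are
 * shown and the layout is illustrative. struct cxgb4_lld_info comes from
 * the cxgb4 driver's cxgb4_uld.h. */
struct cxgbit_device {
    struct cxgb4_lld_info lldi;
    struct kref kref;
    unsigned long flags;
    u32 mdsl;
    spinlock_t np_lock;
    struct {
        spinlock_t lock;
        struct list_head list;
    } cskq;
    struct list_head list;
};

/* Flag bits referenced in the listing; the values here are placeholders,
 * the real ones live in the driver's cxgbit.h. */
enum {
    CDEV_STATE_UP   = 0,
    CDEV_ISO_ENABLE = 1,
    CDEV_DDP_ENABLE = 2,
};

static void cxgbit_release_cdev(struct kref *kref)
{
    /* Recover the device from its embedded kref (line 25). */
    struct cxgbit_device *cdev =
        container_of(kref, struct cxgbit_device, kref);

    /* The real release first drops the DDP page-pod manager:
     * cxgbi_ppm_release(cdev2ppm(cdev));  (line 27) */
    kfree(cdev);
}

/* Assumed wrapper: drop one reference; the final put frees the device. */
static void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
    kref_put(&cdev->kref, cxgbit_release_cdev);
}
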
31 static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
33 struct cxgb4_lld_info *lldi = &cdev->lldi;
43 cdev->mdsl = mdsl;
48 struct cxgbit_device *cdev;
53 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
54 if (!cdev)
57 kref_init(&cdev->kref);
58 spin_lock_init(&cdev->np_lock);
60 cdev->lldi = *lldi;
62 cxgbit_set_mdsl(cdev);
64 if (cxgbit_ddp_init(cdev) < 0) {
65 kfree(cdev);
69 if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
70 pr_info("cdev %s ddp init failed\n",
74 set_bit(CDEV_ISO_ENABLE, &cdev->flags);
76 spin_lock_init(&cdev->cskq.lock);
77 INIT_LIST_HEAD(&cdev->cskq.list);
80 list_add_tail(&cdev->list, &cdev_list_head);
83 pr_info("cdev %s added for iSCSI target transport\n",
86 return cdev;
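
Lines 48-86 show the device-add path: allocate with kzalloc(), initialize the kref and np_lock, copy the LLD info, derive the maximum data segment length, attempt DDP init (a negative return at line 64 aborts the add, while DDP merely staying disabled at line 69 is only logged), enable ISO, initialize the connection queue, and publish the device on the global cdev list. A hedged sketch of that flow, reusing the stand-in struct above; the function name, the cdev_list_lock mutex, and the ERR_PTR error values are assumptions, while the helpers cxgbit_set_mdsl() and cxgbit_ddp_init() are taken from the listing.

#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/pci.h>

/* Global device list (line 80); a dedicated lock is implied by the
 * "cdev list lock" comment at line 18, assumed here to be a mutex. */
static LIST_HEAD(cdev_list_head);
static DEFINE_MUTEX(cdev_list_lock);

static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
    struct cxgbit_device *cdev;

    cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
    if (!cdev)
        return ERR_PTR(-ENOMEM);

    kref_init(&cdev->kref);
    spin_lock_init(&cdev->np_lock);

    cdev->lldi = *lldi;        /* private copy of the cxgb4 LLD info */
    cxgbit_set_mdsl(cdev);     /* derive cdev->mdsl (lines 31-43) */

    /* A failed DDP init aborts the add (lines 64-65)... */
    if (cxgbit_ddp_init(cdev) < 0) {
        kfree(cdev);
        return ERR_PTR(-EINVAL);
    }

    /* ...whereas DDP simply staying disabled is only reported (69-70). */
    if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
        pr_info("cdev %s ddp init failed\n", pci_name(lldi->pdev));

    set_bit(CDEV_ISO_ENABLE, &cdev->flags);

    spin_lock_init(&cdev->cskq.lock);
    INIT_LIST_HEAD(&cdev->cskq.list);

    mutex_lock(&cdev_list_lock);
    list_add_tail(&cdev->list, &cdev_list_head);
    mutex_unlock(&cdev_list_lock);

    pr_info("cdev %s added for iSCSI target transport\n",
            pci_name(lldi->pdev));

    return cdev;
}
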
89 static void cxgbit_close_conn(struct cxgbit_device *cdev)
95 spin_lock_bh(&cdev->cskq.lock);
96 list_for_each_entry(csk, &cdev->cskq.list, list) {
112 spin_unlock_bh(&cdev->cskq.lock);
115 static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
119 spin_lock_bh(&cdev->cskq.lock);
120 if (list_empty(&cdev->cskq.list))
122 spin_unlock_bh(&cdev->cskq.lock);
126 list_del(&cdev->list);
129 cxgbit_put_cdev(cdev);
131 cxgbit_close_conn(cdev);
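
Lines 89-131 cover connection close and device detach. cxgbit_close_conn() walks the per-device socket queue with bottom halves disabled; what it does to each csk (lines 97-111) is not visible in this listing, so the sketch below leaves a placeholder. cxgbit_detach_cdev() unlinks and releases the device only if no connections remain, otherwise it pushes the existing connections toward close first. The struct cxgbit_sock stand-in, the free_cdev local, and the mutex name are assumptions.

/* Stand-in connection structure; only the list linkage named by
 * list_for_each_entry(..., list) in the listing is shown. */
struct cxgbit_sock {
    struct list_head list;
};

static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
    struct cxgbit_sock *csk;

    spin_lock_bh(&cdev->cskq.lock);
    list_for_each_entry(csk, &cdev->cskq.list, list) {
        /* Per-connection close/wake-up work (lines 97-111) is elided;
         * it is not part of this listing. */
    }
    spin_unlock_bh(&cdev->cskq.lock);
}

static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
    bool free_cdev = false;

    /* Decide under cskq.lock whether any connections are still live. */
    spin_lock_bh(&cdev->cskq.lock);
    if (list_empty(&cdev->cskq.list))
        free_cdev = true;
    spin_unlock_bh(&cdev->cskq.lock);

    if (free_cdev) {
        /* Nothing is using the device: unlink it from the global list
         * and drop the reference taken when it was added. */
        mutex_lock(&cdev_list_lock);
        list_del(&cdev->list);
        mutex_unlock(&cdev_list_lock);

        cxgbit_put_cdev(cdev);
    } else {
        /* Connections remain: close them; detach completes later. */
        cxgbit_close_conn(cdev);
    }
}
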
137 struct cxgbit_device *cdev = handle;
141 set_bit(CDEV_STATE_UP, &cdev->flags);
142 pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
145 clear_bit(CDEV_STATE_UP, &cdev->flags);
146 cxgbit_close_conn(cdev);
147 pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
150 pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
153 clear_bit(CDEV_STATE_UP, &cdev->flags);
154 pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
155 cxgbit_detach_cdev(cdev);
158 pr_info("cdev %s unknown state %d.\n",
159 pci_name(cdev->lldi.pdev), state);
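
Lines 137-159 are the LLD state-change callback: UP sets CDEV_STATE_UP, RECOVERY clears it and closes connections, DOWN only logs, and DETACH clears the bit and detaches the device. Most of the body is visible above, so the sketch is close to a transcription; the function name, the enum cxgb4_state case labels (matched to the logged state names), and the return value are assumptions.

static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
    struct cxgbit_device *cdev = handle;

    switch (state) {
    case CXGB4_STATE_UP:
        set_bit(CDEV_STATE_UP, &cdev->flags);
        pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
        break;
    case CXGB4_STATE_START_RECOVERY:
        /* Adapter reset: the device is no longer usable, so close any
         * connections it still carries. */
        clear_bit(CDEV_STATE_UP, &cdev->flags);
        cxgbit_close_conn(cdev);
        pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
        break;
    case CXGB4_STATE_DOWN:
        pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
        break;
    case CXGB4_STATE_DETACH:
        clear_bit(CDEV_STATE_UP, &cdev->flags);
        pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
        cxgbit_detach_cdev(cdev);
        break;
    default:
        pr_info("cdev %s unknown state %d.\n",
                pci_name(cdev->lldi.pdev), state);
        break;
    }
    return 0;
}
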
431 struct cxgbit_device *cdev = hndl;
432 struct cxgb4_lld_info *lldi = &cdev->lldi;
508 pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
509 cdev, op, rpl->ot.opcode_tid,
513 cxgbit_cplhandlers[op](cdev, skb);
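
Lines 431-513 sit in the receive path: the CPL opcode is pulled out of the message and dispatched through the cxgbit_cplhandlers[] table (line 513). A sketch of just that dispatch step; the handler typedef, the use of struct cpl_tx_data to reach the opcode (both types from the cxgb4 driver's t4_msg.h), and the drop of unhandled opcodes are assumptions.

#include <linux/skbuff.h>

/* struct cpl_tx_data, union opcode_tid and NUM_CPL_CMDS come from the
 * cxgb4 driver's t4_msg.h. */
typedef void (*cxgbit_cplhandler_t)(struct cxgbit_device *cdev,
                                    struct sk_buff *skb);

static cxgbit_cplhandler_t cxgbit_cplhandlers[NUM_CPL_CMDS];

static void cxgbit_dispatch_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
    struct cpl_tx_data *rpl = (struct cpl_tx_data *)skb->data;
    u8 op = rpl->ot.opcode;

    /* Trace message as seen at lines 508-509 of the listing. */
    pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
             cdev, op, rpl->ot.opcode_tid,
             ntohl(rpl->ot.opcode_tid), skb);

    if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op])
        cxgbit_cplhandlers[op](cdev, skb);
    else
        kfree_skb(skb);     /* no handler registered for this opcode */
}
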
531 cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
539 spin_lock_bh(&cdev->cskq.lock);
540 list_for_each_entry(csk, &cdev->cskq.list, list) {
577 spin_unlock_bh(&cdev->cskq.lock);
584 struct cxgbit_device *cdev = NULL;
619 cdev = cxgbit_find_device(ndev, &port_id);
623 if (!cdev) {
628 cxgbit_update_dcb_priority(cdev, port_id, priority,
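
Lines 531-628 handle DCB app-priority changes: the netdev named in the notification is mapped back to its cxgbit_device via cxgbit_find_device() (line 619), and cxgbit_update_dcb_priority() then walks that device's connection list under cskq.lock (lines 539-577). A hedged sketch of the lookup-and-dispatch step only; the notifier plumbing, the priority/protocol fields pulled from the DCB app entry, and the final argument of the update call (truncated at line 628 in the listing) are assumptions.

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/dcbnl.h>
#include <net/net_namespace.h>

static int cxgbit_dcbevent_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
{
    struct dcb_app_type *dcb_app = data;
    struct cxgbit_device *cdev;
    struct net_device *ndev;
    u8 port_id;

    ndev = dev_get_by_index(&init_net, dcb_app->ifindex);
    if (!ndev)
        return NOTIFY_DONE;

    /* Map the notifying netdev back to the owning cxgbit_device. */
    cdev = cxgbit_find_device(ndev, &port_id);
    dev_put(ndev);
    if (!cdev)
        return NOTIFY_DONE;

    /* The listing truncates the call at line 628; the app protocol
     * (the TCP port in the DCB app entry) is assumed as the last
     * argument here. */
    cxgbit_update_dcb_priority(cdev, port_id, dcb_app->app.priority,
                               dcb_app->app.protocol);
    return NOTIFY_OK;
}
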
718 struct cxgbit_device *cdev, *tmp;
724 list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
725 list_del(&cdev->list);
726 cxgbit_put_cdev(cdev);
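
Lines 718-726 are the module-exit teardown: every remaining device is unlinked from the global list and its add-time reference dropped, letting the kref release path free it. The safe iterator is needed because each entry may be freed while walking. The function name and the mutex around the walk are assumptions; the list head and the put call come from the listing.

#include <linux/module.h>

static void __exit cxgbit_exit(void)
{
    struct cxgbit_device *cdev, *tmp;

    mutex_lock(&cdev_list_lock);
    list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
        list_del(&cdev->list);
        cxgbit_put_cdev(cdev);   /* final put frees via the kref release */
    }
    mutex_unlock(&cdev_list_lock);
}
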