Lines Matching defs:cdev

49 cxgbit_wait_for_reply(struct cxgbit_device *cdev,
55 if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
63 func, pci_name(cdev->lldi.pdev), tid);
69 pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
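
The matches at lines 49-69 belong to cxgbit_wait_for_reply(): it bails out early if the device is not up, otherwise blocks until the firmware reply arrives, and logs the adapter's PCI name and the TID when something goes wrong. A minimal userspace sketch of that gate-then-wait shape, using a pthread condition variable and hypothetical names in place of the driver's kernel wait machinery:

```c
/* Userspace model of the gate-then-wait pattern in cxgbit_wait_for_reply():
 * fail immediately if the device is down, otherwise wait (with a timeout)
 * until the reply handler marks the wait as done.  Names are hypothetical. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct wr_wait {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
	int ret;			/* status carried by the reply */
};

static int wait_for_reply(struct wr_wait *w, bool dev_up, int timeout_s)
{
	struct timespec ts;
	int err = 0;

	if (!dev_up)			/* device gone: fail immediately */
		return -EIO;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_s;

	pthread_mutex_lock(&w->lock);
	while (!w->done && !err)
		err = pthread_cond_timedwait(&w->cond, &w->lock, &ts);
	pthread_mutex_unlock(&w->lock);

	if (err == ETIMEDOUT) {
		fprintf(stderr, "reply timed out\n");
		return -ETIMEDOUT;
	}
	return w->ret;
}

int main(void)
{
	struct wr_wait w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};

	/* nothing ever signals the reply, so this exercises the timeout path */
	printf("wait_for_reply() = %d\n", wait_for_reply(&w, true, 1));
	return 0;
}
```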
79 cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
89 spin_lock(&cdev->np_lock);
90 p->next = cdev->np_hash_tab[bucket];
91 cdev->np_hash_tab[bucket] = p;
92 spin_unlock(&cdev->np_lock);
99 cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
104 spin_lock(&cdev->np_lock);
105 for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
111 spin_unlock(&cdev->np_lock);
116 static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
119 struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
121 spin_lock(&cdev->np_lock);
130 spin_unlock(&cdev->np_lock);
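
Lines 79-130 cover the three listener hash-table helpers: cxgbit_np_hash_add() links a new np_info at the head of its bucket, cxgbit_np_hash_find() walks the bucket chain, and cxgbit_np_hash_del() unlinks an entry, all under cdev->np_lock. A compact single-lock model of that chained-bucket scheme; the types, bucket function and the plain mutex below are illustrative stand-ins for np_info and the driver's spinlock:

```c
/* Chained-bucket hash protected by one lock, modelled on the np_hash
 * helpers: insert at the bucket head, linear find, unlink to delete. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NP_HASH_SIZE 32
#define BUCKET(cnp) (((unsigned long)(cnp) >> 4) % NP_HASH_SIZE)

struct np_info {
	struct np_info *next;
	void *cnp;			/* listening endpoint */
	unsigned int stid;		/* server TID bound to it */
};

static struct np_info *np_hash_tab[NP_HASH_SIZE];
static pthread_mutex_t np_lock = PTHREAD_MUTEX_INITIALIZER;

static struct np_info *np_hash_add(void *cnp, unsigned int stid)
{
	struct np_info *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->cnp = cnp;
	p->stid = stid;

	pthread_mutex_lock(&np_lock);
	p->next = np_hash_tab[BUCKET(cnp)];	/* link at the bucket head */
	np_hash_tab[BUCKET(cnp)] = p;
	pthread_mutex_unlock(&np_lock);
	return p;
}

static int np_hash_find(void *cnp)
{
	struct np_info *p;
	int stid = -1;

	pthread_mutex_lock(&np_lock);
	for (p = np_hash_tab[BUCKET(cnp)]; p; p = p->next)
		if (p->cnp == cnp) {
			stid = p->stid;
			break;
		}
	pthread_mutex_unlock(&np_lock);
	return stid;
}

static int np_hash_del(void *cnp)
{
	struct np_info *p, **prev = &np_hash_tab[BUCKET(cnp)];
	int stid = -1;

	pthread_mutex_lock(&np_lock);
	for (p = *prev; p; prev = &p->next, p = p->next)
		if (p->cnp == cnp) {
			stid = p->stid;
			*prev = p->next;	/* unlink from the chain */
			free(p);
			break;
		}
	pthread_mutex_unlock(&np_lock);
	return stid;
}

int main(void)
{
	int endpoint;				/* stand-in for a cnp */

	np_hash_add(&endpoint, 7);
	printf("find: stid %d\n", np_hash_find(&endpoint));	/* 7 */
	printf("del:  stid %d\n", np_hash_del(&endpoint));	/* 7 */
	printf("find: stid %d\n", np_hash_find(&endpoint));	/* -1 */
	return 0;
}
```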
144 cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
153 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);
158 ret = cxgb4_clip_get(cdev->lldi.ports[0],
170 ret = cxgb4_create_server6(cdev->lldi.ports[0],
173 cdev->lldi.rxq_ids[0]);
175 ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
184 cxgb4_clip_release(cdev->lldi.ports[0],
196 cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
204 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);
209 ret = cxgb4_create_server(cdev->lldi.ports[0],
212 cdev->lldi.rxq_ids[0]);
214 ret = cxgbit_wait_for_reply(cdev,
230 struct cxgbit_device *cdev;
233 list_for_each_entry(cdev, &cdev_list_head, list) {
234 struct cxgb4_lld_info *lldi = &cdev->lldi;
240 return cdev;
295 struct cxgbit_device *cdev = NULL;
312 cdev = cxgbit_find_device(ndev, NULL);
315 return cdev;
343 __cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
348 if (!test_bit(CDEV_STATE_UP, &cdev->flags))
351 stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
355 if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
356 cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
361 ret = cxgbit_create_server4(cdev, stid, cnp);
363 ret = cxgbit_create_server6(cdev, stid, cnp);
367 cxgb4_free_stid(cdev->lldi.tids, stid,
369 cxgbit_np_hash_del(cdev, cnp);
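
Lines 343-369 show the setup order in __cxgbit_setup_cdev_np(): check the device is up, allocate a server TID, record it in the hash, program the IPv4 or IPv6 hardware listener, and unwind (free the stid, drop the hash entry) if any step fails. A hedged, self-contained sketch of that acquire-then-unwind flow; every helper below is a hypothetical stub, not a cxgb4 call:

```c
/* Acquire-in-order, unwind-on-failure flow modelled on
 * __cxgbit_setup_cdev_np(); all helpers here are hypothetical stubs. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool device_up(void)          { return true; }
static int  alloc_stid(void)         { return 42; }	/* server TID */
static void free_stid(int stid)      { (void)stid; }
static bool hash_add(int stid)       { (void)stid; return true; }
static void hash_del(void)           { }
static int  create_server(int stid)  { (void)stid; return 0; }

static int setup_listener(void)
{
	int stid, ret;

	if (!device_up())
		return -ENODEV;

	stid = alloc_stid();			/* 1. reserve a server TID */
	if (stid < 0)
		return -ENOMEM;

	if (!hash_add(stid)) {			/* 2. record stid for lookup */
		free_stid(stid);
		return -ENOMEM;
	}

	ret = create_server(stid);		/* 3. program the hw listener */
	if (ret) {				/* failed: undo steps 1 and 2 */
		free_stid(stid);
		hash_del();
	}
	return ret;
}

int main(void)
{
	printf("setup_listener() = %d\n", setup_listener());
	return 0;
}
```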
377 struct cxgbit_device *cdev;
381 cdev = cxgbit_find_np_cdev(cnp);
382 if (!cdev)
385 if (cxgbit_np_hash_find(cdev, cnp) >= 0)
388 if (__cxgbit_setup_cdev_np(cdev, cnp))
391 cnp->com.cdev = cdev;
400 struct cxgbit_device *cdev;
405 list_for_each_entry(cdev, &cdev_list_head, list) {
406 if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
412 list_for_each_entry(cdev, &cdev_list_head, list) {
413 ret = __cxgbit_setup_cdev_np(cdev, cnp);
450 cnp->com.cdev = NULL;
518 __cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
523 stid = cxgbit_np_hash_del(cdev, cnp);
526 if (!test_bit(CDEV_STATE_UP, &cdev->flags))
534 ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
535 cdev->lldi.rxq_ids[0], ipv6);
545 ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
550 if (ipv6 && cnp->com.cdev) {
554 cxgb4_clip_release(cdev->lldi.ports[0],
559 cxgb4_free_stid(cdev->lldi.tids, stid,
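
Lines 518-559 show the matching teardown in __cxgbit_free_cdev_np(): recover the stid from the hash, stop early if the device is down, remove the hardware server, wait for its CPL reply, release the IPv6 CLIP entry, and free the stid. A stub-based sketch of that order as it appears in the listing; the early-return points and helper names are assumptions:

```c
/* Teardown order visible in __cxgbit_free_cdev_np(); every helper is a
 * hypothetical stub and the bail-out points are assumed. */
#include <stdbool.h>
#include <stdio.h>

static int  hash_del(void)           { return 42; }	/* stid, or -1 */
static bool device_up(void)          { return true; }
static int  remove_server(int stid)  { (void)stid; return 0; }
static int  wait_for_reply(void)     { return 0; }
static void clip_release(void)       { }
static void free_stid(int stid)      { (void)stid; }

static void free_listener(bool ipv6)
{
	int stid = hash_del();		/* forget the stid -> cnp mapping */

	if (stid < 0)
		return;
	if (!device_up())		/* dead device: nothing to undo in hw */
		return;

	if (remove_server(stid))	/* close the hardware listener */
		return;
	if (wait_for_reply())		/* wait for the close-server reply */
		return;

	if (ipv6)
		clip_release();		/* drop the CLIP entry for the addr */
	free_stid(stid);		/* return the server TID */
}

int main(void)
{
	free_listener(false);
	puts("listener released");
	return 0;
}
```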
566 struct cxgbit_device *cdev;
570 list_for_each_entry(cdev, &cdev_list_head, list) {
571 ret = __cxgbit_free_cdev_np(cdev, cnp);
580 struct cxgbit_device *cdev;
584 list_for_each_entry(cdev, &cdev_list_head, list) {
585 if (cdev == cnp->com.cdev) {
593 __cxgbit_free_cdev_np(cdev, cnp);
606 if (cnp->com.cdev)
650 struct cxgbit_device *cdev = handle;
653 pr_debug("%s cdev %p\n", __func__, cdev);
655 cxgbit_ofld_send(cdev, skb);
673 csk->com.cdev, cxgbit_abort_arp_failure);
675 return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
714 cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
761 csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
796 struct cxgbit_device *cdev;
805 cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
810 cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
815 cdev = csk->com.cdev;
816 spin_lock_bh(&cdev->cskq.lock);
818 spin_unlock_bh(&cdev->cskq.lock);
822 cxgbit_put_cdev(cdev);
899 struct cxgbit_device *cdev)
931 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
939 step = cdev->lldi.ntxq /
940 cdev->lldi.nchan;
942 step = cdev->lldi.nrxq /
943 cdev->lldi.nchan;
945 csk->rss_qid = cdev->lldi.rxq_ids[
964 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
966 csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
975 step = cdev->lldi.ntxq /
976 cdev->lldi.nports;
978 (cdev->selectq[port_id][0]++ % step);
980 step = cdev->lldi.nrxq /
981 cdev->lldi.nports;
983 (cdev->selectq[port_id][1]++ % step);
984 csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
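
Lines 899-984 come from the connection-setup path that picks tx/rx queues for a new socket: one branch (lines 939-945) slices the queue ranges by lldi.nchan, the other (lines 964-984) slices by lldi.nports and round-robins within the port's slice via the cdev->selectq counters. A worked example of the nports variant, modelling only the rx-side counter; the adapter numbers are made up for illustration:

```c
/* Per-port queue slicing: nrxq queues split evenly across nports, with
 * each new connection round-robining inside its port's slice.  The
 * counts below are illustrative, not real adapter values. */
#include <stdio.h>

int main(void)
{
	unsigned int nrxq = 16, nports = 2;	/* hypothetical adapter */
	unsigned int step = nrxq / nports;	/* 8 rx queues per port */
	unsigned int selectq[2] = { 0, 0 };	/* per-port rr counters */

	for (unsigned int conn = 0; conn < 5; conn++) {
		unsigned int port_id = conn % nports;
		unsigned int rxq_idx = (port_id * step) +
				       (selectq[port_id]++ % step);

		printf("conn %u on port %u -> rxq_idx %u\n",
		       conn, port_id, rxq_idx);
	}
	return 0;
}
```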
996 int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
1000 if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
1006 ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
1012 static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
1022 cxgbit_ofld_send(cdev, skb);
1026 cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
1031 if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
1037 ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
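
Lines 996-1037 show cxgbit_ofld_send() and cxgbit_l2t_send(): both test CDEV_STATE_UP before handing the skb to cxgb4_ofld_send() or cxgb4_l2t_send(). A tiny userspace model of that fail-fast gate; that the packet is dropped and -EIO returned on a down device is an assumption about the elided lines, and all names below are stand-ins:

```c
/* Send helper in the style of cxgbit_ofld_send(): drop the packet and
 * fail when the device is down, otherwise hand it to the lower layer. */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_bool dev_up = true;

static int lower_layer_send(void *pkt)
{
	printf("sent packet %p\n", pkt);
	return 0;
}

static int ofld_send(void *pkt)
{
	if (!atomic_load(&dev_up)) {
		free(pkt);		/* drop rather than queue to dead hw */
		return -EIO;
	}
	return lower_layer_send(pkt);	/* hand off to the adapter queue */
}

int main(void)
{
	void *pkt = malloc(64);

	ofld_send(pkt);			/* delivered while the device is up */
	atomic_store(&dev_up, false);
	ofld_send(malloc(64));		/* freed and rejected with -EIO */
	free(pkt);
	return 0;
}
```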
1050 cxgbit_ofld_send(csk->com.cdev, skb);
1133 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
1154 cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
1212 cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
1216 cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
1222 struct tid_info *t = cdev->lldi.tids;
1233 pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
1234 __func__, cdev, stid, tid);
1256 cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
1266 dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
1277 dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
1297 dst, cdev);
1317 csk->com.cdev = cdev;
1322 csk->wr_cred = cdev->lldi.wr_cred -
1345 cxgb4_clip_get(cdev->lldi.ports[0],
1372 cxgbit_get_cdev(cdev);
1374 spin_lock(&cdev->cskq.lock);
1375 list_add_tail(&csk->list, &cdev->cskq.list);
1376 spin_unlock(&cdev->cskq.lock);
1382 cxgbit_release_tid(cdev, tid);
1417 struct cxgbit_device *cdev = csk->com.cdev;
1438 (csk->com.cdev->lldi.pf));
1455 if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
1483 cxgbit_ofld_send(csk->com.cdev, skb);
1501 cxgbit_ofld_send(csk->com.cdev, skb);
1535 ret = cxgbit_wait_for_reply(csk->com.cdev,
1568 ret = cxgbit_wait_for_reply(csk->com.cdev,
1578 cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1581 struct tid_info *t = cdev->lldi.tids;
1600 cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1603 struct tid_info *t = cdev->lldi.tids;
1622 cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
1625 struct tid_info *t = cdev->lldi.tids;
1774 cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1899 static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1904 struct cxgb4_lld_info *lldi = &cdev->lldi;
1920 static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
1925 struct cxgb4_lld_info *lldi = &cdev->lldi;
1961 static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1965 struct cxgb4_lld_info *lldi = &cdev->lldi;