Lines Matching defs:adap

485 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
490 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
496 * @adap: the adapter
505 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
516 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
525 pci_dma_sync_single_for_device(adap->pdev, mapping,
538 q->gen, adap->pdev);
540 clear_rx_desc(adap->pdev, q, sd);
558 ring_fl_db(adap, q);
563 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
565 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
571 * @adap: the adapter
578 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
598 ring_fl_db(adap, q);
649 if (q->adap &&
650 !(q->adap->flags & NAPI_INIT)) {
655 q->adap = NULL;
770 * @adap: the adapter that received the packet
783 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
796 pci_dma_sync_single_for_cpu(adap->pdev,
800 pci_dma_sync_single_for_device(adap->pdev,
806 recycle_rx_buf(adap, fl, fl->cidx);
811 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
816 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
820 __refill_fl(adap, fl);
826 * @adap: the adapter that received the packet
843 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
857 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
860 pci_dma_sync_single_for_device(adap->pdev, dma_addr,
867 recycle_rx_buf(adap, fl, fl->cidx);
886 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
890 pci_unmap_page(adap->pdev,
1040 * @adap: the adapter
1050 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1056 t3_write_reg(adap, A_SG_KDOORBELL,
1061 t3_write_reg(adap, A_SG_KDOORBELL,
1169 * @adap: the adapter
1181 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1270 struct adapter *adap = pi->adapter;
1290 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1297 dev_err(&adap->pdev->dev,
1305 if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1368 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1369 check_ring_tx_db(adap, q);
1407 * @adap: the adapter
1423 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1470 * @adap: the adapter
1478 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1496 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1514 t3_write_reg(adap, A_SG_KDOORBELL,
1558 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1565 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1569 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1621 * @adap: the adapter
1632 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1660 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1691 * @adap: the adapter
1697 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1704 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1706 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1717 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1732 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1733 check_ring_tx_db(adap, q);
1749 struct adapter *adap = pi->adapter;
1753 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1771 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1786 write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1798 t3_write_reg(adap, A_SG_KDOORBELL,
1837 struct adapter *adap = tdev2adap(tdev);
1838 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1841 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1843 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1902 struct adapter *adapter = qs->adap;
2072 * @adap: the adapter
2082 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2090 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2098 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2123 * @adap: the adapter
2132 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2150 pci_dma_sync_single_for_cpu(adap->pdev,
2157 pci_unmap_page(adap->pdev,
2200 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2246 * @adap: the adapter
2254 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2263 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2274 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2311 * @adap: the adapter
2324 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2380 __refill_fl(adap, fl);
2382 lro_add_page(adap, qs, fl,
2388 skb = get_packet_pg(adap, fl, q,
2394 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2422 refill_rspq(adap, q, q->credits);
2432 rx_eth(adap, q, skb, ethpad, lro);
2438 ngathered = rx_offload(&adap->tdev, q, skb,
2449 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2452 check_ring_db(adap, qs, sleeping);
2479 struct adapter *adap = qs->adap;
2480 int work_done = process_responses(adap, qs, budget);
2499 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2516 * @adap: the adapter
2528 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2552 refill_rspq(adap, q, q->credits);
2561 check_ring_db(adap, qs, sleeping);
2572 * @adap: the adapter
2585 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2593 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2594 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2609 struct adapter *adap = qs->adap;
2613 if (process_responses(adap, qs, -1) == 0)
2615 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2632 if (handle_responses(qs->adap, q) < 0)
2647 struct adapter *adap = cookie;
2648 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2652 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2653 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2658 if (adap->params.nports == 2 &&
2659 process_responses(adap, &adap->sge.qs[1], -1)) {
2660 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2662 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2668 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2697 struct adapter *adap = cookie;
2698 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2702 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2703 if (adap->params.nports == 2)
2704 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2705 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2715 static inline int process_responses_gts(struct adapter *adap,
2720 work = process_responses(adap, rspq_to_qset(rq), -1);
2721 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2735 struct adapter *adap = cookie;
2736 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2737 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2742 w1 = adap->params.nports == 2 &&
2746 t3_write_reg(adap, A_PL_CLI, 0);
2747 t3_read_reg(adap, A_PL_CLI); /* flush */
2750 process_responses_gts(adap, q0);
2753 process_responses_gts(adap, q1);
2757 work_done = t3_slow_intr_handler(adap);
2773 struct adapter *adap = cookie;
2774 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2776 t3_write_reg(adap, A_PL_CLI, 0);
2777 map = t3_read_reg(adap, A_SG_DATA_INTR);
2785 t3_slow_intr_handler(adap);
2788 process_responses_gts(adap, q0);
2791 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2807 struct adapter *adap = cookie;
2808 struct sge_qset *qs0 = &adap->sge.qs[0];
2811 t3_write_reg(adap, A_PL_CLI, 0);
2812 map = t3_read_reg(adap, A_SG_DATA_INTR);
2820 t3_slow_intr_handler(adap);
2826 napi_schedule(&adap->sge.qs[1].napi);
2834 * @adap: the adapter
2841 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2843 if (adap->flags & USING_MSIX)
2845 if (adap->flags & USING_MSI)
2847 if (adap->params.rev > 0)
2926 struct adapter *adap = pi->adapter;
2931 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2937 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2967 struct adapter *adap = pi->adapter;
2970 lock = adap->params.rev > 0 ?
2971 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2979 if (adap->params.rev < 4) {
2980 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2986 refill_rspq(adap, &qs->rspq, 1);
2988 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2995 __refill_fl(adap, &qs->fl[0]);
2997 __refill_fl(adap, &qs->fl[1]);
3169 q->adap = adapter;
3206 * @adap: the adapter
3210 void t3_start_sge_timers(struct adapter *adap)
3215 struct sge_qset *q = &adap->sge.qs[i];
3229 * @adap: the adapter
3233 void t3_stop_sge_timers(struct adapter *adap)
3238 struct sge_qset *q = &adap->sge.qs[i];
3249 * @adap: the adapter
3253 void t3_free_sge_resources(struct adapter *adap)
3258 t3_free_qset(adap, &adap->sge.qs[i]);
3263 * @adap: the adapter
3268 void t3_sge_start(struct adapter *adap)
3270 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3275 * @adap: the adapter
3285 void t3_sge_stop_dma(struct adapter *adap)
3287 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3292 * @adap: the adapter
3297 void t3_sge_stop(struct adapter *adap)
3301 t3_sge_stop_dma(adap);
3304 struct sge_qset *qs = &adap->sge.qs[i];
3313 * @adap: the adapter
3321 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3323 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3332 if (adap->params.rev > 0) {
3333 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3336 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3337 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3339 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3340 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3341 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3342 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3343 adap->params.rev < T3_REV_C ? 1000 : 500);
3344 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3345 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3346 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3347 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3348 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3353 * @adap: the associated adapter
3360 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3370 q->polling = adap->params.rev > 0;
3381 spin_lock_init(&adap->sge.reg_lock);
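
A standalone sketch of the handler-selection pattern visible at listing lines 2841-2847, where t3_intr_handler() chooses the top-level interrupt handler from adap->flags (USING_MSIX, USING_MSI) and the chip revision. Only the three conditions come from the matched lines; the flag values, the stub handlers, the pick_intr_handler name, and main() are invented for this illustration and are not the driver's actual code.

    /*
     * Illustrative sketch only -- not the cxgb3 driver's implementation.
     * Mirrors the cascade of checks on the matched lines: MSI-X first,
     * then MSI, then chip revision decides between two legacy handlers.
     */
    #include <stdio.h>

    #define USING_MSIX 0x1          /* assumed flag bits, for illustration */
    #define USING_MSI  0x2

    struct params  { int rev; };
    struct adapter { unsigned int flags; struct params params; };

    typedef void (*irq_handler_t)(void);

    static void msix_handler(void)     { puts("MSI-X handler"); }
    static void msi_handler(void)      { puts("MSI handler"); }
    static void legacy_b_handler(void) { puts("rev-B legacy handler"); }
    static void legacy_a_handler(void) { puts("rev-A legacy handler"); }

    /* Same shape as the matched conditions at lines 2843/2845/2847. */
    static irq_handler_t pick_intr_handler(struct adapter *adap)
    {
            if (adap->flags & USING_MSIX)
                    return msix_handler;
            if (adap->flags & USING_MSI)
                    return msi_handler;
            if (adap->params.rev > 0)
                    return legacy_b_handler;
            return legacy_a_handler;
    }

    int main(void)
    {
            struct adapter adap = { .flags = USING_MSI, .params = { .rev = 1 } };

            pick_intr_handler(&adap)();   /* prints "MSI handler" */
            return 0;
    }

Compiled with a plain C compiler, the example prints "MSI handler" for the flags chosen in main(); changing the flags or the revision exercises the other branches, mirroring how the driver returns a different top-level IRQ handler per interrupt mode.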