Lines Matching defs:adap (the `adap' adapter pointer in the cxgb3 SGE code, drivers/net/ethernet/chelsio/cxgb3/sge.c; each entry is the source line number followed by the matching line)
481 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
486 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
492 * @adap: the adapter
501 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
512 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
521 dma_sync_single_for_device(&adap->pdev->dev, mapping,
534 q->gen, adap->pdev);
536 clear_rx_desc(adap->pdev, q, sd);
554 ring_fl_db(adap, q);
559 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
561 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
567 * @adap: the adapter
574 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
594 ring_fl_db(adap, q);
645 if (q->adap &&
646 !(q->adap->flags & NAPI_INIT)) {
651 q->adap = NULL;
766 * @adap: the adapter that received the packet
779 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
792 dma_sync_single_for_cpu(&adap->pdev->dev,
796 dma_sync_single_for_device(&adap->pdev->dev,
802 recycle_rx_buf(adap, fl, fl->cidx);
807 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
812 dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr),
816 __refill_fl(adap, fl);
822 * @adap: the adapter that received the packet
839 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
853 dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr,
856 dma_sync_single_for_device(&adap->pdev->dev, dma_addr,
862 recycle_rx_buf(adap, fl, fl->cidx);
881 dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len,
885 dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
1035 * @adap: the adapter
1045 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1051 t3_write_reg(adap, A_SG_KDOORBELL,
1056 t3_write_reg(adap, A_SG_KDOORBELL,
1164 * @adap: the adapter
1176 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1265 struct adapter *adap = pi->adapter;
1285 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1292 dev_err(&adap->pdev->dev,
1300 if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1363 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1364 check_ring_tx_db(adap, q);
1402 * @adap: the adapter
1418 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1465 * @adap: the adapter
1473 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1491 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1509 t3_write_reg(adap, A_SG_KDOORBELL,
1554 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1561 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1565 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1618 * @adap: the adapter
1629 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1657 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1688 * @adap: the adapter
1694 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1701 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1703 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1714 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1729 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1730 check_ring_tx_db(adap, q);
1747 struct adapter *adap = pi->adapter;
1751 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1769 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1784 write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1796 t3_write_reg(adap, A_SG_KDOORBELL,
1835 struct adapter *adap = tdev2adap(tdev);
1836 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1839 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1841 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1900 struct adapter *adapter = qs->adap;
2074 * @adap: the adapter
2084 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2092 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2100 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2125 * @adap: the adapter
2134 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2152 dma_sync_single_for_cpu(&adap->pdev->dev,
2158 dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
2198 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2244 * @adap: the adapter
2252 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2261 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2272 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2309 * @adap: the adapter
2322 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2378 __refill_fl(adap, fl);
2380 lro_add_page(adap, qs, fl,
2386 skb = get_packet_pg(adap, fl, q,
2392 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2420 refill_rspq(adap, q, q->credits);
2430 rx_eth(adap, q, skb, ethpad, lro);
2436 ngathered = rx_offload(&adap->tdev, q, skb,
2447 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2450 check_ring_db(adap, qs, sleeping);
2477 struct adapter *adap = qs->adap;
2478 int work_done = process_responses(adap, qs, budget);
2497 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2514 * @adap: the adapter
2526 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2550 refill_rspq(adap, q, q->credits);
2559 check_ring_db(adap, qs, sleeping);
2570 * @adap: the adapter
2583 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2591 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2592 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2607 struct adapter *adap = qs->adap;
2611 if (process_responses(adap, qs, -1) == 0)
2613 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2630 if (handle_responses(qs->adap, q) < 0)
2645 struct adapter *adap = cookie;
2646 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2650 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2651 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2656 if (adap->params.nports == 2 &&
2657 process_responses(adap, &adap->sge.qs[1], -1)) {
2658 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2660 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2666 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2695 struct adapter *adap = cookie;
2696 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2700 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2701 if (adap->params.nports == 2)
2702 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2703 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2713 static inline int process_responses_gts(struct adapter *adap,
2718 work = process_responses(adap, rspq_to_qset(rq), -1);
2719 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2733 struct adapter *adap = cookie;
2734 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2735 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2740 w1 = adap->params.nports == 2 &&
2744 t3_write_reg(adap, A_PL_CLI, 0);
2745 t3_read_reg(adap, A_PL_CLI); /* flush */
2748 process_responses_gts(adap, q0);
2751 process_responses_gts(adap, q1);
2755 work_done = t3_slow_intr_handler(adap);
2771 struct adapter *adap = cookie;
2772 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2774 t3_write_reg(adap, A_PL_CLI, 0);
2775 map = t3_read_reg(adap, A_SG_DATA_INTR);
2783 t3_slow_intr_handler(adap);
2786 process_responses_gts(adap, q0);
2789 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2805 struct adapter *adap = cookie;
2806 struct sge_qset *qs0 = &adap->sge.qs[0];
2809 t3_write_reg(adap, A_PL_CLI, 0);
2810 map = t3_read_reg(adap, A_SG_DATA_INTR);
2818 t3_slow_intr_handler(adap);
2824 napi_schedule(&adap->sge.qs[1].napi);
2832 * @adap: the adapter
2839 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2841 if (adap->flags & USING_MSIX)
2843 if (adap->flags & USING_MSI)
2845 if (adap->params.rev > 0)
2924 struct adapter *adap = pi->adapter;
2929 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2935 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2965 struct adapter *adap = pi->adapter;
2968 lock = adap->params.rev > 0 ?
2969 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2977 if (adap->params.rev < 4) {
2978 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2984 refill_rspq(adap, &qs->rspq, 1);
2986 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2993 __refill_fl(adap, &qs->fl[0]);
2995 __refill_fl(adap, &qs->fl[1]);
3167 q->adap = adapter;
3204 * @adap: the adapter
3208 void t3_start_sge_timers(struct adapter *adap)
3213 struct sge_qset *q = &adap->sge.qs[i];
3227 * @adap: the adapter
3231 void t3_stop_sge_timers(struct adapter *adap)
3236 struct sge_qset *q = &adap->sge.qs[i];
3247 * @adap: the adapter
3251 void t3_free_sge_resources(struct adapter *adap)
3256 t3_free_qset(adap, &adap->sge.qs[i]);
3261 * @adap: the adapter
3266 void t3_sge_start(struct adapter *adap)
3268 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3273 * @adap: the adapter
3283 void t3_sge_stop_dma(struct adapter *adap)
3285 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3290 * @adap: the adapter
3295 void t3_sge_stop(struct adapter *adap)
3299 t3_sge_stop_dma(adap);
3302 if (!(adap->flags & FULL_INIT_DONE))
3305 struct sge_qset *qs = &adap->sge.qs[i];
3314 * @adap: the adapter
3322 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3324 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3333 if (adap->params.rev > 0) {
3334 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3337 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3338 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3340 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3341 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3342 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3343 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3344 adap->params.rev < T3_REV_C ? 1000 : 500);
3345 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3346 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3347 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3348 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3349 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3354 * @adap: the associated adapter
3361 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3371 q->polling = adap->params.rev > 0;
3382 spin_lock_init(&adap->sge.reg_lock);
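
The pattern these matches trace most often is refill-then-doorbell: refill_fl() posts new free-list buffers and ring_fl_db() writes the A_SG_KDOORBELL register so the SGE notices them (the TX path rings the same doorbell via check_ring_tx_db()). The following is a minimal standalone sketch of that pattern, not the driver's code: the struct layout, the register offset, and the stubbed t3_write_reg() are simplified stand-ins for illustration; only the names t3_write_reg, A_SG_KDOORBELL and V_EGRCNTX come from the listing above.

/*
 * Sketch of the cxgb3 refill-then-doorbell idiom.  All definitions here
 * are illustrative stand-ins, not the real cxgb3 declarations.
 */
#include <stdio.h>

#define A_SG_KDOORBELL 0x38u             /* assumed offset, illustration only */
#define V_EGRCNTX(x)   ((x) & 0xffffu)   /* assumed field encoding */

struct adapter { int id; };              /* stand-in for the real adapter */

struct sge_fl {
	unsigned int cntxt_id;           /* SGE egress context of this free list */
	unsigned int credits;            /* buffers currently posted to hardware */
	unsigned int pend_cred;          /* new buffers not yet announced */
};

/* Stub: the real helper writes a memory-mapped device register. */
static void t3_write_reg(struct adapter *adap, unsigned int reg, unsigned int val)
{
	printf("adapter %d: reg 0x%x <- 0x%x\n", adap->id, reg, val);
}

/* Announce pending free-list buffers to the SGE, as ring_fl_db() does. */
static void ring_fl_db_sketch(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred) {              /* ring only when something is pending */
		q->pend_cred = 0;
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}

int main(void)
{
	struct adapter adap = { .id = 0 };
	struct sge_fl fl = { .cntxt_id = 7, .credits = 16, .pend_cred = 4 };

	ring_fl_db_sketch(&adap, &fl);   /* rings: 4 buffers pending */
	ring_fl_db_sketch(&adap, &fl);   /* no-op: nothing pending */
	return 0;
}

The real ring_fl_db() also batches doorbells rather than ringing on every buffer, and it issues a write barrier before the register write so the hardware never observes the doorbell ahead of the descriptor updates.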