Lines Matching defs:adap
305 * @adap: the adapter
313 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
323 unmap_skb(adap->pdev_dev, d->skb, d->addr);
350 * @adap: the adapter
359 static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
374 free_tx_desc(adap, q, reclaim, unmap);
383 * @adap: the adapter
391 void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
394 (void)reclaim_completed_tx(adap, q, -1, unmap);
431 * @adap: the adapter
438 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
445 get_buf_size(adap, d),
457 * @adap: the adapter
466 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
472 get_buf_size(adap, d), DMA_FROM_DEVICE);
479 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
482 u32 val = adap->params.arch.sge_fl_db;
484 if (is_t4(adap->params.chip))
499 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
523 * @adap: the adapter
535 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
538 struct sge *s = &adap->sge;
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
552 node = dev_to_node(adap->pdev_dev);
567 mapping = dma_map_page(adap->pdev_dev, pg, 0,
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
620 ring_fl_db(adap, q);
622 if (unlikely(fl_starving(adap, q))) {
625 set_bit(q->cntxt_id - adap->sge.egr_start,
626 adap->sge.starving_fl);
632 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
1019 * @adap: the adapter
1025 inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
1044 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1248 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1265 if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1377 static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
1398 if (is_t4(adap->params.chip))
1408 * @adap: the adapter
1418 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
1429 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
1518 struct adapter *adap;
1525 adap = pi->adapter;
1529 return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
1535 return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
1540 if (!(adap->ptp_tx_skb)) {
1542 adap->ptp_tx_skb = skb_get(skb);
1546 q = &adap->sge.ptptxq;
1548 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1552 reclaim_completed_tx(adap, &q->q, -1, true);
1556 ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1561 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1568 dev_err(adap->pdev_dev,
1586 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1643 cntrl = hwcsum(adap->params.chip, skb);
1645 cpl = write_tso_wr(adap, skb, lso);
1646 cntrl = hwcsum(adap->params.chip, skb);
1660 cntrl = hwcsum(adap->params.chip, skb);
1682 cntrl = hwcsum(adap->params.chip, skb) |
1709 TXPKT_PF_V(adap->pf);
1713 if (is_t4(adap->params.chip))
1735 cxgb4_ring_tx_db(adap, &q->q, ndesc);
2130 void cxgb4_eosw_txq_free_desc(struct adapter *adap,
2139 unmap_skb(adap->pdev_dev, d->skb, d->addr);
2172 static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
2200 static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
2222 (adap->params.ofldq_wr_cred / 2)) {
2250 cpl = write_tso_wr(adap, skb, lso);
2266 struct adapter *adap = netdev2adap(dev);
2279 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
2301 flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
2330 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
2331 cntrl = hwcsum(adap->params.chip, skb);
2337 TXPKT_PF_V(adap->pf));
2348 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
2390 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
2437 struct adapter *adap = netdev2adap(dev);
2446 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
2487 struct adapter *adap = netdev2adap(dev);
2490 spin_lock(&adap->ptp_lock);
2492 spin_unlock(&adap->ptp_lock);
2540 struct adapter *adap = netdev2adap(dev);
2553 entry = cxgb4_lookup_eotid(&adap->tids, eotid);
2561 if (!(adap->flags & CXGB4_FW_OK)) {
2587 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
2594 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
2668 struct adapter *adap = pi->adapter;
2686 q = &adap->sge.ethtxq[pi->first_qset];
2689 reclaim_completed_tx(adap, &q->q, -1, true);
2708 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) |
2726 cxgb4_ring_tx_db(adap, &q->q, ndesc);
2777 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2825 cxgb4_ring_tx_db(q->adap, &q->q, written);
2833 cxgb4_ring_tx_db(q->adap, &q->q, written);
2839 * @adap: the adapter
2844 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
2849 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
2911 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
2912 q->adap->sge.txq_maperr);
2979 cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
2991 else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
3027 skb->dev = q->adap->port[0];
3039 cxgb4_ring_tx_db(q->adap, &q->q, written);
3054 cxgb4_ring_tx_db(q->adap, &q->q, written);
3130 static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
3139 if (adap->tids.nsftids)
3141 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
3144 txq_info = adap->sge.uld_txq_info[tx_uld_type];
3157 * @adap: the adapter
3164 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
3169 ret = uld_send(adap, skb, CXGB4_TX_OFLD);
3254 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
3265 struct adapter *adap;
3268 adap = netdev2adap(dev);
3271 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3287 * @adap: the adapter
3294 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
3299 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
3398 static noinline int handle_trace_pkt(struct adapter *adap,
3409 if (is_t4(adap->params.chip))
3416 skb->dev = adap->port[0];
3423 * @adap: the adapter
3430 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
3435 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
3437 ns = div_u64(tmp, adap->params.vpd.cclk);
3446 struct adapter *adapter = rxq->rspq.adap;
3596 struct adapter *adapter = rspq->adap;
3644 struct adapter *adap = pi->adapter;
3646 struct sge *s = &adap->sge;
3651 netdev = adap->port[pi->port_id];
3682 struct adapter *adapter = q->adap;
3683 struct sge *s = &q->adap->sge;
3684 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
3701 return handle_trace_pkt(q->adap, si);
3705 if (q->adap->params.tp.rx_pkt_encap) {
3764 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
3791 if (q->adap->params.tp.rx_pkt_encap)
3895 struct adapter *adapter = q->adap;
3916 free_rx_bufs(q->adap, &rxq->fl, 1);
3933 unmap_rx_buf(q->adap, &rxq->fl);
3942 dma_sync_single_for_cpu(q->adap->pdev_dev,
3973 __refill_fl(q->adap, &rxq->fl);
4027 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
4088 q->adap->tids.eotid_base;
4089 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
4120 flits = ethofld_calc_tx_flits(q->adap, skb,
4159 static unsigned int process_intrq(struct adapter *adap)
4163 struct sge_rspq *q = &adap->sge.intrq;
4166 spin_lock(&adap->sge.intrq_lock);
4176 qid -= adap->sge.ingr_start;
4177 napi_schedule(&adap->sge.ingr_map[qid]->napi);
4189 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
4196 spin_unlock(&adap->sge.intrq_lock);
4206 struct adapter *adap = cookie;
4208 if (adap->flags & CXGB4_MASTER_PF)
4209 t4_slow_intr_handler(adap);
4210 process_intrq(adap);
4221 struct adapter *adap = cookie;
4223 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
4224 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
4225 process_intrq(adap))
4232 * @adap: the adapter
4237 irq_handler_t t4_intr_handler(struct adapter *adap)
4239 if (adap->flags & CXGB4_USING_MSIX)
4241 if (adap->flags & CXGB4_USING_MSI)
4250 struct adapter *adap = from_timer(adap, t, sge.rx_timer);
4251 struct sge *s = &adap->sge;
4262 if (fl_starving(adap, fl)) {
4274 if (!(adap->flags & CXGB4_MASTER_PF))
4277 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
4285 struct adapter *adap = from_timer(adap, t, sge.tx_timer);
4286 struct sge *s = &adap->sge;
4299 if (!is_t4(adap->params.chip)) {
4303 spin_lock(&adap->ptp_lock);
4307 free_tx_desc(adap, &q->q, avail, false);
4310 spin_unlock(&adap->ptp_lock);
4316 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
4373 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
4380 struct sge *s = &adap->sge;
4382 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
4387 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
4389 dev_to_node(adap->pdev_dev));
4396 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
4418 CHELSIO_CHIP_VERSION(adap->params.chip);
4430 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
4433 dev_to_node(adap->pdev_dev));
4466 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4477 iq->bar2_addr = bar2_address(adap,
4492 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
4499 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
4504 fl->bar2_addr = bar2_address(adap,
4508 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
4519 if (!is_t4(adap->params.chip) && cong >= 0) {
4522 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
4538 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
4541 dev_warn(adap->pdev_dev, "Failed to set Congestion"
4552 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
4559 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
4566 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
4569 q->bar2_addr = bar2_address(adap,
4578 adap->sge.egr_map[id - adap->sge.egr_start] = q;
4583 * @adap: the adapter
4590 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
4594 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4596 struct sge *s = &adap->sge;
4603 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
4613 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
4659 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4663 dma_free_coherent(adap->pdev_dev,
4671 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
4683 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
4687 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4689 struct sge *s = &adap->sge;
4696 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
4698 NULL, 0, dev_to_node(adap->pdev_dev));
4704 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
4723 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4725 dma_free_coherent(adap->pdev_dev,
4733 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
4734 txq->adap = adap;
4741 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
4750 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
4753 static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
4756 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4758 struct sge *s = &adap->sge;
4765 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
4779 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
4794 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4798 dma_free_coherent(adap->pdev_dev,
4805 init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
4809 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
4819 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
4824 txq->adap = adap;
4832 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
4837 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
4843 txq->adap = adap;
4852 void free_txq(struct adapter *adap, struct sge_txq *q)
4854 struct sge *s = &adap->sge;
4856 dma_free_coherent(adap->pdev_dev,
4864 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
4867 struct sge *s = &adap->sge;
4870 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
4871 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
4873 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
4881 free_rx_bufs(adap, fl, fl->avail);
4882 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
4893 * @adap: the adapter
4899 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
4903 free_rspq_fl(adap, &q->rspq,
4907 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
4910 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
4912 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
4914 free_txq(adap, &txq->q);
4920 * @adap: the adapter
4924 void t4_free_sge_resources(struct adapter *adap)
4931 for (i = 0; i < adap->sge.ethqsets; i++) {
4932 eq = &adap->sge.ethrxq[i];
4934 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
4942 for (i = 0; i < adap->sge.ethqsets; i++) {
4943 eq = &adap->sge.ethrxq[i];
4945 free_rspq_fl(adap, &eq->rspq,
4948 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
4952 etq = &adap->sge.ethtxq[i];
4954 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4957 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4960 free_txq(adap, &etq->q);
4965 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
4966 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
4970 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
4973 free_txq(adap, &cq->q);
4977 if (adap->sge.fw_evtq.desc) {
4978 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
4979 if (adap->sge.fwevtq_msix_idx >= 0)
4980 cxgb4_free_msix_idx_in_bmap(adap,
4981 adap->sge.fwevtq_msix_idx);
4984 if (adap->sge.nd_msix_idx >= 0)
4985 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
4987 if (adap->sge.intrq.desc)
4988 free_rspq_fl(adap, &adap->sge.intrq, NULL);
4990 if (!is_t4(adap->params.chip)) {
4991 etq = &adap->sge.ptptxq;
4993 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4995 spin_lock_bh(&adap->ptp_lock);
4996 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4997 spin_unlock_bh(&adap->ptp_lock);
4999 free_txq(adap, &etq->q);
5004 memset(adap->sge.egr_map, 0,
5005 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
5008 void t4_sge_start(struct adapter *adap)
5010 adap->sge.ethtxq_rover = 0;
5011 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
5012 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
5017 * @adap: the adapter
5023 void t4_sge_stop(struct adapter *adap)
5026 struct sge *s = &adap->sge;
5033 if (is_offload(adap)) {
5036 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
5040 for_each_ofldtxq(&adap->sge, i) {
5047 if (is_pci_uld(adap)) {
5050 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
5054 for_each_ofldtxq(&adap->sge, i) {
5071 * @adap: the adapter
5077 static int t4_sge_init_soft(struct adapter *adap)
5079 struct sge *s = &adap->sge;
5089 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
5091 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
5104 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
5124 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
5131 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
5132 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
5133 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
5142 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
5143 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
5144 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
5145 s->timer_val[0] = core_ticks_to_us(adap,
5147 s->timer_val[1] = core_ticks_to_us(adap,
5149 s->timer_val[2] = core_ticks_to_us(adap,
5151 s->timer_val[3] = core_ticks_to_us(adap,
5153 s->timer_val[4] = core_ticks_to_us(adap,
5155 s->timer_val[5] = core_ticks_to_us(adap,
5158 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
5169 * @adap: the adapter
5174 int t4_sge_init(struct adapter *adap)
5176 struct sge *s = &adap->sge;
5184 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
5188 s->fl_align = t4_fl_pkt_align(adap);
5189 ret = t4_sge_init_soft(adap);
5205 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
5206 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
5217 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
5218 CHELSIO_CHIP_VERSION(adap->params.chip));
5223 t4_idma_monitor_init(adap, &s->idma_monitor);
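
Read top to bottom, these matches outline the SGE lifecycle: t4_sge_init (5174) latches the SGE configuration, the t4_sge_alloc_* routines (4373, 4590, 4683, 4753) build the rings, t4_sge_start (5008) arms the service timers, and t4_sge_stop (5023) with t4_free_sge_resources (4924) tears everything down. Below is a minimal sketch of that call order from a hypothetical caller; only the t4_sge_* and cxgb4_* names are taken from the listing, and the wrapper function itself is invented for illustration.

/*
 * Hypothetical sketch of the SGE lifecycle implied by the definitions
 * above; assumes the usual cxgb4 context (#include "cxgb4.h").
 * Only the t4_sge_* calls are real driver entry points from the listing;
 * this wrapper is not part of the driver.
 */
static int sge_lifecycle_sketch(struct adapter *adap)
{
	int ret;

	ret = t4_sge_init(adap);	/* 5174: latch SGE config from hw */
	if (ret)
		return ret;

	/*
	 * Per-queue setup would follow via t4_sge_alloc_rxq (4373),
	 * t4_sge_alloc_eth_txq (4590), etc.; their parameter lists are
	 * truncated in the listing, so they are not called here.
	 */

	t4_sge_start(adap);		/* 5008: arm rx/tx service timers */

	/*
	 * Normal operation: xmit paths ring doorbells via
	 * cxgb4_ring_tx_db (1025), and rx processing refills the free
	 * lists via __refill_fl (632).
	 */

	t4_sge_stop(adap);		/* 5023: quiesce Tx queues */
	t4_free_sge_resources(adap);	/* 4924: free rings/descriptors */
	return 0;
}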