Lines Matching defs:adap
305 * @adap: the adapter
313 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
323 unmap_skb(adap->pdev_dev, d->skb, d->addr);
350 * @adap: the adapter
359 static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
374 free_tx_desc(adap, q, reclaim, unmap);
383 * @adap: the adapter
391 void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
394 (void)reclaim_completed_tx(adap, q, -1, unmap);
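
The reclaim path above reduces to ring arithmetic: the SGE posts its consumer index to the queue's status page, and the reclaimable count is the wrapped distance from the software cidx to that hardware cidx. A minimal sketch, assuming the status-page layout the driver uses (helper name illustrative):

    static inline int sketch_reclaimable(const struct sge_txq *q)
    {
        /* hardware's view of the consumer index, from the status page */
        int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

        hw_cidx -= q->cidx;             /* consumed since last reclaim */
        return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;   /* wrap */
    }

reclaim_completed_tx() then frees a bounded batch of those descriptors through free_tx_desc(); the -1 at 394 appears to request the default batch limit rather than an unbounded reclaim.
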
431 * @adap: the adapter
438 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
445 get_buf_size(adap, d),
457 * @adap: the adapter
466 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
472 get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
479 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
482 u32 val = adap->params.arch.sge_fl_db;
484 if (is_t4(adap->params.chip))
499 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
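
Putting the ring_fl_db() hits together: free-list credits are posted in units of 8 descriptors, the PIDX field layout differs between T4 and T5+ (hence the is_t4() test at 484), and the write lands on the per-PF kdoorbell register. A hedged sketch of that shape (the T5+ BAR2 user-doorbell variant is omitted):

    static void sketch_ring_fl_db(struct adapter *adap, struct sge_fl *q)
    {
        u32 val = adap->params.arch.sge_fl_db;   /* chip-specific base bits */

        if (q->pend_cred < 8)                    /* credits post in 8s */
            return;
        if (is_t4(adap->params.chip))
            val |= PIDX_V(q->pend_cred / 8);
        else
            val |= PIDX_T5_V(q->pend_cred / 8);
        wmb();                          /* descriptors before doorbell */
        t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                     val | QID_V(q->cntxt_id));
        q->pend_cred &= 7;              /* keep the sub-batch remainder */
    }
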
523 * @adap: the adapter
535 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
538 struct sge *s = &adap->sge;
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
552 node = dev_to_node(adap->pdev_dev);
567 mapping = dma_map_page(adap->pdev_dev, pg, 0,
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
620 ring_fl_db(adap, q);
622 if (unlikely(fl_starving(adap, q))) {
625 set_bit(q->cntxt_id - adap->sge.egr_start,
626 adap->sge.starving_fl);
632 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
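
Per the hits above, refill_fl()'s job is: allocate pages, DMA-map them, publish the bus addresses into the free-list ring, then ring_fl_db() and check for starvation. A compressed model, assuming the driver's rx_sw_desc fields (the large-page preference, the buffer-size encoding in the low address bits, and the blocked/starving bookkeeping at 547 and 622-626 are all omitted):

    static unsigned int sketch_refill_fl(struct adapter *adap,
                                         struct sge_fl *q, int n, gfp_t gfp)
    {
        unsigned int before = q->avail;

        while (n--) {
            struct page *pg = __dev_alloc_page(gfp);
            dma_addr_t mapping;

            if (!pg)
                break;
            mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
                                   PCI_DMA_FROMDEVICE);
            if (dma_mapping_error(adap->pdev_dev, mapping)) {
                __free_page(pg);
                break;                  /* retry on a later refill */
            }
            q->desc[q->pidx] = cpu_to_be64(mapping);
            q->sdesc[q->pidx].page = pg;
            q->sdesc[q->pidx].dma_addr = mapping;
            if (++q->pidx == q->size)
                q->pidx = 0;
            q->avail++;
        }
        q->pend_cred += q->avail - before;
        ring_fl_db(adap, q);
        return q->avail - before;
    }

__refill_fl() at 632-634 is the convenience wrapper: top the list back up to capacity, but never by more than MAX_RX_REFILL at once.
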
1019 * @adap: the adapter
1025 inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
1044 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
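
cxgb4_ring_tx_db() is the egress twin of ring_fl_db(): commit the descriptor writes, then tell hardware how many new descriptors exist. A hedged sketch of the two doorbell flavors (the write-combined 64-byte fast path newer chips support is left out):

    static inline void sketch_ring_tx_db(struct adapter *adap,
                                         struct sge_txq *q, int n)
    {
        wmb();                          /* descriptors visible first */
        if (!q->bar2_addr)              /* legacy per-PF register */
            t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
                         QID_V(q->cntxt_id) | PIDX_V(n));
        else                            /* T5+ user doorbell in BAR2 */
            writel(PIDX_T5_V(n) | QID_V(q->bar2_qid),
                   q->bar2_addr + SGE_UDB_KDOORBELL);
    }
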
1248 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1265 if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1377 static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
1398 if (is_t4(adap->params.chip))
1408 * @adap: the adapter
1418 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
1429 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
1518 struct adapter *adap;
1525 adap = pi->adapter;
1529 return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
1535 return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
1540 if (!(adap->ptp_tx_skb)) {
1542 adap->ptp_tx_skb = skb_get(skb);
1546 q = &adap->sge.ptptxq;
1548 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1552 reclaim_completed_tx(adap, &q->q, -1, true);
1556 ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1561 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1568 dev_err(adap->pdev_dev,
1586 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1642 cntrl = hwcsum(adap->params.chip, skb);
1644 cpl = write_tso_wr(adap, skb, lso);
1645 cntrl = hwcsum(adap->params.chip, skb);
1659 cntrl = hwcsum(adap->params.chip, skb);
1681 cntrl = hwcsum(adap->params.chip, skb) |
1708 TXPKT_PF_V(adap->pf);
1712 if (is_t4(adap->params.chip))
1734 cxgb4_ring_tx_db(adap, &q->q, ndesc);
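
Lines 1518-1734 trace the Ethernet xmit fast path. Stripped of the PTP, FCoE, IPsec/kTLS and immediate-data branches, its skeleton looks roughly like this (sketch only; sketch_calc_tx_descs() is a hypothetical stand-in for the driver's flit/descriptor sizing, and the real code stops the queue rather than simply returning busy):

    static netdev_tx_t sketch_eth_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
    {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *q;
        unsigned int ndesc;

        q = &adap->sge.ethtxq[skb_get_queue_mapping(skb) + pi->first_qset];
        reclaim_completed_tx(adap, &q->q, -1, true);   /* lazy cleanup */

        ndesc = sketch_calc_tx_descs(skb);             /* hypothetical */
        if (txq_avail(&q->q) < ndesc)
            return NETDEV_TX_BUSY;

        /* build FW_ETH_TX_PKT_WR; add a TSO CPL via write_tso_wr() for
         * GSO skbs; derive checksum control from hwcsum(); DMA-map the
         * fragments with cxgb4_map_skb() and append the SGL.
         */
        txq_advance(&q->q, ndesc);
        cxgb4_ring_tx_db(adap, &q->q, ndesc);
        return NETDEV_TX_OK;
    }
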
2124 void cxgb4_eosw_txq_free_desc(struct adapter *adap,
2133 unmap_skb(adap->pdev_dev, d->skb, d->addr);
2166 static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
2194 static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
2216 (adap->params.ofldq_wr_cred / 2)) {
2244 cpl = write_tso_wr(adap, skb, lso);
2260 struct adapter *adap = netdev2adap(dev);
2273 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
2295 flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
2324 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
2325 cntrl = hwcsum(adap->params.chip, skb);
2331 TXPKT_PF_V(adap->pf));
2342 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
2384 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
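
One detail worth calling out in the ETHOFLD path is the credit check at 2216. Reconstructed (hedged) from that hit: write_eo_wr() asks firmware for a completion whenever the credits consumed since the last completion cross half the offload queue's WR credit pool, so the host hears about freed credits well before the pool drains:

    if (!eosw_txq->ncompl ||
        eosw_txq->last_compl + wrlen16 > adap->params.ofldq_wr_cred / 2) {
        compl = true;               /* request a firmware completion */
        eosw_txq->ncompl++;
        eosw_txq->last_compl = 0;
    }
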
2431 struct adapter *adap = netdev2adap(dev);
2440 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
2481 struct adapter *adap = netdev2adap(dev);
2484 spin_lock(&adap->ptp_lock);
2486 spin_unlock(&adap->ptp_lock);
2534 struct adapter *adap = netdev2adap(dev);
2547 entry = cxgb4_lookup_eotid(&adap->tids, eotid);
2555 if (!(adap->flags & CXGB4_FW_OK)) {
2581 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
2588 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
2662 struct adapter *adap = pi->adapter;
2680 q = &adap->sge.ethtxq[pi->first_qset];
2683 reclaim_completed_tx(adap, &q->q, -1, true);
2702 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) |
2720 cxgb4_ring_tx_db(adap, &q->q, ndesc);
2771 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2819 cxgb4_ring_tx_db(q->adap, &q->q, written);
2827 cxgb4_ring_tx_db(q->adap, &q->q, written);
2833 * @adap: the adapter
2838 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
2843 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
2905 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
2906 q->adap->sge.txq_maperr);
2973 cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
2985 else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
3021 skb->dev = q->adap->port[0];
3033 cxgb4_ring_tx_db(q->adap, &q->q, written);
3048 cxgb4_ring_tx_db(q->adap, &q->q, written);
3124 static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
3133 if (adap->tids.nsftids)
3135 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
3138 txq_info = adap->sge.uld_txq_info[tx_uld_type];
3151 * @adap: the adapter
3158 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
3163 ret = uld_send(adap, skb, CXGB4_TX_OFLD);
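
uld_send() is the router for offload traffic: control packets short-circuit to a control queue (forced to queue 0 when server-filter TIDs are in use, per the nsftids test at 3133), while data WRs go to a ULD TX queue of the requested type. A sketch assuming the driver's queue_mapping helpers (skb_txq()/is_ctrl_pkt()-style decoding):

    static int sketch_uld_send(struct adapter *adap, struct sk_buff *skb,
                               unsigned int tx_uld_type)
    {
        struct sge_uld_txq_info *txq_info;
        unsigned int idx = skb_txq(skb);    /* decoded queue index */

        if (is_ctrl_pkt(skb)) {
            if (adap->tids.nsftids)         /* LE workaround: single ctrlq */
                idx = 0;
            return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
        }
        txq_info = adap->sge.uld_txq_info[tx_uld_type];
        return ofld_xmit(&txq_info->uldtxq[idx % txq_info->ntxq], skb);
    }

t4_ofld_send() here, and t4_crypto_send() further down (3288), read as thin wrappers that run uld_send() with bottom halves disabled, selecting CXGB4_TX_OFLD or CXGB4_TX_CRYPTO respectively.
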
3248 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
3259 struct adapter *adap;
3262 adap = netdev2adap(dev);
3265 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3281 * @adap: the adapter
3288 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
3293 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
3392 static noinline int handle_trace_pkt(struct adapter *adap,
3403 if (is_t4(adap->params.chip))
3410 skb->dev = adap->port[0];
3417 * @adap: the adapter
3424 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
3429 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
3431 ns = div_u64(tmp, adap->params.vpd.cclk);
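
The arithmetic at 3429-3431 converts an SGE timestamp counted in core-clock ticks into nanoseconds with round-to-nearest: ns = (sgetstamp * 10^6 + cclk/2) / cclk. Assuming vpd.cclk is the core clock in kHz (the same convention core_ticks_to_us() uses elsewhere in the driver), ticks/cclk gives milliseconds, the 10^6 factor scales that to nanoseconds, and the cclk/2 addend rounds instead of truncating.
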
3440 struct adapter *adapter = rxq->rspq.adap;
3590 struct adapter *adapter = rspq->adap;
3619 struct adapter *adap = pi->adapter;
3621 struct sge *s = &adap->sge;
3626 netdev = adap->port[pi->port_id];
3657 struct adapter *adapter = q->adap;
3658 struct sge *s = &q->adap->sge;
3659 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
3676 return handle_trace_pkt(q->adap, si);
3680 if (q->adap->params.tp.rx_pkt_encap) {
3739 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
3766 if (q->adap->params.tp.rx_pkt_encap)
3870 struct adapter *adapter = q->adap;
3891 free_rx_bufs(q->adap, &rxq->fl, 1);
3908 unmap_rx_buf(q->adap, &rxq->fl);
3917 dma_sync_single_for_cpu(q->adap->pdev_dev,
3948 __refill_fl(q->adap, &rxq->fl);
4002 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
4063 q->adap->tids.eotid_base;
4064 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
4095 flits = ethofld_calc_tx_flits(q->adap, skb,
4134 static unsigned int process_intrq(struct adapter *adap)
4138 struct sge_rspq *q = &adap->sge.intrq;
4141 spin_lock(&adap->sge.intrq_lock);
4151 qid -= adap->sge.ingr_start;
4152 napi_schedule(&adap->sge.ingr_map[qid]->napi);
4164 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
4171 spin_unlock(&adap->sge.intrq_lock);
4181 struct adapter *adap = cookie;
4183 if (adap->flags & CXGB4_MASTER_PF)
4184 t4_slow_intr_handler(adap);
4185 process_intrq(adap);
4196 struct adapter *adap = cookie;
4198 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
4199 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
4200 process_intrq(adap))
4207 * @adap: the adapter
4212 irq_handler_t t4_intr_handler(struct adapter *adap)
4214 if (adap->flags & CXGB4_USING_MSIX)
4216 if (adap->flags & CXGB4_USING_MSI)
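
The two anonymous handlers at 4181 and 4196 read as the INTx and MSI entry points, which makes the dispatch at 4212-4216 easy to reconstruct (hedged completion; the final fallback is an assumption):

    irq_handler_t sketch_t4_intr_handler(struct adapter *adap)
    {
        if (adap->flags & CXGB4_USING_MSIX)
            return t4_sge_intr_msix;    /* per-queue MSI-X handlers */
        if (adap->flags & CXGB4_USING_MSI)
            return t4_intr_msi;         /* 4196: clears PCIE_PF_CLI first */
        return t4_intr_intx;            /* 4181: legacy line interrupt */
    }

Note the asymmetry at 4183-4185 versus 4199-4200: the INTx handler runs process_intrq() unconditionally, while the MSI handler combines both results with a bitwise | precisely so that process_intrq() is never short-circuited away.
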
4225 struct adapter *adap = from_timer(adap, t, sge.rx_timer);
4226 struct sge *s = &adap->sge;
4237 if (fl_starving(adap, fl)) {
4249 if (!(adap->flags & CXGB4_MASTER_PF))
4252 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
4260 struct adapter *adap = from_timer(adap, t, sge.tx_timer);
4261 struct sge *s = &adap->sge;
4274 if (!is_t4(adap->params.chip)) {
4278 spin_lock(&adap->ptp_lock);
4282 free_tx_desc(adap, &q->q, avail, false);
4285 spin_unlock(&adap->ptp_lock);
4291 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
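
The TX timer at 4260-4291 spreads a fixed reclaim budget across the Ethernet TX queues and remembers where it stopped. A hedged reconstruction of that walk (the budget's starting value and the re-arm interval are assumptions):

    i = s->ethtxq_rover;
    do {
        budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
                                               budget);
        if (!budget)
            break;                      /* out of work for this tick */
        if (++i >= s->ethqsets)
            i = 0;                      /* wrap around the queue sets */
    } while (i != s->ethtxq_rover);
    s->ethtxq_rover = i;                /* resume here next time */
    mod_timer(&s->tx_timer, jiffies + TX_QCHECK_PERIOD);
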
4348 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
4355 struct sge *s = &adap->sge;
4357 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
4362 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
4364 dev_to_node(adap->pdev_dev));
4371 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
4393 CHELSIO_CHIP_VERSION(adap->params.chip);
4405 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
4408 dev_to_node(adap->pdev_dev));
4441 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4452 iq->bar2_addr = bar2_address(adap,
4467 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
4474 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
4479 fl->bar2_addr = bar2_address(adap,
4483 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
4494 if (!is_t4(adap->params.chip) && cong >= 0) {
4497 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
4513 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
4516 dev_warn(adap->pdev_dev, "Failed to set Congestion"
4527 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
4534 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
4541 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
4544 q->bar2_addr = bar2_address(adap,
4553 adap->sge.egr_map[id - adap->sge.egr_start] = q;
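
Between the two init_txq() hits at 4544 and 4553 sits the rest of the software ring reset; a plausible reconstruction (hedged), with q->stat pointing at the status page that trails the descriptor array:

    q->cntxt_id = id;
    q->in_use = 0;
    q->cidx = q->pidx = 0;
    q->stops = q->restarts = 0;
    q->stat = (void *)&q->desc[q->size];   /* status page after the ring */
    spin_lock_init(&q->db_lock);
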
4558 * @adap: the adapter
4565 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
4569 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4571 struct sge *s = &adap->sge;
4578 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
4588 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
4629 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4633 dma_free_coherent(adap->pdev_dev,
4641 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
4653 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
4657 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4659 struct sge *s = &adap->sge;
4666 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
4668 NULL, 0, dev_to_node(adap->pdev_dev));
4674 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
4693 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4695 dma_free_coherent(adap->pdev_dev,
4703 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
4704 txq->adap = adap;
4711 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
4720 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
4723 static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
4726 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4728 struct sge *s = &adap->sge;
4735 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
4749 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
4764 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4768 dma_free_coherent(adap->pdev_dev,
4775 init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
4779 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
4789 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
4794 txq->adap = adap;
4802 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
4807 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
4813 txq->adap = adap;
4822 void free_txq(struct adapter *adap, struct sge_txq *q)
4824 struct sge *s = &adap->sge;
4826 dma_free_coherent(adap->pdev_dev,
4834 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
4837 struct sge *s = &adap->sge;
4840 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
4841 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
4843 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
4851 free_rx_bufs(adap, fl, fl->avail);
4852 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
4863 * @adap: the adapter
4869 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
4873 free_rspq_fl(adap, &q->rspq,
4877 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
4880 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
4882 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
4884 free_txq(adap, &txq->q);
4890 * @adap: the adapter
4894 void t4_free_sge_resources(struct adapter *adap)
4901 for (i = 0; i < adap->sge.ethqsets; i++) {
4902 eq = &adap->sge.ethrxq[i];
4904 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
4912 for (i = 0; i < adap->sge.ethqsets; i++) {
4913 eq = &adap->sge.ethrxq[i];
4915 free_rspq_fl(adap, &eq->rspq,
4918 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
4922 etq = &adap->sge.ethtxq[i];
4924 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4927 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4930 free_txq(adap, &etq->q);
4935 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
4936 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
4940 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
4943 free_txq(adap, &cq->q);
4947 if (adap->sge.fw_evtq.desc) {
4948 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
4949 if (adap->sge.fwevtq_msix_idx >= 0)
4950 cxgb4_free_msix_idx_in_bmap(adap,
4951 adap->sge.fwevtq_msix_idx);
4954 if (adap->sge.nd_msix_idx >= 0)
4955 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
4957 if (adap->sge.intrq.desc)
4958 free_rspq_fl(adap, &adap->sge.intrq, NULL);
4960 if (!is_t4(adap->params.chip)) {
4961 etq = &adap->sge.ptptxq;
4963 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4965 spin_lock_bh(&adap->ptp_lock);
4966 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4967 spin_unlock_bh(&adap->ptp_lock);
4969 free_txq(adap, &etq->q);
4974 memset(adap->sge.egr_map, 0,
4975 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
4978 void t4_sge_start(struct adapter *adap)
4980 adap->sge.ethtxq_rover = 0;
4981 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
4982 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
4987 * @adap: the adapter
4993 void t4_sge_stop(struct adapter *adap)
4996 struct sge *s = &adap->sge;
5003 if (is_offload(adap)) {
5006 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
5010 for_each_ofldtxq(&adap->sge, i) {
5017 if (is_pci_uld(adap)) {
5020 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
5024 for_each_ofldtxq(&adap->sge, i) {
5041 * @adap: the adapter
5047 static int t4_sge_init_soft(struct adapter *adap)
5049 struct sge *s = &adap->sge;
5059 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
5061 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
5074 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
5094 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
5101 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
5102 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
5103 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
5112 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
5113 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
5114 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
5115 s->timer_val[0] = core_ticks_to_us(adap,
5117 s->timer_val[1] = core_ticks_to_us(adap,
5119 s->timer_val[2] = core_ticks_to_us(adap,
5121 s->timer_val[3] = core_ticks_to_us(adap,
5123 s->timer_val[4] = core_ticks_to_us(adap,
5125 s->timer_val[5] = core_ticks_to_us(adap,
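
The repeating shape at 5115-5126: each TIMER_VALUE register packs two timer fields, a TIMERVALUEn_G()-style accessor extracts one, and core_ticks_to_us() turns core-clock ticks into microseconds. Filling in the truncated pattern (hedged):

    s->timer_val[0] = core_ticks_to_us(adap,
                                       TIMERVALUE0_G(timer_value_0_and_1));
    s->timer_val[1] = core_ticks_to_us(adap,
                                       TIMERVALUE1_G(timer_value_0_and_1));
    /* ... TIMERVALUE2/3 and TIMERVALUE4/5 from the other two registers */
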
5128 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
5139 * @adap: the adapter
5144 int t4_sge_init(struct adapter *adap)
5146 struct sge *s = &adap->sge;
5154 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
5158 s->fl_align = t4_fl_pkt_align(adap);
5159 ret = t4_sge_init_soft(adap);
5175 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
5176 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
5187 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
5188 CHELSIO_CHIP_VERSION(adap->params.chip));
5193 t4_idma_monitor_init(adap, &s->idma_monitor);