Lines matching references to qs (cxgb3 SGE driver, sge.c; each match below is prefixed with its line number in the source file)
721 * @qs: the queue set
726 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
728 qs->rspq.cntxt_id = id;
729 qs->fl[0].cntxt_id = 2 * id;
730 qs->fl[1].cntxt_id = 2 * id + 1;
731 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
732 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
733 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
734 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
735 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
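
The matches at 726-735 cover the whole body of init_qset_cntxt, so it can be reconstructed almost verbatim; only the kernel-doc lines beyond @qs are paraphrased here:

/**
 *	init_qset_cntxt - initialize a queue set's context IDs
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Derives the SGE context IDs and firmware tokens of the queue set's
 *	queues from the queue set id (reconstructed from the matches above).
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;		/* two free lists per set */
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}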
1246 struct sge_qset *qs, struct sge_txq *q)
1249 set_bit(TXQ_ETH, &qs->txq_stopped);
1267 struct sge_qset *qs;
1281 qs = &pi->qs[qidx];
1282 q = &qs->txq[TXQ_ETH];
1291 t3_stop_tx_queue(txq, qs, q);
1308 t3_stop_tx_queue(txq, qs, q);
1311 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1330 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1332 qs->port_stats[SGE_PSTAT_TSO]++;
1334 qs->port_stats[SGE_PSTAT_VLANINS]++;
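
The matches at 1246-1311 trace the Ethernet transmit path: when descriptors run low the driver stops the stack's queue via t3_stop_tx_queue, then re-tests the TXQ_ETH stopped bit to close the race with the reclaim path. A minimal sketch of the helper and the recheck; the q->stops counter and the netif_tx_stop_queue()/netif_tx_start_queue() calls are assumptions, the bit handling is from the matches:

static inline void t3_stop_tx_queue(struct netdev_queue *txq,
				    struct sge_qset *qs, struct sge_txq *q)
{
	netif_tx_stop_queue(txq);		/* stop the stack's queue */
	set_bit(TXQ_ETH, &qs->txq_stopped);
	q->stops++;				/* assumed stop counter */
}

/* Caller side (sketch): stop, then recheck to avoid a lost wakeup. */
if (unlikely(q->size - q->in_use < ndesc)) {
	t3_stop_tx_queue(txq, qs, q);
	if (should_restart_tx(q) &&
	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
		q->restarts++;
		netif_tx_start_queue(txq);	/* reclaim freed space meanwhile */
	}
}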
1427 struct sge_qset *qs = txq_to_qset(q, qid);
1429 set_bit(qid, &qs->txq_stopped);
1433 test_and_clear_bit(qid, &qs->txq_stopped))
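
Lines 1427-1433 show the same stop/recheck idiom generalized to any Tx queue via txq_to_qset() and a qid-indexed stopped bit (in mainline sge.c this lives in check_desc_avail). A simplified sketch; the return-code convention, q->stops, and the sendq parking are assumptions:

/* Sketch: 0 = proceed, 1 = skb parked for later, 2 = raced, retry now. */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_atomic();		/* order vs. the reclaim path */

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;		/* space freed, caller retries */

		q->stops++;
		__skb_queue_tail(&q->sendq, skb); /* park for the resume task */
		return 1;
	}
	return 0;
}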
1523 struct sge_qset *qs = container_of(w, struct sge_qset,
1525 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1543 set_bit(TXQ_CTRL, &qs->txq_stopped);
1547 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1554 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1565 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
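
Lines 1523-1554 are the control-queue resume work function (restart_ctrlq), which drains the queue under q->lock and finishes by ringing the egress doorbell via A_SG_KDOORBELL; lines 1743-1762 below show the offload queue following the same pattern. Line 1565 is the management-frame entry point, sketched almost entirely from the match; the local_bh_disable() bracketing is an assumption:

int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();	/* assumed: ctrl_xmit expects BH context */
	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
	local_bh_enable();
	return ret;
}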
1743 struct sge_qset *qs = container_of(w, struct sge_qset,
1745 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1746 const struct port_info *pi = netdev_priv(qs->netdev);
1758 set_bit(TXQ_OFLD, &qs->txq_stopped);
1762 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1836 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1839 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1841 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
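
Lines 1836-1841 give the offload transmit dispatcher nearly whole: control packets go to TXQ_CTRL, everything else to TXQ_OFLD. In the sketch below, tdev2adap() and is_ctrl_pkt() are assumed helper names; queue_set(skb) is from the match at 1836:

int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct adapter *adap = tdev2adap(tdev);		/* assumed helper */
	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];

	if (unlikely(is_ctrl_pkt(skb)))			/* assumed helper */
		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}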
1860 struct sge_qset *qs = rspq_to_qset(q);
1862 napi_schedule(&qs->napi);
1898 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1899 struct sge_rspq *q = &qs->rspq;
1900 struct adapter *adapter = qs->adap;
1980 * @qs: the queue set to resume
1985 static void restart_tx(struct sge_qset *qs)
1987 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1988 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1989 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1990 qs->txq[TXQ_ETH].restarts++;
1991 if (netif_running(qs->netdev))
1992 netif_tx_wake_queue(qs->tx_q);
1995 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1996 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1997 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1998 qs->txq[TXQ_OFLD].restarts++;
2001 queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
2003 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2004 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2005 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2006 qs->txq[TXQ_CTRL].restarts++;
2009 queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
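
The matches at 1980-2009 cover restart_tx nearly line for line: the Ethernet queue is woken directly, while the offload and control queues are resumed by scheduling their qresume_task on cxgb3_wq. Reconstructed, with only braces and blank lines filled in:

static void restart_tx(struct sge_qset *qs)
{
	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
		qs->txq[TXQ_ETH].restarts++;
		if (netif_running(qs->netdev))
			netif_tx_wake_queue(qs->tx_q);
	}

	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
		qs->txq[TXQ_OFLD].restarts++;
		queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
	}

	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
		qs->txq[TXQ_CTRL].restarts++;
		queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
	}
}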
2088 struct sge_qset *qs = rspq_to_qset(rq);
2096 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2100 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2103 qs->port_stats[SGE_PSTAT_VLANEX]++;
2108 napi_gro_receive(&qs->napi, skb);
2126 * @qs: the associated queue set
2134 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2138 struct port_info *pi = netdev_priv(qs->netdev);
2145 if (!qs->nomem) {
2146 skb = napi_get_frags(&qs->napi);
2147 qs->nomem = !skb;
2164 qs->nomem = 0;
2173 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2175 if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2178 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2182 cpl = qs->lro_va;
2198 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2201 qs->port_stats[SGE_PSTAT_VLANEX]++;
2204 napi_gro_frags(&qs->napi);
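
Lines 2134-2204 show lro_add_page building a GRO packet out of free-list page chunks: napi_get_frags() hands out (or reuses) the per-NAPI skb, qs->nomem latches allocation failure so the fast path can fall back, and napi_gro_frags() submits the packet once the final fragment arrives. A compressed sketch of that shape, with the descriptor and fragment bookkeeping deliberately elided:

static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
			 struct sge_fl *fl, int len, int complete)
{
	struct port_info *pi = netdev_priv(qs->netdev);
	struct sk_buff *skb = NULL;

	if (!qs->nomem) {
		skb = napi_get_frags(&qs->napi);
		qs->nomem = !skb;	/* latch failure for the fast path */
	}
	if (!skb)
		return;			/* sketch: real code recycles the chunk */

	/* ... append fl's page chunk as a fragment of skb (elided) ... */

	if (!complete)
		return;			/* wait for the rest of the packet */

	qs->nomem = 0;
	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
	napi_gro_frags(&qs->napi);	/* hand the assembled frags to GRO */
}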
2209 * @qs: the queue set corresponding to the response
2216 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2222 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2227 qs->txq[TXQ_ETH].processed += credits;
2231 qs->txq[TXQ_CTRL].processed += credits;
2235 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2239 qs->txq[TXQ_OFLD].processed += credits;
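
Lines 2216-2239 show the response-descriptor control word being decoded into per-queue credit updates, plus TXQ_RUNNING clears when the hardware signals GTS. A sketch, assuming F_RSPD_TXQn_GTS flag bits and G_RSPD_TXQn_CR credit extractors as the hardware-header names:

static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
{
	unsigned int credits;

	if (flags & F_RSPD_TXQ0_GTS)		/* assumed flag name */
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);

	credits = G_RSPD_TXQ0_CR(flags);	/* assumed extractor */
	if (credits)
		qs->txq[TXQ_ETH].processed += credits;

	credits = G_RSPD_TXQ2_CR(flags);
	if (credits)
		qs->txq[TXQ_CTRL].processed += credits;

	if (flags & F_RSPD_TXQ1_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);

	credits = G_RSPD_TXQ1_CR(flags);
	if (credits)
		qs->txq[TXQ_OFLD].processed += credits;
}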
2245 * @qs: the queue set whose Tx queues are to be examined
2252 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2256 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2267 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2310 * @qs: the queue set to which the response queue belongs
2322 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2325 struct sge_rspq *q = &qs->rspq;
2336 int lro = !!(qs->netdev->features & NETIF_F_GRO);
2373 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2380 lro_add_page(adap, qs, fl,
2408 handle_rsp_cntrl_info(qs, flags);
2450 check_ring_db(adap, qs, sleeping);
2453 if (unlikely(qs->txq_stopped != 0))
2454 restart_tx(qs);
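
Lines 2408-2454 show the tail of process_responses: accumulated doorbell state is flushed through check_ring_db(), and a memory barrier orders the .processed updates before the stopped-queue check so restart_tx() cannot miss a wakeup. That tail, roughly:

	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	smp_mb();	/* commit Tx queue .processed updates before the check */
	if (unlikely(qs->txq_stopped != 0))
		restart_tx(qs);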
2476 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2477 struct adapter *adap = qs->adap;
2478 int work_done = process_responses(adap, qs, budget);
2497 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2498 V_NEWTIMER(qs->rspq.next_holdoff) |
2499 V_NEWINDEX(qs->rspq.cidx));
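
Lines 2476-2499 are the NAPI poll loop: when a pass completes under budget, the handler re-arms the response queue by writing the GTS register with the next holdoff timer and the new consumer index. A sketch; the napi_complete_done() call in the under-budget branch is an assumption:

static int napi_rx_handler(struct napi_struct *napi, int budget)
{
	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
	struct adapter *adap = qs->adap;
	int work_done = process_responses(adap, qs, budget);

	if (likely(work_done < budget)) {
		napi_complete_done(napi, work_done);	/* assumed */

		/* Re-arm the queue: new holdoff timer + consumer index. */
		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
			     V_NEWTIMER(qs->rspq.next_holdoff) |
			     V_NEWINDEX(qs->rspq.cidx));
	}
	return work_done;
}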
2515 * @qs: the queue set owning the response queue
2526 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2529 struct sge_rspq *q = &qs->rspq;
2545 handle_rsp_cntrl_info(qs, flags);
2559 check_ring_db(adap, qs, sleeping);
2562 if (unlikely(qs->txq_stopped != 0))
2563 restart_tx(qs);
2585 struct sge_qset *qs = rspq_to_qset(q);
2591 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2596 napi_schedule(&qs->napi);
2606 struct sge_qset *qs = cookie;
2607 struct adapter *adap = qs->adap;
2608 struct sge_rspq *q = &qs->rspq;
2611 if (process_responses(adap, qs, -1) == 0)
2625 struct sge_qset *qs = cookie;
2626 struct sge_rspq *q = &qs->rspq;
2630 if (handle_responses(qs->adap, q) < 0)
2646 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2650 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2657 process_responses(adap, &adap->sge.qs[1], -1)) {
2658 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2673 static int rspq_check_napi(struct sge_qset *qs)
2675 struct sge_rspq *q = &qs->rspq;
2677 if (!napi_is_scheduled(&qs->napi) &&
2679 napi_schedule(&qs->napi);
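
Lines 2673-2679 give rspq_check_napi almost whole: schedule NAPI only if it is not already scheduled and a new response is pending. Reconstructed, with is_new_response() assumed as the pending-descriptor test and a 1/0 return meaning "new packets seen":

static inline int rspq_check_napi(struct sge_qset *qs)
{
	struct sge_rspq *q = &qs->rspq;

	if (!napi_is_scheduled(&qs->napi) &&
	    is_new_response(&q->desc[q->cidx], q)) {	/* assumed test */
		napi_schedule(&qs->napi);
		return 1;
	}
	return 0;
}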
2696 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2700 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2702 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2734 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2735 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2772 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2789 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2806 struct sge_qset *qs0 = &adap->sge.qs[0];
2824 napi_schedule(&adap->sge.qs[1].napi);
2922 struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
2923 struct port_info *pi = netdev_priv(qs->netdev);
2928 if (__netif_tx_trylock(qs->tx_q)) {
2929 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2931 __netif_tx_unlock(qs->tx_q);
2934 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2935 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2937 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2943 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
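
Lines 2922-2943 outline the Tx reclaim timer: both the Ethernet and offload queues are reclaimed opportunistically under trylocks, and the timer period adapts to how much work was found. A sketch, assuming the TX_RECLAIM_TIMER_CHUNK/TX_RECLAIM_PERIOD constants, the SGE_TXQ_PER_SET bound, the pi->adapter field, and the period back-off policy:

static void sge_timer_tx(struct timer_list *t)
{
	struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
	struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;		/* assumed field */
	unsigned int tbd[SGE_TXQ_PER_SET] = { 0, 0 };	/* assumed bound */
	unsigned long next_period;

	if (__netif_tx_trylock(qs->tx_q)) {
		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
						    TX_RECLAIM_TIMER_CHUNK);
		__netif_tx_unlock(qs->tx_q);
	}

	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
						     TX_RECLAIM_TIMER_CHUNK);
		spin_unlock(&qs->txq[TXQ_OFLD].lock);
	}

	/* Back off less when there was more to reclaim (assumed policy). */
	next_period = TX_RECLAIM_PERIOD >>
		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) / TX_RECLAIM_TIMER_CHUNK);
	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}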
2963 struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
2964 struct port_info *pi = netdev_priv(qs->netdev);
2969 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2974 if (napi_is_scheduled(&qs->napi))
2980 if (status & (1 << qs->rspq.cntxt_id)) {
2981 qs->rspq.starved++;
2982 if (qs->rspq.credits) {
2983 qs->rspq.credits--;
2984 refill_rspq(adap, &qs->rspq, 1);
2985 qs->rspq.restarted++;
2987 1 << qs->rspq.cntxt_id);
2992 if (qs->fl[0].credits < qs->fl[0].size)
2993 __refill_fl(adap, &qs->fl[0]);
2994 if (qs->fl[1].credits < qs->fl[1].size)
2995 __refill_fl(adap, &qs->fl[1]);
3000 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
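
Lines 2963-3000 are the Rx side of the housekeeping timer: a starved response queue gets one credit back via refill_rspq(), and under-filled free lists are topped up before the timer re-arms. The starvation branch, with the lock selection at 2969 and the NAPI check at 2974 elided, and assuming A_SG_RSPQ_FL_STATUS is the status register being read and acknowledged:

	u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);	/* assumed reg */

	if (status & (1 << qs->rspq.cntxt_id)) {
		qs->rspq.starved++;
		if (qs->rspq.credits) {
			qs->rspq.credits--;
			refill_rspq(adap, &qs->rspq, 1);
			qs->rspq.restarted++;
			t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
				     1 << qs->rspq.cntxt_id); /* ack starvation */
		}
	}

	if (qs->fl[0].credits < qs->fl[0].size)
		__refill_fl(adap, &qs->fl[0]);
	if (qs->fl[1].credits < qs->fl[1].size)
		__refill_fl(adap, &qs->fl[1]);

	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);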
3005 * @qs: the SGE queue set
3011 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3013 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
3014 qs->rspq.polling = p->polling;
3015 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
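
Lines 3013-3015 give t3_update_qset_coalesce whole: the holdoff timer is stored at ten times the microsecond value with a floor of 1 (so it can never be 0), and the NAPI poll callback is switched between the Ethernet and offload handlers according to the polling mode:

void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{
	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
	qs->rspq.polling = p->polling;
	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
}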
3040 struct sge_qset *q = &adapter->sge.qs[id];
3213 struct sge_qset *q = &adap->sge.qs[i];
3236 struct sge_qset *q = &adap->sge.qs[i];
3256 t3_free_qset(adap, &adap->sge.qs[i]);
3305 struct sge_qset *qs = &adap->sge.qs[i];
3307 cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
3308 cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
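
Finally, lines 3305-3308 show shutdown flushing the per-qset resume work before the queues go away, pairing with the queue_work() calls in restart_tx() above. The loop, assuming SGE_QSETS bounds the qset array:

	for (i = 0; i < SGE_QSETS; i++) {	/* assumed bound */
		struct sge_qset *qs = &adap->sge.qs[i];

		cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
		cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
	}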