Lines Matching defs:queue
141 * registers. For a given CPU, if the bit associated with a queue is not
149 /* bits 0..7 = TXQ SENT, one bit per queue.
150 * bits 8..15 = RXQ OCCUP, one bit per queue.
151 * bits 16..23 = RXQ FREE, one bit per queue.
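The three fields above pack per-queue status into a single per-CPU cause register. A minimal user-space sketch of decoding such a value, using only the bit positions named in the comment (the macro names and the sample value are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Field positions taken from the comment above: per-queue status bits
 * packed into one per-CPU cause register.
 */
#define TXQ_SENT(cause)   ((cause) & 0xffu)          /* bits 0..7  */
#define RXQ_OCCUP(cause)  (((cause) >> 8) & 0xffu)   /* bits 8..15 */
#define RXQ_FREE(cause)   (((cause) >> 16) & 0xffu)  /* bits 16..23 */

int main(void)
{
        uint32_t cause = 0x00010502;    /* hypothetical register value */

        for (int q = 0; q < 8; q++) {
                if (TXQ_SENT(cause) & (1u << q))
                        printf("TXQ %d: SENT bit set\n", q);
                if (RXQ_OCCUP(cause) & (1u << q))
                        printf("RXQ %d: OCCUP bit set\n", q);
                if (RXQ_FREE(cause) & (1u << q))
                        printf("RXQ %d: FREE bit set\n", q);
        }
        return 0;
}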
661 /* Number of this TX queue, in the range 0-7 */
709 /* RX queue number, in the range 0-7 */
744 * the first one to be used. Therefore, let's just allocate one queue.
944 /* Set rx queue offset */
1254 int queue;
1259 for (queue = 0; queue < txq_number; queue++) {
1260 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1262 q_map |= (1 << queue);
1268 for (queue = 0; queue < rxq_number; queue++) {
1269 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1272 q_map |= (1 << queue);
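The two enable loops above accumulate a one-bit-per-queue map before handing it to the hardware. A standalone sketch of the same pattern, assuming a queue is only enabled when its descriptor ring was allocated (the helper name and arguments are hypothetical):

#include <stdint.h>

/* Build a bitmap with one enable bit per queue that has a descriptor ring,
 * the way the q_map |= (1 << queue) loops above do.
 */
uint32_t build_q_map(const int *descs_per_queue, int nqueues)
{
        uint32_t q_map = 0;

        for (int queue = 0; queue < nqueues; queue++)
                if (descs_per_queue[queue] != 0)  /* skip unused queues */
                        q_map |= 1u << queue;

        return q_map;
}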
1374 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1375 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1380 if (queue == -1) {
1383 val = 0x1 | (queue << 1);
1391 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1392 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1397 if (queue == -1) {
1400 val = 0x1 | (queue << 1);
1409 /* Set all entries in Other Multicast MAC Table; queue==-1 means reject all */
1410 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1415 if (queue == -1) {
1420 val = 0x1 | (queue << 1);
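The three table setters above share one entry encoding: bit 0 accepts the frame, bits 1..3 carry the RX queue number, and queue == -1 clears the entry to reject. A sketch of that encoding; packing four one-byte entries into each 32-bit register write is an assumption made for illustration:

#include <stdint.h>

/* One filter-table entry per byte: bit 0 = accept, bits 1..3 = RX queue.
 * queue == -1 means "reject", i.e. a cleared entry. The byte value mirrors
 * the 0x1 | (queue << 1) expressions in the listing; replicating it across
 * the word is assumed so one write covers four entries.
 */
uint32_t filt_table_word(int queue)
{
        uint32_t entry;

        if (queue == -1)
                return 0;               /* reject: no accept bit, no queue */

        entry = 0x1u | ((uint32_t)queue << 1);
        return entry | (entry << 8) | (entry << 16) | (entry << 24);
}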
1432 /* All the queues are unmasked, but actually only the ones
1445 /* All the queues are masked, but actually only the ones
1457 /* All the queues are cleared, but actually only the ones
1477 int queue;
1491 /* Set CPU queue access map. CPUs are assigned to the RX and
1493 * queue, then it is assigned to the CPU associated with the
1494 * default RX queue.
1508 /* With only one TX queue, we configure a special case
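The comment fragments above describe spreading the RX (and TX) queues over CPUs modulo the CPU count, with a single TX queue following the CPU that owns the default RX queue. A sketch of building such per-CPU access bitmaps (all names and the bitmap layout are illustrative, not the driver's registers):

#include <stdint.h>

/* Per-CPU queue access maps: queue N is owned by CPU (N % nr_cpus); when
 * there is only one TX queue it is given to the CPU bound to the default
 * RX queue, as the comments in the listing describe.
 */
void build_cpu_queue_maps(int nr_cpus, int rxq_number, int txq_number,
                          int default_rxq_cpu, uint32_t *rxq_map,
                          uint32_t *txq_map)
{
        for (int cpu = 0; cpu < nr_cpus; cpu++) {
                rxq_map[cpu] = 0;
                txq_map[cpu] = 0;

                for (int rxq = 0; rxq < rxq_number; rxq++)
                        if (rxq % nr_cpus == cpu)
                                rxq_map[cpu] |= 1u << rxq;

                if (txq_number == 1) {
                        /* special case: single TX queue, one owner */
                        if (cpu == default_rxq_cpu)
                                txq_map[cpu] = 1u;
                } else {
                        for (int txq = 0; txq < txq_number; txq++)
                                if (txq % nr_cpus == cpu)
                                        txq_map[cpu] |= 1u << txq;
                }
        }
}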
1530 for (queue = 0; queue < txq_number; queue++) {
1531 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1532 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1597 int queue;
1619 for (queue = 0; queue < txq_number; queue++) {
1620 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1627 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1634 int queue)
1651 if (queue == -1) {
1656 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1664 const unsigned char *addr, int queue)
1669 if (queue != -1) {
1679 mvneta_set_ucast_addr(pp, addr[5], queue);
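mvneta_set_ucast_addr() above rewrites a single entry, shifting the same accept/queue byte into place with 8 * reg_offset. A sketch of that read-modify-write on one 32-bit table word; deriving the entry index from the low nibble of the last MAC byte is an assumption based on the addr[5] argument in the listing:

#include <stdint.h>

/* Update one byte-sized entry inside a 32-bit filter-table word.
 * queue == -1 clears the entry; otherwise the accept bit and queue number
 * are shifted into the selected byte, mirroring the
 * (0x01 | (queue << 1)) << (8 * reg_offset) expression above.
 */
uint32_t set_ucast_entry(uint32_t table_word, uint8_t last_byte, int queue)
{
        unsigned int reg_offset = (last_byte & 0xf) % 4;  /* byte in word */

        table_word &= ~(0xffu << (8 * reg_offset));       /* drop old entry */
        if (queue != -1)
                table_word |= (0x01u | ((uint32_t)queue << 1))
                              << (8 * reg_offset);

        return table_word;
}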
1855 /* Return tx queue pointer (find last set bit) according to <cause> returned
1857 * valid queue for matching the first one found in <cause>.
1862 int queue = fls(cause) - 1;
1864 return &pp->txqs[queue];
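The lookup above turns the highest set bit of the non-zero TX-done cause bitmap into a queue index with fls(cause) - 1. A user-space approximation of the kernel's fls() using a compiler builtin:

#include <stdint.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit,
 * 0 when no bit is set.
 */
static int fls_like(uint32_t x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

/* Bit N of <cause> stands for queue N, so the highest set bit selects the
 * queue, exactly as the fragment above computes it. <cause> must be non-zero.
 */
int tx_done_queue(uint32_t cause)
{
        return fls_like(cause) - 1;
}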
1867 /* Free tx queue skbuffs */
2053 pr_err("Can't refill queue %d. Done %d from %d\n",
2528 /* Return some buffers to the hardware queue; one at a time is too slow */
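The comment argues that handing buffers back one at a time is too slow. A generic sketch of batching the refill instead; refill_one() is a hypothetical stand-in, not the driver's API:

#include <stdbool.h>

/* Stub for a single-buffer refill; a real driver would allocate a buffer
 * and hand it to the hardware here. Hypothetical, for illustration only.
 */
static bool refill_one(void *rxq)
{
        (void)rxq;
        return true;
}

/* Return a whole batch of buffers to the hardware queue per call. */
int refill_batch(void *rxq, int missed)
{
        int done = 0;

        while (done < missed && refill_one(rxq))
                done++;

        return done;    /* caller retries any remainder later */
}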
3073 int queue)
3087 if (queue == -1)
3091 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
3108 int queue)
3119 if (queue == -1) {
3124 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
3140 int queue)
3145 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3150 if (queue == -1) {
3167 mvneta_set_other_mcast_addr(pp, crc_result, queue);
3267 * packets on the corresponding TXQ (Bit 0 is for TX queue 1).
3269 * packets on the corresponding RXQ (Bit 8 is for RX queue 0).
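Per this comment, one per-CPU causeRxTx value carries both the TX-done bits (0..7) and the RX bits (8..15). A sketch of splitting it before handling each side; the printouts are placeholders for real handlers:

#include <stdint.h>
#include <stdio.h>

/* Split a per-CPU causeRxTx value into its TX-done and RX halves, following
 * the bit ranges given in the comment above.
 */
void handle_cause_rx_tx(uint32_t cause_rx_tx)
{
        uint32_t tx_cause = cause_rx_tx & 0xffu;          /* bits 0..7  */
        uint32_t rx_cause = (cause_rx_tx >> 8) & 0xffu;   /* bits 8..15 */

        if (tx_cause)
                printf("TX-done work on queues: 0x%02x\n", (unsigned)tx_cause);
        if (rx_cause)
                printf("RX work on queues: 0x%02x\n", (unsigned)rx_cause);
}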
3421 int queue;
3424 for (queue = 0; queue < txq_number; queue++)
3425 mvneta_txq_done_force(pp, &pp->txqs[queue]);
3437 /* Rx/Tx queue initialization/cleanup methods */
3459 /* Set Rx descriptors queue starting address */
3488 /* Create a specified RX queue */
3504 /* Cleanup Rx queue */
3531 /* A queue must always have room for at least one skb.
3532 * Therefore, stop the queue when the number of free entries reaches
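The comment explains the stop condition: the ring must always be able to take one more, possibly multi-descriptor, skb. A sketch of that threshold check, plus a hedged wake-up counterpart (the driver's actual constants are not shown in the listing):

#include <stdbool.h>

/* Stop the software TX queue once free entries fall to the worst-case
 * descriptor count of a single skb, per the comment above.
 * max_descs_per_skb is a hypothetical bound.
 */
bool txq_should_stop(int free_entries, int max_descs_per_skb)
{
        return free_entries <= max_descs_per_skb;
}

/* Wake the queue only with a larger margin free so it does not flap around
 * the stop threshold (a common pattern; the exact wake threshold here is an
 * assumption).
 */
bool txq_should_wake(int free_entries, int max_descs_per_skb)
{
        return free_entries > 2 * max_descs_per_skb;
}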
3576 /* Set Tx descriptors queue starting address */
3583 /* Create and initialize a tx queue */
3628 /* Set Tx descriptors queue starting address and size */
3643 int queue;
3645 for (queue = 0; queue < txq_number; queue++)
3646 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3652 int queue;
3654 for (queue = 0; queue < rxq_number; queue++)
3655 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3662 int queue;
3664 for (queue = 0; queue < rxq_number; queue++) {
3665 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3669 __func__, queue);
3681 int queue;
3683 for (queue = 0; queue < txq_number; queue++) {
3684 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3687 __func__, queue);
4350 /* Map the default receive queue to the elected CPU */
4353 /* We update the TX queue map only if we have one
4354 * queue. In this case, we associate the TX queue with
4355 * the CPU bound to the default RX queue
4506 * queue interrupts
4680 int queue;
4682 for (queue = 0; queue < rxq_number; queue++) {
4683 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4690 for (queue = 0; queue < txq_number; queue++) {
4691 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4758 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
5189 static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
5212 mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
5342 int queue;
5355 for (queue = 0; queue < txq_number; queue++) {
5356 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5357 txq->id = queue;
5367 for (queue = 0; queue < rxq_number; queue++) {
5368 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5369 rxq->id = queue;
5766 int queue;
5788 for (queue = 0; queue < rxq_number; queue++) {
5789 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5794 for (queue = 0; queue < txq_number; queue++) {
5795 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5813 int err, queue;
5840 for (queue = 0; queue < rxq_number; queue++) {
5841 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5847 for (queue = 0; queue < txq_number; queue++) {
5848 struct mvneta_tx_queue *txq = &pp->txqs[queue];