Lines Matching defs:queue
136 * registers. For a given CPU, if the bit associated with a queue is not
144 /* bits 0..7 = TXQ SENT, one bit per queue.
145 * bits 8..15 = RXQ OCCUP, one bit per queue.
146 * bits 16..23 = RXQ FREE, one bit per queue.
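
The bit layout quoted above (driver lines 144-146) gives each queue a fixed bit in the per-CPU cause word: TXQ SENT in bits 0..7, RXQ OCCUP in bits 8..15, RXQ FREE in bits 16..23. A minimal sketch of decoding such a word, assuming the stock kernel BIT() helper; the function names below are illustrative, not the driver's:

    #include <linux/bits.h>
    #include <linux/types.h>

    /* Illustrative decoders for the per-CPU port/queue cause word. */
    static inline bool cause_txq_sent(u32 cause, unsigned int txq)
    {
            return cause & BIT(txq);        /* bits 0..7   */
    }

    static inline bool cause_rxq_occup(u32 cause, unsigned int rxq)
    {
            return cause & BIT(8 + rxq);    /* bits 8..15  */
    }

    static inline bool cause_rxq_free(u32 cause, unsigned int rxq)
    {
            return cause & BIT(16 + rxq);   /* bits 16..23 */
    }
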
621 /* Number of this TX queue, in the range 0-7 */
669 /* rx queue number, in the range 0-7 */
704 * the first one to be used. Therefore, let's just allocate one queue.
904 /* Set rx queue offset */
1214 int queue;
1219 for (queue = 0; queue < txq_number; queue++) {
1220 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1222 q_map |= (1 << queue);
1228 for (queue = 0; queue < rxq_number; queue++) {
1229 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1232 q_map |= (1 << queue);
1334 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1335 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1340 if (queue == -1) {
1343 val = 0x1 | (queue << 1);
1351 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1352 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1357 if (queue == -1) {
1360 val = 0x1 | (queue << 1);
1369 /* Set all entries in Other Multicast MAC Table; queue==-1 means reject all */
1370 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1375 if (queue == -1) {
1380 val = 0x1 | (queue << 1);
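
All three helpers above store the same entry byte, 0x1 | (queue << 1): bit 0 marks the entry as a "pass" entry and the higher bits carry the destination queue, while queue == -1 writes 0 so matching frames are rejected. A hedged sketch of that encoding (the helper name is illustrative, not the driver's):

    #include <linux/types.h>

    /* Illustrative encoder for a filtering-table entry byte:
     * queue == -1 -> 0 (reject); otherwise bit 0 = pass, bits 1.. = queue.
     */
    static inline u8 mac_table_entry(int queue)
    {
            return (queue == -1) ? 0 : (u8)(0x1 | (queue << 1));
    }
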
1392 /* All the queues are unmasked, but actually only the ones
1405 /* All the queues are masked, but actually only the ones
1417 /* All the queues are cleared, but actually only the ones
1437 int queue;
1451 /* Set CPU queue access map. CPUs are assigned to the RX and
1453 * queue, then it is assigned to the CPU associated with the
1454 * default RX queue.
1468 /* With only one TX queue we configure a special case
1490 for (queue = 0; queue < txq_number; queue++) {
1491 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1492 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1557 int queue;
1579 for (queue = 0; queue < txq_number; queue++) {
1580 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1587 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1594 int queue)
1611 if (queue == -1) {
1616 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1624 int queue)
1629 if (queue != -1) {
1639 mvneta_set_ucast_addr(pp, addr[5], queue);
1819 /* Return tx queue pointer (find last set bit) according to <cause> returned
1821 * valid queue for matching the first one found in <cause>.
1826 int queue = fls(cause) - 1;
1828 return &pp->txqs[queue];
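
fls() returns the 1-based index of the most significant set bit, so fls(cause) - 1 picks the highest-numbered TX queue flagged in <cause>; for example, cause = 0x5 (queues 0 and 2 pending) gives fls(0x5) = 3 and therefore queue 2. A minimal sketch of that lookup, valid only while cause is non-zero as the comment above requires:

    #include <linux/bitops.h>       /* fls() */

    /* Illustrative: map a non-zero cause bitmap to its highest TX queue. */
    static struct mvneta_tx_queue *cause_to_txq(struct mvneta_port *pp, u32 cause)
    {
            return &pp->txqs[fls(cause) - 1];
    }
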
1831 /* Free tx queue skbuffs */
2008 pr_err("Can't refill queue %d. Done %d from %d\n",
2441 /* return some buffers to the hardware queue; one at a time is too slow */
2927 int queue)
2941 if (queue == -1)
2945 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2962 int queue)
2973 if (queue == -1) {
2978 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2994 int queue)
2999 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3004 if (queue == -1) {
3021 mvneta_set_other_mcast_addr(pp, crc_result, queue);
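
The recurring (0x01 | (queue << 1)) << (8 * reg_offset) expression in the unicast and multicast helpers above packs the entry byte into one of the four byte slots of a 32-bit table register, with reg_offset selecting the slot. A hedged sketch of that read-modify-write, reusing the driver's mvreg_read()/mvreg_write() accessors but with an illustrative reg_addr parameter:

    /* Illustrative: update one byte-sized entry inside a 32-bit table register. */
    static void table_entry_set(struct mvneta_port *pp, u32 reg_addr,
                                int reg_offset, int queue)
    {
            u32 reg = mvreg_read(pp, reg_addr);

            reg &= ~(0xffu << (8 * reg_offset));      /* clear the old entry */
            if (queue != -1)
                    reg |= (0x01 | (queue << 1)) << (8 * reg_offset);
            mvreg_write(pp, reg_addr, reg);
    }
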
3121 * packets on the corresponding TXQ (Bit 0 is for TX queue 0).
3123 * packets on the corresponding RXQ (Bit 8 is for RX queue 0).
3274 int queue;
3277 for (queue = 0; queue < txq_number; queue++)
3278 mvneta_txq_done_force(pp, &pp->txqs[queue]);
3290 /* Rx/Tx queue initialization/cleanup methods */
3312 /* Set Rx descriptors queue starting address */
3341 /* Create a specified RX queue */
3357 /* Cleanup Rx queue */
3384 /* A queue must always have room for at least one skb.
3385 * Therefore, stop the queue when the number of free entries reaches
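
The comment above (driver lines 3384-3385) states the TX flow-control rule: stop the queue once it can no longer accept one maximally fragmented skb. A hedged sketch of such a check, with assumed ->size/->count fields and an nq netdev_queue handle (the names are not necessarily the driver's):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>       /* MAX_SKB_FRAGS */

    /* Illustrative: stop the software queue when the free descriptors
     * can no longer hold one worst-case (maximally fragmented) skb.
     */
    static void maybe_stop_txq(struct netdev_queue *nq,
                               const struct mvneta_tx_queue *txq)
    {
            if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
                    netif_tx_stop_queue(nq);
    }
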
3431 /* Set Tx descriptors queue starting address */
3438 /* Create and initialize a tx queue */
3485 /* Set Tx descriptors queue starting address and size */
3500 int queue;
3502 for (queue = 0; queue < txq_number; queue++)
3503 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3509 int queue;
3511 for (queue = 0; queue < rxq_number; queue++)
3512 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3519 int queue;
3521 for (queue = 0; queue < rxq_number; queue++) {
3522 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3526 __func__, queue);
3538 int queue;
3540 for (queue = 0; queue < txq_number; queue++) {
3541 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3544 __func__, queue);
4157 /* Map the default receive queue to the
4162 /* We update the TX queue map only if we have one
4163 * queue. In this case we associate the TX queue with
4164 * the CPU bound to the default RX queue
4317 * queue interrupts
4487 int queue;
4489 for (queue = 0; queue < rxq_number; queue++) {
4490 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4497 for (queue = 0; queue < txq_number; queue++) {
4498 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4556 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4950 int queue;
4963 for (queue = 0; queue < txq_number; queue++) {
4964 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4965 txq->id = queue;
4975 for (queue = 0; queue < rxq_number; queue++) {
4976 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4977 rxq->id = queue;
5334 int queue;
5356 for (queue = 0; queue < rxq_number; queue++) {
5357 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5362 for (queue = 0; queue < txq_number; queue++) {
5363 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5381 int err, queue;
5408 for (queue = 0; queue < rxq_number; queue++) {
5409 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5415 for (queue = 0; queue < txq_number; queue++) {
5416 struct mvneta_tx_queue *txq = &pp->txqs[queue];