Cross-reference hits for the identifier "qs", grouped by source tree, directory and file.

/kernel/linux/linux-5.10/fs/ocfs2/cluster/

  quorum.c
    96:  struct o2quo_state *qs = &o2quo_state;  [o2quo_make_decision(), local]
    98:  spin_lock(&qs->qs_lock);  [o2quo_make_decision()]
    100: lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);  [o2quo_make_decision()]
    102: lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);  [o2quo_make_decision()]
    105: "lowest: %d (%sreachable)\n", qs->qs_heartbeating,  [o2quo_make_decision()]
    106: qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");  [o2quo_make_decision()]
    108: if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||  [o2quo_make_decision()]
    109: qs->qs_heartbeating == 1)  [o2quo_make_decision()]
    112: if (qs->qs_heartbeating & 1) {  [o2quo_make_decision()]
    115: quorum = (qs ...  [o2quo_make_decision()]
    164: o2quo_set_hold(struct o2quo_state *qs, u8 node)  [o2quo_set_hold(), argument]
    176: o2quo_clear_hold(struct o2quo_state *qs, u8 node)  [o2quo_clear_hold(), argument]
    199: struct o2quo_state *qs = &o2quo_state;  [o2quo_hb_up(), local]
    223: struct o2quo_state *qs = &o2quo_state;  [o2quo_hb_down(), local]
    248: struct o2quo_state *qs = &o2quo_state;  [o2quo_hb_still_up(), local]
    267: struct o2quo_state *qs = &o2quo_state;  [o2quo_conn_up(), local]
    293: struct o2quo_state *qs = &o2quo_state;  [o2quo_conn_err(), local]
    317: struct o2quo_state *qs = &o2quo_state;  [o2quo_init(), local]
    325: struct o2quo_state *qs = &o2quo_state;  [o2quo_exit(), local]
    (additional matches elided)
/kernel/linux/linux-6.6/fs/ocfs2/cluster/

  quorum.c
    94:  struct o2quo_state *qs = &o2quo_state;  [o2quo_make_decision(), local]
    96:  spin_lock_bh(&qs->qs_lock);  [o2quo_make_decision()]
    98:  lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);  [o2quo_make_decision()]
    100: lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);  [o2quo_make_decision()]
    103: "lowest: %d (%sreachable)\n", qs->qs_heartbeating,  [o2quo_make_decision()]
    104: qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");  [o2quo_make_decision()]
    106: if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||  [o2quo_make_decision()]
    107: qs->qs_heartbeating == 1)  [o2quo_make_decision()]
    110: if (qs->qs_heartbeating & 1) {  [o2quo_make_decision()]
    113: quorum = (qs ...  [o2quo_make_decision()]
    162: o2quo_set_hold(struct o2quo_state *qs, u8 node)  [o2quo_set_hold(), argument]
    174: o2quo_clear_hold(struct o2quo_state *qs, u8 node)  [o2quo_clear_hold(), argument]
    197: struct o2quo_state *qs = &o2quo_state;  [o2quo_hb_up(), local]
    221: struct o2quo_state *qs = &o2quo_state;  [o2quo_hb_down(), local]
    246: struct o2quo_state *qs = &o2quo_state;  [o2quo_hb_still_up(), local]
    265: struct o2quo_state *qs = &o2quo_state;  [o2quo_conn_up(), local]
    291: struct o2quo_state *qs = &o2quo_state;  [o2quo_conn_err(), local]
    315: struct o2quo_state *qs = &o2quo_state;  [o2quo_init(), local]
    323: struct o2quo_state *qs = &o2quo_state;  [o2quo_exit(), local]
    (additional matches elided)
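The quorum.c excerpts above show the shape of the o2quo decision: a heartbeat bitmap and a connectivity bitmap, cached counts of both, and a special role for the lowest-numbered heartbeating node. Below is a minimal userspace sketch of that bitmap-and-count pattern; the exact majority rule on the truncated line (113/115) is not visible in the hits, so the simple "more than half wins, the lowest node breaks a tie" rule used here is an assumption for illustration only, not ocfs2's actual policy.

#include <stdbool.h>

#define MAX_NODES 255

struct quo_state {
	bool hb[MAX_NODES];    /* nodes we currently see heartbeating */
	bool conn[MAX_NODES];  /* nodes we hold a network connection to */
	int heartbeating;      /* cached count of set bits in hb[]   */
	int connected;         /* cached count of set bits in conn[] */
};

/* Analogue of find_first_bit(): lowest-numbered heartbeating node. */
static int lowest_hb_node(const struct quo_state *qs)
{
	for (int i = 0; i < MAX_NODES; i++)
		if (qs->hb[i])
			return i;
	return MAX_NODES;
}

/*
 * Illustrative decision only: stay up when connected to more than half of
 * the heartbeating nodes; on an exact split, side with the partition that
 * can reach the lowest-numbered node.  (Assumed rule, not ocfs2's.)
 */
static bool have_quorum(const struct quo_state *qs, int this_node)
{
	int lowest = lowest_hb_node(qs);
	bool lowest_reachable = lowest < MAX_NODES &&
				(lowest == this_node || qs->conn[lowest]);

	if (!qs->hb[this_node] || qs->heartbeating == 1)
		return true;                    /* nothing to decide against */
	if (2 * qs->connected > qs->heartbeating)
		return true;
	if (2 * qs->connected == qs->heartbeating)
		return lowest_reachable;        /* tie-break on lowest node */
	return false;
}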
/kernel/linux/linux-6.6/kernel/bpf/

  queue_stack_maps.c
    30:  static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)  [queue_stack_map_is_empty(), argument]
    32:  return qs->head == qs->tail;  [queue_stack_map_is_empty()]
    35:  static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)  [queue_stack_map_is_full(), argument]
    37:  u32 head = qs->head + 1;  [queue_stack_map_is_full()]
    39:  if (unlikely(head >= qs->size))  [queue_stack_map_is_full()]
    42:  return head == qs->tail;  [queue_stack_map_is_full()]
    67:  struct bpf_queue_stack *qs;  [queue_stack_map_alloc(), local]
    71:  queue_size = sizeof(*qs) + size * attr->value_size;  [queue_stack_map_alloc()]
    73:  qs ...  [queue_stack_map_alloc()]
    89:  struct bpf_queue_stack *qs = bpf_queue_stack(map);  [queue_stack_map_free(), local]
    96:  struct bpf_queue_stack *qs = bpf_queue_stack(map);  [__queue_map_get(), local]
    130: struct bpf_queue_stack *qs = bpf_queue_stack(map);  [__stack_map_get(), local]
    192: struct bpf_queue_stack *qs = bpf_queue_stack(map);  [queue_stack_map_push_elem(), local]
    (additional matches elided)
/kernel/linux/linux-5.10/kernel/bpf/

  queue_stack_maps.c
    30:  static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)  [queue_stack_map_is_empty(), argument]
    32:  return qs->head == qs->tail;  [queue_stack_map_is_empty()]
    35:  static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)  [queue_stack_map_is_full(), argument]
    37:  u32 head = qs->head + 1;  [queue_stack_map_is_full()]
    39:  if (unlikely(head >= qs->size))  [queue_stack_map_is_full()]
    42:  return head == qs->tail;  [queue_stack_map_is_full()]
    71:  struct bpf_queue_stack *qs;  [queue_stack_map_alloc(), local]
    75:  cost = queue_size = sizeof(*qs) + size * attr->value_size;  [queue_stack_map_alloc()]
    81:  qs ...  [queue_stack_map_alloc()]
    102: struct bpf_queue_stack *qs = bpf_queue_stack(map);  [queue_stack_map_free(), local]
    109: struct bpf_queue_stack *qs = bpf_queue_stack(map);  [__queue_map_get(), local]
    143: struct bpf_queue_stack *qs = bpf_queue_stack(map);  [__stack_map_get(), local]
    205: struct bpf_queue_stack *qs = bpf_queue_stack(map);  [queue_stack_map_push_elem(), local]
    (additional matches elided)
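Both copies of queue_stack_maps.c carry the same head/tail ring arithmetic at lines 30-42: the map is empty when head == tail, and full when advancing head (with wrap-around at size) would land on tail, so one slot is always left unused. A standalone sketch of that invariant follows; the fixed-size element storage and push helper are made up here for completeness, since only the index checks appear in the hits.

#include <stdbool.h>
#include <stdint.h>

struct ring {
	uint32_t head;     /* next slot to write */
	uint32_t tail;     /* next slot to read  */
	uint32_t size;     /* number of slots; usable capacity is size - 1 */
	uint64_t slot[8];  /* illustrative storage */
};

static bool ring_is_empty(const struct ring *r)
{
	return r->head == r->tail;
}

static bool ring_is_full(const struct ring *r)
{
	uint32_t head = r->head + 1;

	if (head >= r->size)
		head = 0;                  /* wrap around */
	return head == r->tail;
}

/* Push one value; fails instead of overwriting when the ring is full. */
static bool ring_push(struct ring *r, uint64_t val)
{
	if (ring_is_full(r))
		return false;
	r->slot[r->head] = val;
	if (++r->head >= r->size)
		r->head = 0;
	return true;
}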
/kernel/linux/linux-5.10/drivers/net/ethernet/cavium/thunder/

  nicvf_queues.c
    381:  struct queue_set *qs = nic->qs;  [nicvf_refill_rbdr(), local]
    382:  int rbdr_idx = qs->rbdr_cnt;  [nicvf_refill_rbdr()]
    394:  rbdr = &qs->rbdr[rbdr_idx];  [nicvf_refill_rbdr()]
    403:  if (qcount >= (qs->rbdr_len - 1))  [nicvf_refill_rbdr()]
    406:  refill_rb_cnt = qs->rbdr_len - qcount - 1;  [nicvf_refill_rbdr()]
    627:  struct queue_set *qs, int qidx)  [nicvf_reclaim_snd_queue()]
    639:  struct queue_set *qs, int qidx)  [nicvf_reclaim_rcv_queue()]
    649:  struct queue_set *qs, int qidx)  [nicvf_reclaim_cmp_queue()]
    744:  static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,  [nicvf_rcv_queue_config(), argument]
    626:  nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)  [nicvf_reclaim_snd_queue(), argument]
    638:  nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)  [nicvf_reclaim_rcv_queue(), argument]
    648:  nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)  [nicvf_reclaim_cmp_queue(), argument]
    817:  nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable)  [nicvf_cmp_queue_config(), argument]
    858:  nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable)  [nicvf_snd_queue_config(), argument]
    916:  nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable)  [nicvf_rbdr_config(), argument]
    956:  struct queue_set *qs = nic->qs;  [nicvf_qset_config(), local]
    991:  struct queue_set *qs = nic->qs;  [nicvf_free_resources(), local]
    1009: struct queue_set *qs = nic->qs;  [nicvf_alloc_resources(), local]
    1038: struct queue_set *qs;  [nicvf_set_qset_resources(), local]
    1066: struct queue_set *qs = nic->qs;  [nicvf_config_data_transfer(), local]
    (additional matches elided)

  nicvf_ethtool.c
    216: for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  [nicvf_get_qset_strings()]
    224: for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  [nicvf_get_qset_strings()]
    281: (nic->qs->rq_cnt + nic->qs->sq_cnt);  [nicvf_get_sset_count()]
    289: (snic->qs->rq_cnt + snic->qs->sq_cnt);  [nicvf_get_sset_count()]
    305: for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  [nicvf_get_qset_stats()]
    308: *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)  [nicvf_get_qset_stats()]
    312: for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  [nicvf_get_qset_stats()]
    315: *((*data)++) = ((u64 *)&nic->qs ...  [nicvf_get_qset_stats()]
    470: struct queue_set *qs = nic->qs;  [nicvf_get_ringparam(), local]
    482: struct queue_set *qs = nic->qs;  [nicvf_set_ringparam(), local]
    (additional matches elided)

  nicvf_main.c
    320:  mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;  [nicvf_config_cpi()]
    442:  nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;  [nicvf_request_sqs()]
    445:  nic->snicvf[sqs]->qs->rq_cnt = rx_queues;  [nicvf_request_sqs()]
    450:  nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;  [nicvf_request_sqs()]
    453:  nic->snicvf[sqs]->qs->sq_cnt = tx_queues;  [nicvf_request_sqs()]
    457:  nic->snicvf[sqs]->qs->cq_cnt =  [nicvf_request_sqs()]
    458:  max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);  [nicvf_request_sqs()]
    670:  sq = &nic->qs->sq[cqe_tx->sq_idx];  [nicvf_snd_pkt_handler()]
    855:  struct queue_set *qs ...  [nicvf_cq_intr_handler(), local]
    991:  struct queue_set *qs = nic->qs;  [nicvf_handle_qs_err(), local]
    1331: struct queue_set *qs = nic->qs;  [nicvf_stop(), local]
    1456: struct queue_set *qs = nic->qs;  [nicvf_open(), local]
    1666: struct queue_set *qs = nic->qs;  [nicvf_update_stats(), local]
    (additional matches elided)
/kernel/linux/linux-6.6/drivers/net/ethernet/cavium/thunder/

  nicvf_queues.c
    382:  struct queue_set *qs = nic->qs;  [nicvf_refill_rbdr(), local]
    383:  int rbdr_idx = qs->rbdr_cnt;  [nicvf_refill_rbdr()]
    395:  rbdr = &qs->rbdr[rbdr_idx];  [nicvf_refill_rbdr()]
    404:  if (qcount >= (qs->rbdr_len - 1))  [nicvf_refill_rbdr()]
    407:  refill_rb_cnt = qs->rbdr_len - qcount - 1;  [nicvf_refill_rbdr()]
    628:  struct queue_set *qs, int qidx)  [nicvf_reclaim_snd_queue()]
    640:  struct queue_set *qs, int qidx)  [nicvf_reclaim_rcv_queue()]
    650:  struct queue_set *qs, int qidx)  [nicvf_reclaim_cmp_queue()]
    745:  static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,  [nicvf_rcv_queue_config(), argument]
    627:  nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx)  [nicvf_reclaim_snd_queue(), argument]
    639:  nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx)  [nicvf_reclaim_rcv_queue(), argument]
    649:  nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx)  [nicvf_reclaim_cmp_queue(), argument]
    818:  nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable)  [nicvf_cmp_queue_config(), argument]
    859:  nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable)  [nicvf_snd_queue_config(), argument]
    917:  nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable)  [nicvf_rbdr_config(), argument]
    957:  struct queue_set *qs = nic->qs;  [nicvf_qset_config(), local]
    992:  struct queue_set *qs = nic->qs;  [nicvf_free_resources(), local]
    1010: struct queue_set *qs = nic->qs;  [nicvf_alloc_resources(), local]
    1039: struct queue_set *qs;  [nicvf_set_qset_resources(), local]
    1067: struct queue_set *qs = nic->qs;  [nicvf_config_data_transfer(), local]
    (additional matches elided)

  nicvf_ethtool.c
    217: for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  [nicvf_get_qset_strings()]
    225: for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  [nicvf_get_qset_strings()]
    282: (nic->qs->rq_cnt + nic->qs->sq_cnt);  [nicvf_get_sset_count()]
    290: (snic->qs->rq_cnt + snic->qs->sq_cnt);  [nicvf_get_sset_count()]
    306: for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  [nicvf_get_qset_stats()]
    309: *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)  [nicvf_get_qset_stats()]
    313: for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  [nicvf_get_qset_stats()]
    316: *((*data)++) = ((u64 *)&nic->qs ...  [nicvf_get_qset_stats()]
    475: struct queue_set *qs = nic->qs;  [nicvf_get_ringparam(), local]
    489: struct queue_set *qs = nic->qs;  [nicvf_set_ringparam(), local]
    (additional matches elided)

  nicvf_main.c
    319:  mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;  [nicvf_config_cpi()]
    441:  nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;  [nicvf_request_sqs()]
    444:  nic->snicvf[sqs]->qs->rq_cnt = rx_queues;  [nicvf_request_sqs()]
    449:  nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;  [nicvf_request_sqs()]
    452:  nic->snicvf[sqs]->qs->sq_cnt = tx_queues;  [nicvf_request_sqs()]
    456:  nic->snicvf[sqs]->qs->cq_cnt =  [nicvf_request_sqs()]
    457:  max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);  [nicvf_request_sqs()]
    667:  sq = &nic->qs->sq[cqe_tx->sq_idx];  [nicvf_snd_pkt_handler()]
    852:  struct queue_set *qs ...  [nicvf_cq_intr_handler(), local]
    988:  struct queue_set *qs = nic->qs;  [nicvf_handle_qs_err(), local]
    1328: struct queue_set *qs = nic->qs;  [nicvf_stop(), local]
    1453: struct queue_set *qs = nic->qs;  [nicvf_open(), local]
    1662: struct queue_set *qs = nic->qs;  [nicvf_update_stats(), local]
    (additional matches elided)
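The nicvf_main.c hits in both trees (nicvf_request_sqs(), lines 441-458 / 442-458) show how a secondary queue set is sized: receive and send queue counts are clamped to the per-QS maxima, and the completion-queue count is simply the larger of the two, since every RQ and every SQ posts completions. A hedged sketch of that sizing rule is below; the MAX_* values are placeholders, not the driver's real limits.

#include <stdint.h>

#define MAX_RCV_QUEUES_PER_QS 8   /* placeholder limits, for illustration */
#define MAX_SND_QUEUES_PER_QS 8

struct queue_set_cfg {
	uint8_t rq_cnt;   /* receive queues     */
	uint8_t sq_cnt;   /* send queues        */
	uint8_t cq_cnt;   /* completion queues  */
};

/* Size one queue set for the requested rx/tx queue counts. */
static void qset_size(struct queue_set_cfg *qs, int rx_queues, int tx_queues)
{
	qs->rq_cnt = rx_queues > MAX_RCV_QUEUES_PER_QS ?
		     MAX_RCV_QUEUES_PER_QS : (uint8_t)rx_queues;
	qs->sq_cnt = tx_queues > MAX_SND_QUEUES_PER_QS ?
		     MAX_SND_QUEUES_PER_QS : (uint8_t)tx_queues;
	/* every RQ and SQ needs completions, so CQs = max(rq_cnt, sq_cnt) */
	qs->cq_cnt = qs->rq_cnt > qs->sq_cnt ? qs->rq_cnt : qs->sq_cnt;
}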
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb3/

  sge.c
    725:  * @qs: the queue set  [init_qset_cntxt() kerneldoc]
    730:  static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)  [init_qset_cntxt(), argument]
    732:  qs->rspq.cntxt_id = id;  [init_qset_cntxt()]
    733:  qs->fl[0].cntxt_id = 2 * id;  [init_qset_cntxt()]
    734:  qs->fl[1].cntxt_id = 2 * id + 1;  [init_qset_cntxt()]
    735:  qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;  [init_qset_cntxt()]
    736:  qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;  [init_qset_cntxt()]
    737:  qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;  [init_qset_cntxt()]
    738:  qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;  [init_qset_cntxt()]
    739:  qs ...  [init_qset_cntxt()]
    1250: t3_stop_tx_queue(struct netdev_queue *txq, struct sge_qset *qs, struct sge_txq *q)  [t3_stop_tx_queue(), argument]
    1272: struct sge_qset *qs;  [t3_eth_xmit(), local]
    1432: struct sge_qset *qs = txq_to_qset(q, qid);  [check_desc_avail(), local]
    1528: struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);  [restart_ctrlq(), local]
    1746: struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_OFLD].qresume_tsk);  [restart_offloadq(), local]
    1838: struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];  [t3_offload_tx(), local]
    1862: struct sge_qset *qs = rspq_to_qset(q);  [offload_enqueue(), local]
    1900: struct sge_qset *qs = container_of(napi, struct sge_qset, napi);  [ofld_poll(), local]
    1987: restart_tx(struct sge_qset *qs)  [restart_tx(), argument]
    2086: struct sge_qset *qs = rspq_to_qset(rq);  [rx_eth(), local]
    2132: lro_add_page(struct adapter *adap, struct sge_qset *qs, struct sge_fl *fl, int len, int complete)  [lro_add_page(), argument]
    2218: handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)  [handle_rsp_cntrl_info(), argument]
    2254: check_ring_db(struct adapter *adap, struct sge_qset *qs, unsigned int sleeping)  [check_ring_db(), argument]
    2324: process_responses(struct adapter *adap, struct sge_qset *qs, int budget)  [process_responses(), argument]
    2478: struct sge_qset *qs = container_of(napi, struct sge_qset, napi);  [napi_rx_handler(), local]
    2528: process_pure_responses(struct adapter *adap, struct sge_qset *qs, struct rsp_desc *r)  [process_pure_responses(), argument]
    2587: struct sge_qset *qs = rspq_to_qset(q);  [handle_responses(), local]
    2608: struct sge_qset *qs = cookie;  [t3_sge_intr_msix(), local]
    2627: struct sge_qset *qs = cookie;  [t3_sge_intr_msix_napi(), local]
    2675: rspq_check_napi(struct sge_qset *qs)  [rspq_check_napi(), argument]
    2924: struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);  [sge_timer_tx(), local]
    2965: struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);  [sge_timer_rx(), local]
    3013: t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)  [t3_update_qset_coalesce(), argument]
    3304: struct sge_qset *qs = &adap->sge.qs[i];  [t3_sge_stop(), local]
    (additional matches elided)

  cxgb3_main.c
    410:  adap->sge.qs[qidx].  [request_msix_data_irqs()]
    413:  &adap->sge.qs[qidx]);  [request_msix_data_irqs()]
    417:  &adap->sge.qs[qidx]);  [request_msix_data_irqs()]
    437:  &adapter->sge.qs[i]);  [free_irq_resources()]
    447:  while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {  [await_mgmt_replies()]
    460:  unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;  [init_tp_parity()]
    596:  struct sge_qset *qs = &adap->sge.qs[i];  [ring_dbs(), local]
    598:  if (qs->adap)  [ring_dbs()]
    600:  t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs ...  [ring_dbs()]
    609:  struct sge_qset *qs = &adap->sge.qs[i];  [init_napi(), local]
    2005: struct sge_qset *qs;  [set_coalesce(), local]
    2219: struct sge_qset *qs = ...  [cxgb_extension_ioctl(), local]
    2623: struct sge_qset *qs = &adapter->sge.qs[qidx];  [cxgb_netpoll(), local]
    2771: struct sge_qset *qs = &adapter->sge.qs[0];  [t3_adap_check_task(), local]
    (additional matches elided)
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb3/

  sge.c
    721:  * @qs: the queue set  [init_qset_cntxt() kerneldoc]
    726:  static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)  [init_qset_cntxt(), argument]
    728:  qs->rspq.cntxt_id = id;  [init_qset_cntxt()]
    729:  qs->fl[0].cntxt_id = 2 * id;  [init_qset_cntxt()]
    730:  qs->fl[1].cntxt_id = 2 * id + 1;  [init_qset_cntxt()]
    731:  qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;  [init_qset_cntxt()]
    732:  qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;  [init_qset_cntxt()]
    733:  qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;  [init_qset_cntxt()]
    734:  qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;  [init_qset_cntxt()]
    735:  qs ...  [init_qset_cntxt()]
    1245: t3_stop_tx_queue(struct netdev_queue *txq, struct sge_qset *qs, struct sge_txq *q)  [t3_stop_tx_queue(), argument]
    1267: struct sge_qset *qs;  [t3_eth_xmit(), local]
    1427: struct sge_qset *qs = txq_to_qset(q, qid);  [check_desc_avail(), local]
    1523: struct sge_qset *qs = container_of(w, struct sge_qset, ...  [restart_ctrlq(), local]
    1743: struct sge_qset *qs = container_of(w, struct sge_qset, ...  [restart_offloadq(), local]
    1836: struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];  [t3_offload_tx(), local]
    1860: struct sge_qset *qs = rspq_to_qset(q);  [offload_enqueue(), local]
    1898: struct sge_qset *qs = container_of(napi, struct sge_qset, napi);  [ofld_poll(), local]
    1985: restart_tx(struct sge_qset *qs)  [restart_tx(), argument]
    2088: struct sge_qset *qs = rspq_to_qset(rq);  [rx_eth(), local]
    2134: lro_add_page(struct adapter *adap, struct sge_qset *qs, struct sge_fl *fl, int len, int complete)  [lro_add_page(), argument]
    2216: handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)  [handle_rsp_cntrl_info(), argument]
    2252: check_ring_db(struct adapter *adap, struct sge_qset *qs, unsigned int sleeping)  [check_ring_db(), argument]
    2322: process_responses(struct adapter *adap, struct sge_qset *qs, int budget)  [process_responses(), argument]
    2476: struct sge_qset *qs = container_of(napi, struct sge_qset, napi);  [napi_rx_handler(), local]
    2526: process_pure_responses(struct adapter *adap, struct sge_qset *qs, struct rsp_desc *r)  [process_pure_responses(), argument]
    2585: struct sge_qset *qs = rspq_to_qset(q);  [handle_responses(), local]
    2606: struct sge_qset *qs = cookie;  [t3_sge_intr_msix(), local]
    2625: struct sge_qset *qs = cookie;  [t3_sge_intr_msix_napi(), local]
    2673: rspq_check_napi(struct sge_qset *qs)  [rspq_check_napi(), argument]
    2922: struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);  [sge_timer_tx(), local]
    2963: struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);  [sge_timer_rx(), local]
    3011: t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)  [t3_update_qset_coalesce(), argument]
    3305: struct sge_qset *qs = &adap->sge.qs[i];  [t3_sge_stop(), local]
    (additional matches elided)
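Both sge.c versions show the queue-set context numbering in full at init_qset_cntxt(): the response queue reuses the set id, the two free lists interleave as 2*id and 2*id+1, and each TX queue class takes id offset from its own firmware base. The following is a standalone restatement of that numbering scheme; the FW_* base values here are placeholders, not the hardware's real constants.

#include <stdint.h>

enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL, TXQ_MAX };

/* Placeholder firmware id bases, for illustration only. */
#define FW_TUNNEL_SGEEC_START 0x000
#define FW_TUNNEL_TID_START   0x100
#define FW_OFLD_SGEEC_START   0x200
#define FW_CTRL_SGEEC_START   0x300

struct qset_ids {
	uint32_t rspq_id;
	uint32_t fl_id[2];
	uint32_t txq_id[TXQ_MAX];
	uint32_t eth_token;
};

/* Mirror of the numbering visible at sge.c lines 728-734 / 732-738. */
static void init_qset_ids(struct qset_ids *qs, unsigned int id)
{
	qs->rspq_id = id;                /* response queue shares the set id */
	qs->fl_id[0] = 2 * id;           /* free lists come in pairs         */
	qs->fl_id[1] = 2 * id + 1;
	qs->txq_id[TXQ_ETH]  = FW_TUNNEL_SGEEC_START + id;
	qs->eth_token        = FW_TUNNEL_TID_START + id;
	qs->txq_id[TXQ_OFLD] = FW_OFLD_SGEEC_START + id;
	qs->txq_id[TXQ_CTRL] = FW_CTRL_SGEEC_START + id;
}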
/kernel/linux/linux-5.10/drivers/soc/qcom/

  socinfo.c
    487: struct qcom_socinfo *qs;  [qcom_socinfo_probe(), local]
    498: qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL);  [qcom_socinfo_probe()]
    499: if (!qs)  [qcom_socinfo_probe()]
    502: qs->attr.family = "Snapdragon";  [qcom_socinfo_probe()]
    503: qs->attr.machine = socinfo_machine(&pdev->dev,  [qcom_socinfo_probe()]
    505: qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u",  [qcom_socinfo_probe()]
    507: qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",  [qcom_socinfo_probe()]
    511: qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,  [qcom_socinfo_probe()]
    515: qs ...  [qcom_socinfo_probe()]
    531: struct qcom_socinfo *qs = platform_get_drvdata(pdev);  [qcom_socinfo_remove(), local]
    (additional matches elided)
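The socinfo.c hits outline the usual probe shape: a devm-allocated per-device struct whose soc attribute strings are built with devm_kasprintf() so nothing needs explicit freeing, stashed as drvdata so remove() can find it again. A condensed sketch of that pattern follows; the SMEM lookups and real field values are omitted, and the "example_socinfo" names are invented for illustration.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

struct example_socinfo {
	struct soc_device *soc_dev;
	struct soc_device_attribute attr;
};

static int example_socinfo_probe(struct platform_device *pdev)
{
	struct example_socinfo *qs;

	qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;

	/* devm_kasprintf() ties the strings' lifetime to the device */
	qs->attr.family = "Snapdragon";
	qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", 0u);
	qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", 1u, 0u);
	if (!qs->attr.soc_id || !qs->attr.revision)
		return -ENOMEM;

	qs->soc_dev = soc_device_register(&qs->attr);
	if (IS_ERR(qs->soc_dev))
		return PTR_ERR(qs->soc_dev);

	platform_set_drvdata(pdev, qs);
	return 0;
}

static int example_socinfo_remove(struct platform_device *pdev)
{
	struct example_socinfo *qs = platform_get_drvdata(pdev);

	soc_device_unregister(qs->soc_dev);
	return 0;
}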
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4vf/

  cxgb4vf_main.c
    367:  int qs, msi;  [name_msix_vecs(), local]
    369:  for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {  [name_msix_vecs()]
    371:  "%s-%d", dev->name, qs);  [name_msix_vecs()]
    634:  int qs;  [setup_sge_queues(), local]
    636:  for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {  [setup_sge_queues()]
    644:  netdev_get_tx_queue(dev, qs),  [setup_sge_queues()]
    665:  int qs;  [setup_sge_queues(), local]
    707:  int qs, err;  [setup_rss(), local]
    1622: int qs;  [cxgb4vf_set_ringparam(), local]
    1803: int qs;  [collect_sge_port_stats(), local]
    2037: int qs, r = (uintptr_t)v - 1;  [sge_qinfo_show(), local]
    2173: int qs, r = (uintptr_t)v - 1;  [sge_qstats_show(), local]
    2663: int q10g, n10g, qidx, pidx, qs;  [cfg_queues(), local]
    (additional matches elided)
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb4vf/

  cxgb4vf_main.c
    367:  int qs, msi;  [name_msix_vecs(), local]
    369:  for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {  [name_msix_vecs()]
    371:  "%s-%d", dev->name, qs);  [name_msix_vecs()]
    634:  int qs;  [setup_sge_queues(), local]
    636:  for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {  [setup_sge_queues()]
    644:  netdev_get_tx_queue(dev, qs),  [setup_sge_queues()]
    665:  int qs;  [setup_sge_queues(), local]
    707:  int qs, err;  [setup_rss(), local]
    1626: int qs;  [cxgb4vf_set_ringparam(), local]
    1811: int qs;  [collect_sge_port_stats(), local]
    2045: int qs, r = (uintptr_t)v - 1;  [sge_qinfo_show(), local]
    2181: int qs, r = (uintptr_t)v - 1;  [sge_qstats_show(), local]
    2671: int q10g, n10g, qidx, pidx, qs;  [cfg_queues(), local]
    (additional matches elided)
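In both cxgb4vf trees, name_msix_vecs() (lines 367-371) is a plain labeling loop: per-queue interrupt vectors start at a fixed offset and each one is named "<netdev>-<qs>". A tiny standalone version of that loop is below; the MSIX_IQFLINT offset and the buffer sizes are assumptions for illustration.

#include <stdio.h>

#define MSIX_IQFLINT 1          /* assumed index of the first data-queue vector */
#define MAX_VECS     16
#define NAMELEN      32

/* Label one vector per queue set as "<devname>-<qs>". */
static void name_queue_vecs(char names[MAX_VECS][NAMELEN],
			    const char *devname, int nqsets)
{
	int qs, msi;

	for (qs = 0, msi = MSIX_IQFLINT; qs < nqsets && msi < MAX_VECS; qs++, msi++)
		snprintf(names[msi], NAMELEN, "%s-%d", devname, qs);
}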
/kernel/linux/linux-6.6/drivers/net/ethernet/hisilicon/hns/

  hns_ae_adapt.c
    84:  vf_cb = kzalloc(struct_size(vf_cb, ae_handle.qs, qnum_per_vf),  [hns_ae_get_handle()]
    112: ae_handle->qs[i] = &ring_pair_cb->q;  [hns_ae_get_handle()]
    113: ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];  [hns_ae_get_handle()]
    114: ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];  [hns_ae_get_handle()]
    144: hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;  [hns_ae_put_handle()]
    158: ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);  [hns_ae_wait_flow_down()]
    190: hns_rcb_ring_enable_hw(handle->qs[i], val);  [hns_ae_ring_enable_all()]
    316: q = handle->qs[ ...  [hns_ae_set_mtu()]
    (additional matches elided)
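Lines 112-114 here (and 113-115 in the 5.10 copy further down) wire each ring-pair queue into the handle's qs[] array and give both of its rings a back-pointer to the owning queue, so ring code can reach its queue without extra lookups. A minimal mock of that ownership wiring, with a fixed-size array instead of the driver's per-VF sizing:

#include <stddef.h>

struct queue;

struct ring {
	struct queue *q;        /* back-pointer to the owning queue */
};

struct queue {
	struct ring rx_ring;
	struct ring tx_ring;
};

struct handle {
	size_t qnum;
	struct queue *qs[4];    /* fixed size here; the driver sizes this per VF */
};

static void attach_queues(struct handle *h, struct queue *pool, size_t qnum)
{
	h->qnum = qnum;
	for (size_t i = 0; i < qnum && i < 4; i++) {
		h->qs[i] = &pool[i];
		h->qs[i]->rx_ring.q = h->qs[i];   /* ring -> queue back-pointers */
		h->qs[i]->tx_ring.q = h->qs[i];
	}
}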
/kernel/linux/linux-6.6/drivers/soc/qcom/

  socinfo.c
    737: struct qcom_socinfo *qs;  [qcom_socinfo_probe(), local]
    748: qs = devm_kzalloc(&pdev->dev, sizeof(*qs), GFP_KERNEL);  [qcom_socinfo_probe()]
    749: if (!qs)  [qcom_socinfo_probe()]
    752: qs->attr.family = "Snapdragon";  [qcom_socinfo_probe()]
    753: qs->attr.machine = socinfo_machine(&pdev->dev,  [qcom_socinfo_probe()]
    755: qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u",  [qcom_socinfo_probe()]
    757: qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",  [qcom_socinfo_probe()]
    761: qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL,  [qcom_socinfo_probe()]
    765: qs ...  [qcom_socinfo_probe()]
    781: struct qcom_socinfo *qs = platform_get_drvdata(pdev);  [qcom_socinfo_remove(), local]
    (additional matches elided)
/kernel/linux/linux-5.10/fs/qnx4/

  inode.c
    46:  struct qnx4_sb_info *qs;  [qnx4_remount(), local]
    49:  qs = qnx4_sb(sb);  [qnx4_remount()]
    50:  qs->Version = QNX4_VERSION;  [qnx4_remount()]
    191: struct qnx4_sb_info *qs;  [qnx4_fill_super(), local]
    193: qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);  [qnx4_fill_super()]
    194: if (!qs)  [qnx4_fill_super()]
    196: s->s_fs_info = qs;  [qnx4_fill_super()]
    240: struct qnx4_sb_info *qs = qnx4_sb(sb);  [qnx4_kill_sb(), local]
    242: if (qs) {  [qnx4_kill_sb()]
    243: kfree(qs ...  [qnx4_kill_sb()]
    (additional matches elided)
/kernel/linux/linux-6.6/fs/qnx4/

  inode.c
    46:  struct qnx4_sb_info *qs;  [qnx4_remount(), local]
    49:  qs = qnx4_sb(sb);  [qnx4_remount()]
    50:  qs->Version = QNX4_VERSION;  [qnx4_remount()]
    191: struct qnx4_sb_info *qs;  [qnx4_fill_super(), local]
    193: qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);  [qnx4_fill_super()]
    194: if (!qs)  [qnx4_fill_super()]
    196: s->s_fs_info = qs;  [qnx4_fill_super()]
    240: struct qnx4_sb_info *qs = qnx4_sb(sb);  [qnx4_kill_sb(), local]
    242: if (qs) {  [qnx4_kill_sb()]
    243: kfree(qs ...  [qnx4_kill_sb()]
    (additional matches elided)
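Both qnx4 trees show the same lifetime pattern for the per-mount private data: qnx4_sb_info is kzalloc'd in fill_super, parked in s->s_fs_info, and kfree'd (after a NULL check) in kill_sb. A skeletal version of just that allocation and teardown is below, with everything filesystem-specific stripped out; the "examplefs" names are invented.

#include <linux/fs.h>
#include <linux/slab.h>

struct examplefs_sb_info {
	unsigned int Version;
};

static int examplefs_fill_super(struct super_block *s, void *data, int silent)
{
	struct examplefs_sb_info *qs;

	qs = kzalloc(sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	s->s_fs_info = qs;          /* visible to every later VFS callback */

	/* ... read the on-disk superblock, set s_op, build the root inode ... */
	return 0;
}

static void examplefs_kill_sb(struct super_block *sb)
{
	struct examplefs_sb_info *qs = sb->s_fs_info;

	kill_block_super(sb);
	if (qs) {
		kfree(qs);          /* private info outlives generic teardown */
		sb->s_fs_info = NULL;
	}
}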
/kernel/linux/linux-5.10/drivers/net/ethernet/hisilicon/hns/

  hns_ae_adapt.c
    111: ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);  [hns_ae_get_handle()]
    113: ae_handle->qs[i] = &ring_pair_cb->q;  [hns_ae_get_handle()]
    114: ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];  [hns_ae_get_handle()]
    115: ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];  [hns_ae_get_handle()]
    145: hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;  [hns_ae_put_handle()]
    159: ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);  [hns_ae_wait_flow_down()]
    191: hns_rcb_ring_enable_hw(handle->qs[ ...  [hns_ae_ring_enable_all()]
    (additional matches elided)
/kernel/linux/linux-5.10/drivers/staging/fieldbus/anybuss/

  host.c
    382: struct kfifo qs[3];  [struct member]
    875: static bool qs_have_work(struct kfifo *qs, size_t num)  [qs_have_work(), argument]
    881: for (i = 0; i < num; i++, qs++) {  [qs_have_work()]
    882: ret = kfifo_out_peek(qs, &t, sizeof(t));  [qs_have_work()]
    892: struct kfifo *qs = cd->qs;  [process_qs(), local]
    893: size_t nqs = ARRAY_SIZE(cd->qs);  [process_qs()]
    895: for (i = 0; i < nqs; i++, qs++)  [process_qs()]
    896: process_q(cd, qs);  [process_qs()]
    968: struct kfifo *qs ...  [qthread_fn(), local]
    (additional matches elided)
/kernel/linux/linux-6.6/drivers/staging/fieldbus/anybuss/

  host.c
    382: struct kfifo qs[3];  [struct member]
    875: static bool qs_have_work(struct kfifo *qs, size_t num)  [qs_have_work(), argument]
    881: for (i = 0; i < num; i++, qs++) {  [qs_have_work()]
    882: ret = kfifo_out_peek(qs, &t, sizeof(t));  [qs_have_work()]
    892: struct kfifo *qs = cd->qs;  [process_qs(), local]
    893: size_t nqs = ARRAY_SIZE(cd->qs);  [process_qs()]
    895: for (i = 0; i < nqs; i++, qs++)  [process_qs()]
    896: process_q(cd, qs);  [process_qs()]
    968: struct kfifo *qs ...  [qthread_fn(), local]
    (additional matches elided)
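host.c keeps an array of three kfifos of pending tasks and polls them non-destructively: qs_have_work() peeks the head of each queue to see whether the worker thread has anything to do, and process_qs() walks the same array to drain them. A stripped-down sketch of that peek-then-process loop follows; the element type (a bare pointer) and the process_q callback are simplifications of the driver's real task objects.

#include <linux/kernel.h>
#include <linux/kfifo.h>

struct controller {
	struct kfifo qs[3];     /* pending-work queues, as in the driver */
};

/* Return true if any queue has a queued element, without consuming it. */
static bool qs_have_work(struct kfifo *qs, size_t num)
{
	void *t;
	size_t i;

	for (i = 0; i < num; i++, qs++)
		if (kfifo_out_peek(qs, &t, sizeof(t)) == sizeof(t))
			return true;
	return false;
}

/* Drain every queue in turn using the supplied per-queue handler. */
static void process_all(struct controller *cd,
			void (*process_q)(struct controller *, struct kfifo *))
{
	struct kfifo *qs = cd->qs;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(cd->qs); i++, qs++)
		process_q(cd, qs);
}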
/kernel/linux/linux-5.10/fs/qnx6/

  inode.c
    303: struct qnx6_sb_info *qs;  [qnx6_fill_super(), local]
    308: qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL);  [qnx6_fill_super()]
    309: if (!qs)  [qnx6_fill_super()]
    311: s->s_fs_info = qs;  [qnx6_fill_super()]
    478: kfree(qs);  [qnx6_fill_super()]
    485: struct qnx6_sb_info *qs = QNX6_SB(sb);  [qnx6_put_super(), local]
    486: brelse(qs->sb_buf);  [qnx6_put_super()]
    487: iput(qs->longfile);  [qnx6_put_super()]
    488: iput(qs->inodes);  [qnx6_put_super()]
    489: kfree(qs);  [qnx6_put_super()]
    (additional matches elided)
/kernel/linux/linux-6.6/fs/qnx6/

  inode.c
    303: struct qnx6_sb_info *qs;  [qnx6_fill_super(), local]
    308: qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL);  [qnx6_fill_super()]
    309: if (!qs)  [qnx6_fill_super()]
    311: s->s_fs_info = qs;  [qnx6_fill_super()]
    476: kfree(qs);  [qnx6_fill_super()]
    483: struct qnx6_sb_info *qs = QNX6_SB(sb);  [qnx6_put_super(), local]
    484: brelse(qs->sb_buf);  [qnx6_put_super()]
    485: iput(qs->longfile);  [qnx6_put_super()]
    486: iput(qs->inodes);  [qnx6_put_super()]
    487: kfree(qs);  [qnx6_put_super()]
    (additional matches elided)
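The qnx6 hits add the unmount side of the same s_fs_info pattern: put_super releases the cached superblock buffer_head, drops the two internal inodes, and only then frees the private info. A schematic version is below, assuming fields shaped like those named in the excerpt; the "example6" names are invented.

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct example6_sb_info {
	struct buffer_head *sb_buf;   /* cached on-disk superblock     */
	struct inode *longfile;       /* internal long-filename inode  */
	struct inode *inodes;         /* internal inode-table inode    */
};

static void example6_put_super(struct super_block *sb)
{
	struct example6_sb_info *qs = sb->s_fs_info;

	brelse(qs->sb_buf);     /* release the buffer_head reference  */
	iput(qs->longfile);     /* drop the internal inode references */
	iput(qs->inodes);
	kfree(qs);
	sb->s_fs_info = NULL;
}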