/kernel/linux/linux-5.10/arch/arm/mach-omap1/

  devices.c
    133  unsigned rx_req, unsigned tx_req,  [in omap_mmc_add()]
    153  res[3].start = tx_req;  [in omap_mmc_add()]
    190  unsigned rx_req, tx_req;  [in omap1_init_mmc() local]
    203  tx_req = 21;  [in omap1_init_mmc()]
    211  tx_req = 54;  [in omap1_init_mmc()]
    219  rx_req, tx_req, mmc_data[i]);  [in omap1_init_mmc()]
    131  omap_mmc_add(const char *name, int id, unsigned long base, unsigned long size, unsigned int irq, unsigned rx_req, unsigned tx_req, struct omap_mmc_platform_data *data)  [omap_mmc_add() argument]

/kernel/linux/linux-6.6/arch/arm/mach-omap1/

  devices.c
    123  unsigned rx_req, unsigned tx_req,  [in omap_mmc_add()]
    143  res[3].start = tx_req;  [in omap_mmc_add()]
    178  unsigned rx_req, tx_req;  [in omap1_init_mmc() local]
    191  tx_req = 21;  [in omap1_init_mmc()]
    199  tx_req = 54;  [in omap1_init_mmc()]
    207  rx_req, tx_req, mmc_data[i]);  [in omap1_init_mmc()]
    121  omap_mmc_add(const char *name, int id, unsigned long base, unsigned long size, unsigned int irq, unsigned rx_req, unsigned tx_req, struct omap_mmc_platform_data *data)  [omap_mmc_add() argument]

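Both omap1 entries show the same pattern: here tx_req and rx_req are system-DMA request numbers (tx_req is 21 for the first MMC controller and 54 for the second) that omap_mmc_add() packs into the platform device's resource table, with res[3].start carrying the TX request line. Below is a minimal userspace sketch of that packing; the struct, the register base, the IRQ, and the RX numbers are illustrative assumptions, not the kernel's struct resource or the real OMAP1 values.

/* Minimal userspace model (not the kernel API) of packing DMA request
 * numbers into a resource table the way omap_mmc_add() does. */
#include <stdio.h>

struct fake_resource {
    const char   *name;
    unsigned long start;   /* for a DMA entry, the request line number */
};

static void fake_mmc_add(unsigned long base, unsigned int irq,
                         unsigned rx_req, unsigned tx_req)
{
    struct fake_resource res[4] = {
        { .name = "regs", .start = base },
        { .name = "irq",  .start = irq },
        { .name = "rx",   .start = rx_req },
        { .name = "tx",   .start = tx_req },   /* mirrors res[3].start = tx_req */
    };

    for (int i = 0; i < 4; i++)
        printf("%-4s -> %lu\n", res[i].name, res[i].start);
}

int main(void)
{
    /* tx_req values 21 and 54 appear in the matches above; base, irq and
     * rx_req here are made-up placeholders */
    fake_mmc_add(0x10000000, 23, 22, 21);
    fake_mmc_add(0x10010000, 24, 55, 54);
    return 0;
}

The point of the pattern is that the MMC driver can later read the DMA request numbers back out of its resources instead of hard-coding them per board.
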
/kernel/linux/linux-5.10/drivers/infiniband/ulp/ipoib/

  ipoib_ib.c
    275  int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)  [in ipoib_dma_map_tx() argument]
    277  struct sk_buff *skb = tx_req->skb;  [in ipoib_dma_map_tx()]
    278  u64 *mapping = tx_req->mapping;  [in ipoib_dma_map_tx()]
    318  struct ipoib_tx_buf *tx_req)  [in ipoib_dma_unmap_tx()]
    320  struct sk_buff *skb = tx_req->skb;  [in ipoib_dma_unmap_tx()]
    321  u64 *mapping = tx_req->mapping;  [in ipoib_dma_unmap_tx()]
    389  struct ipoib_tx_buf *tx_req;  [in ipoib_ib_handle_tx_wc() local]
    400  tx_req = &priv->tx_ring[wr_id];  [in ipoib_ib_handle_tx_wc()]
    402  ipoib_dma_unmap_tx(priv, tx_req);  [in ipoib_ib_handle_tx_wc()]
    405  dev->stats.tx_bytes += tx_req ...  [in ipoib_ib_handle_tx_wc()]
    317  ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, struct ipoib_tx_buf *tx_req)  [ipoib_dma_unmap_tx() argument]
    541  post_send(struct ipoib_dev_priv *priv, unsigned int wr_id, struct ib_ah *address, u32 dqpn, struct ipoib_tx_buf *tx_req, void *head, int hlen)  [post_send() argument]
    570  struct ipoib_tx_buf *tx_req;  [ipoib_send() local]
    777  struct ipoib_tx_buf *tx_req;  [ipoib_ib_dev_stop_default() local]
    [additional matches not shown]

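The ipoib_ib.c hits trace one tx_req (a struct ipoib_tx_buf) through its life: ipoib_dma_map_tx() records a DMA address for the packet's linear part and each page fragment in tx_req->mapping[], and the completion handler ipoib_ib_handle_tx_wc() later finds the same slot via &priv->tx_ring[wr_id] and calls ipoib_dma_unmap_tx(). The userspace sketch below models only that map-then-unmap bookkeeping; the types and helpers are invented stand-ins, not the IPoIB or DMA-API definitions.

/* Sketch: map the linear part plus each fragment of a packet into
 * tx_req->mapping[], and unmap all of them when the completion for that
 * ring slot arrives. Everything here is a simplified model. */
#include <stdio.h>
#include <stdint.h>

#define MAX_FRAGS 4

struct fake_tx_buf {
    int      nr_frags;
    size_t   frag_len[MAX_FRAGS + 1];   /* [0] = linear part */
    uint64_t mapping[MAX_FRAGS + 1];    /* one DMA address per piece */
};

static uint64_t fake_dma_map(const void *cpu_addr, size_t len)
{
    (void)len;
    return (uint64_t)(uintptr_t)cpu_addr;   /* identity "mapping" for the model */
}

static int fake_map_tx(struct fake_tx_buf *tx_req, const void *head, size_t head_len,
                       const void *frags[], const size_t lens[], int nr_frags)
{
    tx_req->nr_frags = nr_frags;
    tx_req->frag_len[0] = head_len;
    tx_req->mapping[0] = fake_dma_map(head, head_len);
    for (int i = 0; i < nr_frags; i++) {
        tx_req->frag_len[i + 1] = lens[i];
        tx_req->mapping[i + 1] = fake_dma_map(frags[i], lens[i]);
    }
    return 0;
}

static void fake_unmap_tx(struct fake_tx_buf *tx_req)
{
    for (int i = 0; i <= tx_req->nr_frags; i++)
        printf("unmap %#llx len %zu\n",
               (unsigned long long)tx_req->mapping[i], tx_req->frag_len[i]);
}

int main(void)
{
    char head[64], f0[256], f1[512];
    const void *frags[] = { f0, f1 };
    const size_t lens[] = { sizeof(f0), sizeof(f1) };
    struct fake_tx_buf tx_req = { 0 };

    fake_map_tx(&tx_req, head, sizeof(head), frags, lens, 2);
    fake_unmap_tx(&tx_req);    /* what the TX completion path does */
    return 0;
}
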
  ipoib_cm.c
    699  struct ipoib_tx_buf *tx_req)  [in post_send()]
    701  ipoib_build_sge(priv, tx_req);  [in post_send()]
    711  struct ipoib_tx_buf *tx_req;  [in ipoib_cm_send() local]
    750  tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];  [in ipoib_cm_send()]
    751  tx_req->skb = skb;  [in ipoib_cm_send()]
    753  if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {  [in ipoib_cm_send()]
    778  rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);  [in ipoib_cm_send()]
    782  ipoib_dma_unmap_tx(priv, tx_req);  [in ipoib_cm_send()]
    799  struct ipoib_tx_buf *tx_req;  [in ipoib_cm_handle_tx_wc() local]
    811  tx_req ...  [in ipoib_cm_handle_tx_wc()]
    696  post_send(struct ipoib_dev_priv *priv, struct ipoib_cm_tx *tx, unsigned int wr_id, struct ipoib_tx_buf *tx_req)  [post_send() argument]
   1206  struct ipoib_tx_buf *tx_req;  [ipoib_cm_tx_destroy() local]
    [additional matches not shown]

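One detail worth noting in the ipoib_cm.c hits is the slot selection tx->tx_head & (ipoib_sendq_size - 1): the head counter is never wrapped explicitly; the mask does it, and that is only equivalent to a modulo when the send-queue size is a power of two. A tiny self-contained illustration with made-up names:

/* Sketch of the power-of-two ring indexing idiom seen in ipoib_cm_send(). */
#include <stdio.h>

#define SENDQ_SIZE 8   /* must be a power of two for the mask trick */

int main(void)
{
    unsigned int tx_head = 0;

    for (int pkt = 0; pkt < 20; pkt++) {
        unsigned int slot = tx_head & (SENDQ_SIZE - 1);
        printf("packet %2d -> ring slot %u\n", pkt, slot);
        tx_head++;   /* free-running counter; the mask handles wrap-around */
    }
    return 0;
}
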
  ipoib.h
    535  int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
    537  struct ipoib_tx_buf *tx_req);
    542  struct ipoib_tx_buf *tx_req)  [in ipoib_build_sge()]
    545  struct sk_buff *skb = tx_req->skb;  [in ipoib_build_sge()]
    548  u64 *mapping = tx_req->mapping;  [in ipoib_build_sge()]
    541  ipoib_build_sge(struct ipoib_dev_priv *priv, struct ipoib_tx_buf *tx_req)  [ipoib_build_sge() argument]

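ipoib_build_sge(), declared and defined inline in ipoib.h above, appears to turn the DMA addresses saved in tx_req->mapping[] into the scatter/gather list that the send work request points at. A hedged sketch of that translation, using simplified stand-ins for ib_sge and the TX buffer:

/* Sketch: build an {address, length} scatter/gather array from the
 * per-piece DMA addresses recorded at map time. Simplified model types. */
#include <stdint.h>
#include <stdio.h>

struct fake_sge { uint64_t addr; uint32_t length; };

static int build_sge(struct fake_sge *sge, const uint64_t *mapping,
                     const uint32_t *len, int nr_pieces)
{
    for (int i = 0; i < nr_pieces; i++) {
        sge[i].addr = mapping[i];
        sge[i].length = len[i];
    }
    return nr_pieces;   /* becomes the work request's num_sge */
}

int main(void)
{
    uint64_t mapping[] = { 0x1000, 0x2000, 0x3000 };
    uint32_t len[]     = { 64, 256, 512 };
    struct fake_sge sge[3];
    int n = build_sge(sge, mapping, len, 3);

    for (int i = 0; i < n; i++)
        printf("sge[%d]: addr=%#llx len=%u\n", i,
               (unsigned long long)sge[i].addr, (unsigned)sge[i].length);
    return 0;
}
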
/kernel/linux/linux-6.6/drivers/infiniband/ulp/ipoib/

  ipoib_ib.c
    275  int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)  [in ipoib_dma_map_tx() argument]
    277  struct sk_buff *skb = tx_req->skb;  [in ipoib_dma_map_tx()]
    278  u64 *mapping = tx_req->mapping;  [in ipoib_dma_map_tx()]
    318  struct ipoib_tx_buf *tx_req)  [in ipoib_dma_unmap_tx()]
    320  struct sk_buff *skb = tx_req->skb;  [in ipoib_dma_unmap_tx()]
    321  u64 *mapping = tx_req->mapping;  [in ipoib_dma_unmap_tx()]
    389  struct ipoib_tx_buf *tx_req;  [in ipoib_ib_handle_tx_wc() local]
    400  tx_req = &priv->tx_ring[wr_id];  [in ipoib_ib_handle_tx_wc()]
    402  ipoib_dma_unmap_tx(priv, tx_req);  [in ipoib_ib_handle_tx_wc()]
    405  dev->stats.tx_bytes += tx_req ...  [in ipoib_ib_handle_tx_wc()]
    317  ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv, struct ipoib_tx_buf *tx_req)  [ipoib_dma_unmap_tx() argument]
    541  post_send(struct ipoib_dev_priv *priv, unsigned int wr_id, struct ib_ah *address, u32 dqpn, struct ipoib_tx_buf *tx_req, void *head, int hlen)  [post_send() argument]
    570  struct ipoib_tx_buf *tx_req;  [ipoib_send() local]
    777  struct ipoib_tx_buf *tx_req;  [ipoib_ib_dev_stop_default() local]
    [additional matches not shown]

  ipoib_cm.c
    699  struct ipoib_tx_buf *tx_req)  [in post_send()]
    701  ipoib_build_sge(priv, tx_req);  [in post_send()]
    711  struct ipoib_tx_buf *tx_req;  [in ipoib_cm_send() local]
    750  tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];  [in ipoib_cm_send()]
    751  tx_req->skb = skb;  [in ipoib_cm_send()]
    753  if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {  [in ipoib_cm_send()]
    778  rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);  [in ipoib_cm_send()]
    782  ipoib_dma_unmap_tx(priv, tx_req);  [in ipoib_cm_send()]
    799  struct ipoib_tx_buf *tx_req;  [in ipoib_cm_handle_tx_wc() local]
    811  tx_req ...  [in ipoib_cm_handle_tx_wc()]
    696  post_send(struct ipoib_dev_priv *priv, struct ipoib_cm_tx *tx, unsigned int wr_id, struct ipoib_tx_buf *tx_req)  [post_send() argument]
   1202  struct ipoib_tx_buf *tx_req;  [ipoib_cm_tx_destroy() local]
    [additional matches not shown]

  ipoib.h
    535  int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
    537  struct ipoib_tx_buf *tx_req);
    542  struct ipoib_tx_buf *tx_req)  [in ipoib_build_sge()]
    545  struct sk_buff *skb = tx_req->skb;  [in ipoib_build_sge()]
    548  u64 *mapping = tx_req->mapping;  [in ipoib_build_sge()]
    541  ipoib_build_sge(struct ipoib_dev_priv *priv, struct ipoib_tx_buf *tx_req)  [ipoib_build_sge() argument]

/kernel/linux/linux-6.6/drivers/net/wireless/realtek/rtw89/

  core.c
    437  struct rtw89_core_tx_request *tx_req,  [in rtw89_core_tx_update_ampdu_info()]
    440  struct ieee80211_sta *sta = tx_req->sta;  [in rtw89_core_tx_update_ampdu_info()]
    441  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;  [in rtw89_core_tx_update_ampdu_info()]
    442  struct sk_buff *skb = tx_req->skb;  [in rtw89_core_tx_update_ampdu_info()]
    474  struct rtw89_core_tx_request *tx_req)  [in rtw89_core_tx_update_sec_key()]
    477  struct ieee80211_vif *vif = tx_req->vif;  [in rtw89_core_tx_update_sec_key()]
    478  struct ieee80211_sta *sta = tx_req->sta;  [in rtw89_core_tx_update_sec_key()]
    485  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;  [in rtw89_core_tx_update_sec_key()]
    486  struct sk_buff *skb = tx_req->skb;  [in rtw89_core_tx_update_sec_key()]
    552  struct rtw89_core_tx_request *tx_req,  [in rtw89_core_get_mgmt_rate()]
    436  rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, enum btc_pkt_type pkt_type)  [rtw89_core_tx_update_ampdu_info() argument]
    473  rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_core_tx_update_sec_key() argument]
    551  rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, const struct rtw89_chan *chan)  [rtw89_core_get_mgmt_rate() argument]
    574  rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_core_tx_get_mac_id() argument]
    590  rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_core_tx_update_mgmt_info() argument]
    623  rtw89_core_tx_update_h2c_info(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_core_tx_update_h2c_info() argument]
    666  __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, enum btc_pkt_type pkt_type)  [__rtw89_core_tx_check_he_qos_htc() argument]
    696  __rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [__rtw89_core_tx_adjust_he_qos_htc() argument]
    725  rtw89_core_tx_update_he_qos_htc(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, enum btc_pkt_type pkt_type)  [rtw89_core_tx_update_he_qos_htc() argument]
    749  rtw89_core_get_data_rate(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_core_get_data_rate() argument]
    777  rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_core_tx_update_data_info() argument]
    811  rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_core_tx_btc_spec_pkt_notify() argument]
    859  rtw89_core_tx_wake(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_core_tx_wake() argument]
    878  rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_core_tx_update_desc_info() argument]
    964  struct rtw89_core_tx_request tx_req = {0};  [rtw89_h2c_tx() local]
   1005  struct rtw89_core_tx_request tx_req = {0};  [rtw89_core_tx_write() local]
    [additional matches not shown]

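Taken together, the core.c hits outline a pipeline: rtw89_core_tx_request bundles the frame (skb), its context (vif, sta) and the descriptor under construction (desc_info), and the rtw89_core_tx_update_*() helpers each appear to fill in one aspect (A-MPDU, security key, rate, and so on) before the request is written out. The sketch below models only that "one request object, many small update passes" shape; the types, fields and values are invented, not the rtw89 definitions.

/* Sketch: a TX request object plus a chain of small update helpers that
 * each fill one part of the descriptor. Simplified stand-in types. */
#include <stdio.h>
#include <stdbool.h>

struct fake_desc_info { bool ampdu; int sec_keyid; int data_rate; };

struct fake_tx_request {
    const char            *skb;        /* the frame payload (stand-in) */
    int                    sta_id;     /* context: which station/vif */
    struct fake_desc_info  desc_info;  /* filled in by the update helpers */
};

static void tx_update_ampdu_info(struct fake_tx_request *tx_req)
{
    tx_req->desc_info.ampdu = (tx_req->sta_id >= 0);
}

static void tx_update_sec_key(struct fake_tx_request *tx_req)
{
    tx_req->desc_info.sec_keyid = 1;      /* placeholder key slot */
}

static void tx_update_data_info(struct fake_tx_request *tx_req)
{
    tx_req->desc_info.data_rate = 0x87;   /* placeholder rate code */
}

static void tx_update_desc_info(struct fake_tx_request *tx_req)
{
    /* one call per aspect, mirroring the rtw89_core_tx_update_*() chain */
    tx_update_ampdu_info(tx_req);
    tx_update_sec_key(tx_req);
    tx_update_data_info(tx_req);
}

int main(void)
{
    struct fake_tx_request tx_req = { .skb = "frame", .sta_id = 3 };

    tx_update_desc_info(&tx_req);
    printf("ampdu=%d keyid=%d rate=0x%x\n", tx_req.desc_info.ampdu,
           tx_req.desc_info.sec_keyid, tx_req.desc_info.data_rate);
    return 0;
}
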
  pci.c
   1194  struct rtw89_core_tx_request *tx_req)  [in rtw89_pci_txwd_submit()]
   1198  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;  [in rtw89_pci_txwd_submit()]
   1203  struct sk_buff *skb = tx_req->skb;  [in rtw89_pci_txwd_submit()]
   1254  struct rtw89_core_tx_request *tx_req)  [in rtw89_pci_fwcmd_submit()]
   1258  struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;  [in rtw89_pci_fwcmd_submit()]
   1262  struct sk_buff *skb = tx_req->skb;  [in rtw89_pci_fwcmd_submit()]
   1290  struct rtw89_core_tx_request *tx_req)  [in rtw89_pci_txbd_submit()]
   1300  return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);  [in rtw89_pci_txbd_submit()]
   1309  ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);  [in rtw89_pci_txbd_submit()]
   1331  static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, ...  [in rtw89_pci_tx_write() argument]
   1191  rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, struct rtw89_pci_tx_wd *txwd, struct rtw89_core_tx_request *tx_req)  [rtw89_pci_txwd_submit() argument]
   1251  rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, struct rtw89_pci_tx_bd_32 *txbd, struct rtw89_core_tx_request *tx_req)  [rtw89_pci_fwcmd_submit() argument]
   1287  rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, struct rtw89_pci_tx_bd_32 *txbd, struct rtw89_core_tx_request *tx_req)  [rtw89_pci_txbd_submit() argument]
   1374  rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_pci_ops_tx_write() argument]
    [additional matches not shown]

  core.h
   2974  int (*tx_write)(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req);
   4468  struct rtw89_core_tx_request *tx_req)  [in rtw89_hci_tx_write()]
   4470  return rtwdev->hci.ops->tx_write(rtwdev, tx_req);  [in rtw89_hci_tx_write()]
   4467  rtw89_hci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)  [rtw89_hci_tx_write() argument]

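core.h ties the two previous entries together: the tx_write hook in the HCI ops table (line 2974) is what rtw89_hci_tx_write() calls through, and the pci.c entry above provides rtw89_pci_ops_tx_write() as one implementation. A small sketch of that ops-table indirection with invented stand-in types:

/* Sketch: call the TX write path through a function-pointer ops table so
 * the core stays independent of the bus back end. Model types only. */
#include <stdio.h>

struct fake_tx_request { const char *skb; };

struct fake_hci_ops {
    int (*tx_write)(void *dev, struct fake_tx_request *tx_req);
};

struct fake_dev {
    const struct fake_hci_ops *ops;
};

static int pci_tx_write(void *dev, struct fake_tx_request *tx_req)
{
    (void)dev;
    printf("pci back end writes \"%s\"\n", tx_req->skb);
    return 0;
}

static const struct fake_hci_ops fake_pci_ops = { .tx_write = pci_tx_write };

static int hci_tx_write(struct fake_dev *dev, struct fake_tx_request *tx_req)
{
    return dev->ops->tx_write(dev, tx_req);   /* mirrors rtwdev->hci.ops->tx_write() */
}

int main(void)
{
    struct fake_dev dev = { .ops = &fake_pci_ops };
    struct fake_tx_request tx_req = { .skb = "beacon" };

    return hci_tx_write(&dev, &tx_req);
}
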
/kernel/linux/linux-5.10/drivers/media/platform/qcom/venus/

  hfi_venus.c
     68  u32 tx_req;  [member]
    197  qhdr->tx_req = 1;  [in venus_write_queue()]
    198  /* ensure tx_req is updated in memory */  [in venus_write_queue()]
    203  qhdr->tx_req = 0;  [in venus_write_queue()]
    204  /* ensure tx_req is updated in memory */  [in venus_write_queue()]
    238  struct iface_queue *queue, void *pkt, u32 *tx_req)  [in venus_read_queue()]
    274  *tx_req = 0;  [in venus_read_queue()]
    325  *tx_req = qhdr->tx_req ? 1 : 0;  [in venus_read_queue()]
    327  /* ensure rx_req is stored to memory and tx_req i ...  [in venus_read_queue()]
    237  venus_read_queue(struct venus_hfi_device *hdev, struct iface_queue *queue, void *pkt, u32 *tx_req)  [venus_read_queue() argument]
    625  u32 tx_req;  [venus_iface_msgq_read_nolock() local]
    658  u32 tx_req;  [venus_iface_dbgq_read_nolock() local]
    [additional matches not shown]

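In the Venus HFI code, tx_req is not a buffer but a u32 flag inside the shared interface-queue header (the "member" hit at line 68). venus_write_queue() sets and clears qhdr->tx_req around its index updates, with barriers the driver comments as "ensure tx_req is updated in memory", and venus_read_queue() hands the peer's flag back to callers such as venus_iface_msgq_read_nolock(), presumably so they can decide whether to signal the firmware. The C11 sketch below models only that flag-plus-barrier publication pattern in a single process; it makes no claim about the exact HFI queue semantics.

/* Sketch: publish a request flag in a shared queue header with explicit
 * ordering, as a model of the barrier comments seen above. */
#include <stdatomic.h>
#include <stdio.h>

struct fake_qhdr {
    atomic_uint write_idx;
    atomic_uint tx_req;    /* "signal me when you make progress" request flag */
};

static struct fake_qhdr shared_qhdr;   /* stands in for the header in shared memory */

static void writer_publish(unsigned int new_write_idx, int want_signal)
{
    atomic_store_explicit(&shared_qhdr.write_idx, new_write_idx, memory_order_release);

    /* update the flag after the index, then fence so both stores are visible
     * before any doorbell would be raised; the driver comments this step as
     * "ensure tx_req is updated in memory" */
    atomic_store_explicit(&shared_qhdr.tx_req, want_signal ? 1u : 0u,
                          memory_order_release);
    atomic_thread_fence(memory_order_seq_cst);
}

static int reader_sees_request(void)
{
    return atomic_load_explicit(&shared_qhdr.tx_req, memory_order_acquire) ? 1 : 0;
}

int main(void)
{
    writer_publish(4, 1);
    printf("peer requested a signal: %d\n", reader_sees_request());
    return 0;
}
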
/kernel/linux/linux-5.10/drivers/usb/gadget/udc/

  fsl_qe_udc.c
   1059  /* send data from a frame, no matter what tx_req */
   1131  * handle the tx_req, not include ep0 */
   1134  if (ep->tx_req != NULL) {  [in txcomplete()]
   1135  struct qe_req *req = ep->tx_req;  [in txcomplete()]
   1160  if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {  [in txcomplete()]
   1161  done(ep, ep->tx_req, 0);  [in txcomplete()]
   1162  ep->tx_req = NULL;  [in txcomplete()]
   1168  /* we should gain a new tx_req fot this endpoint */  [in txcomplete()]
   1169  if (ep->tx_req == NULL) {  [in txcomplete()]
   1171  ep->tx_req ...  [in txcomplete()]
   1299  struct qe_req *tx_req = NULL;  [ep0_txcomplete() local]
    [additional matches not shown]

  fsl_qe_udc.h
    285  struct qe_req *tx_req;  [member]

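In the Freescale QE UDC, ep->tx_req (the member at fsl_qe_udc.h line 285) is the request currently being shifted out of an endpoint: txcomplete() completes it via done() once all of its bytes have gone out and no zero-length packet is still owed, then promotes the next queued request to tx_req. A userspace model of that bookkeeping with invented stand-ins for the qe_ep/qe_req structures:

/* Sketch: complete the in-flight request when fully sent, then dequeue the
 * next one as the new tx_req. Model types and a trivial list, nothing more. */
#include <stdio.h>
#include <stddef.h>

struct fake_req {
    size_t           length;   /* total bytes to send */
    struct fake_req *next;     /* simple singly linked queue */
};

struct fake_ep {
    struct fake_req *queue;    /* pending requests */
    struct fake_req *tx_req;   /* request currently on the wire */
    size_t           sent;     /* bytes of tx_req already sent */
};

static void fake_txcomplete(struct fake_ep *ep, size_t bytes_just_sent, int zlp_pending)
{
    if (ep->tx_req) {
        ep->sent += bytes_just_sent;
        if (ep->sent >= ep->tx_req->length && !zlp_pending) {
            printf("request of %zu bytes done\n", ep->tx_req->length);
            ep->tx_req = NULL;
            ep->sent = 0;
        }
    }

    /* pick up a new tx_req for this endpoint, if one is queued */
    if (!ep->tx_req && ep->queue) {
        ep->tx_req = ep->queue;
        ep->queue = ep->queue->next;
    }
}

int main(void)
{
    struct fake_req r2 = { .length = 64, .next = NULL };
    struct fake_req r1 = { .length = 96, .next = &r2 };
    struct fake_ep ep = { .queue = &r1 };

    fake_txcomplete(&ep, 0, 0);    /* nothing in flight yet: dequeues r1 */
    fake_txcomplete(&ep, 96, 0);   /* r1 fully sent: completes it, dequeues r2 */
    fake_txcomplete(&ep, 64, 0);   /* r2 fully sent */
    return 0;
}
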
/kernel/linux/linux-6.6/drivers/usb/gadget/udc/

  fsl_qe_udc.c
   1065  /* send data from a frame, no matter what tx_req */
   1137  * handle the tx_req, not include ep0 */
   1140  if (ep->tx_req != NULL) {  [in txcomplete()]
   1141  struct qe_req *req = ep->tx_req;  [in txcomplete()]
   1166  if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {  [in txcomplete()]
   1167  done(ep, ep->tx_req, 0);  [in txcomplete()]
   1168  ep->tx_req = NULL;  [in txcomplete()]
   1174  /* we should gain a new tx_req fot this endpoint */  [in txcomplete()]
   1175  if (ep->tx_req == NULL) {  [in txcomplete()]
   1177  ep->tx_req ...  [in txcomplete()]
   1305  struct qe_req *tx_req = NULL;  [ep0_txcomplete() local]
    [additional matches not shown]

  fsl_qe_udc.h
    285  struct qe_req *tx_req;  [member]

/kernel/linux/linux-6.6/drivers/media/platform/qcom/venus/

  hfi_venus.c
     68  u32 tx_req;  [member]
    196  qhdr->tx_req = 1;  [in venus_write_queue()]
    197  /* ensure tx_req is updated in memory */  [in venus_write_queue()]
    202  qhdr->tx_req = 0;  [in venus_write_queue()]
    203  /* ensure tx_req is updated in memory */  [in venus_write_queue()]
    237  struct iface_queue *queue, void *pkt, u32 *tx_req)  [in venus_read_queue()]
    273  *tx_req = 0;  [in venus_read_queue()]
    324  *tx_req = qhdr->tx_req ? 1 : 0;  [in venus_read_queue()]
    326  /* ensure rx_req is stored to memory and tx_req i ...  [in venus_read_queue()]
    236  venus_read_queue(struct venus_hfi_device *hdev, struct iface_queue *queue, void *pkt, u32 *tx_req)  [venus_read_queue() argument]
    681  u32 tx_req;  [venus_iface_msgq_read_nolock() local]
    714  u32 tx_req;  [venus_iface_dbgq_read_nolock() local]
    [additional matches not shown]

/kernel/linux/linux-6.6/drivers/net/wwan/t7xx/

  t7xx_hif_cldma.c
    844  static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,  [in t7xx_cldma_gpd_handle_tx_request() argument]
    848  struct cldma_gpd *gpd = tx_req->gpd;  [in t7xx_cldma_gpd_handle_tx_request()]
    852  tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);  [in t7xx_cldma_gpd_handle_tx_request()]
    854  if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {  [in t7xx_cldma_gpd_handle_tx_request()]
    859  t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);  [in t7xx_cldma_gpd_handle_tx_request()]
    871  tx_req->skb = skb;  [in t7xx_cldma_gpd_handle_tx_request()]
    924  struct cldma_request *tx_req;  [in t7xx_cldma_send_skb() local]
    949  tx_req = queue->tx_next;  [in t7xx_cldma_send_skb()]
    950  if (queue->budget > 0 && !tx_req->skb) {  [in t7xx_cldma_send_skb()]
    954  t7xx_cldma_gpd_handle_tx_request(queue, tx_req, sk ...  [in t7xx_cldma_send_skb()]
    [additional matches not shown]

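The t7xx CLDMA hits show the per-request DMA setup: t7xx_cldma_gpd_handle_tx_request() maps skb->data with dma_map_single(), bails out on dma_mapping_error(), points the hardware descriptor (GPD) at the bus address and stashes the skb in tx_req for the completion path. The standalone sketch below mirrors just that map / check / record sequence with invented types; it is not the driver code.

/* Sketch: map a buffer for DMA, check the mapping, record the handle and the
 * original packet in the request. The DMA helpers are fake stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_DMA_ERROR ((uint64_t)-1)

struct fake_gpd    { uint64_t data_ptr; uint16_t len; };
struct fake_tx_req { struct fake_gpd *gpd; uint64_t mapped_buff; const void *skb; };

static uint64_t fake_dma_map(const void *buf, size_t len)
{
    if (len == 0)
        return FAKE_DMA_ERROR;                 /* model a mapping failure */
    return (uint64_t)(uintptr_t)buf;
}

static int fake_handle_tx_request(struct fake_tx_req *tx_req,
                                  const void *skb_data, size_t skb_len)
{
    tx_req->mapped_buff = fake_dma_map(skb_data, skb_len);
    if (tx_req->mapped_buff == FAKE_DMA_ERROR) {
        fprintf(stderr, "DMA mapping failed\n");
        return -1;
    }

    tx_req->gpd->data_ptr = tx_req->mapped_buff;   /* hardware sees the bus address */
    tx_req->gpd->len = (uint16_t)skb_len;
    tx_req->skb = skb_data;                        /* kept for the completion path */
    return 0;
}

int main(void)
{
    char pkt[128];
    struct fake_gpd gpd = { 0 };
    struct fake_tx_req tx_req = { .gpd = &gpd };

    return fake_handle_tx_request(&tx_req, pkt, sizeof(pkt));
}
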
/kernel/linux/linux-6.6/drivers/perf/hisilicon/

  hisi_uncore_pa_pmu.c
    323  HISI_PMU_EVENT_ATTR(tx_req, 0x5c),
    334  HISI_PMU_EVENT_ATTR(tx_req, 0x0),

  hisi_uncore_sllc_pmu.c
    339  HISI_PMU_EVENT_ATTR(tx_req, 0x34),

/kernel/linux/linux-5.10/drivers/rapidio/

  rio_cm.c
    138  struct tx_req {  [struct]
    672  struct tx_req *req, *_req;  [in rio_txcq_handler()]
    707  struct tx_req *treq;  [in riocm_queue_req()]

/kernel/linux/linux-6.6/drivers/rapidio/

  rio_cm.c
    138  struct tx_req {  [struct]
    672  struct tx_req *req, *_req;  [in rio_txcq_handler()]
    707  struct tx_req *treq;  [in riocm_queue_req()]

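rio_cm.c defines its own small struct tx_req (line 138): riocm_queue_req() appears to park an outbound message on a per-channel list when it cannot be sent right away, and rio_txcq_handler() later walks that list as transmit completions arrive. The sketch below models such a defer-and-drain queue with a plain singly linked list; the field names and the completion policy are assumptions, not the rio_cm implementation.

/* Sketch: queue deferred TX requests and drain them from a completion
 * handler. Model types only; error handling kept minimal. */
#include <stdio.h>
#include <stdlib.h>

struct fake_tx_req {
    struct fake_tx_req *next;
    void               *buffer;
    size_t              len;
};

struct fake_cm { struct fake_tx_req *tx_queue_head, *tx_queue_tail; };

static int fake_queue_req(struct fake_cm *cm, void *buffer, size_t len)
{
    struct fake_tx_req *treq = calloc(1, sizeof(*treq));

    if (!treq)
        return -1;
    treq->buffer = buffer;
    treq->len = len;
    if (cm->tx_queue_tail)
        cm->tx_queue_tail->next = treq;
    else
        cm->tx_queue_head = treq;
    cm->tx_queue_tail = treq;
    return 0;
}

static void fake_txcq_handler(struct fake_cm *cm)
{
    /* drain the deferred requests now that TX slots have freed up */
    struct fake_tx_req *req = cm->tx_queue_head;

    while (req) {
        struct fake_tx_req *next = req->next;

        printf("sending deferred request of %zu bytes\n", req->len);
        free(req);
        req = next;
    }
    cm->tx_queue_head = cm->tx_queue_tail = NULL;
}

int main(void)
{
    struct fake_cm cm = { 0 };
    char a[32], b[64];

    fake_queue_req(&cm, a, sizeof(a));
    fake_queue_req(&cm, b, sizeof(b));
    fake_txcq_handler(&cm);
    return 0;
}
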
/kernel/linux/linux-5.10/drivers/net/ethernet/hisilicon/hns3/hns3pf/

  hclge_main.c
   8726  struct hclge_tx_vlan_type_cfg_cmd *tx_req;  [in hclge_set_vlan_protocol_type() local]
   8751  tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;  [in hclge_set_vlan_protocol_type()]
   8752  tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);  [in hclge_set_vlan_protocol_type()]
   8753  tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);  [in hclge_set_vlan_protocol_type()]

/kernel/linux/linux-6.6/drivers/net/ethernet/hisilicon/hns3/hns3pf/

  hclge_main.c
   9856  struct hclge_tx_vlan_type_cfg_cmd *tx_req;  [in hclge_set_vlan_protocol_type() local]
   9881  tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;  [in hclge_set_vlan_protocol_type()]
   9882  tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);  [in hclge_set_vlan_protocol_type()]
   9883  tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);  [in hclge_set_vlan_protocol_type()]

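The hclge hits show the usual HNS3 firmware-command idiom: tx_req is a command-struct view of the raw descriptor data (the cast from desc.data), and the host-order VLAN TPIDs are stored through cpu_to_le16() so the firmware always sees little-endian fields. A hedged standalone model of that serialization follows; the cast is replaced by a memcpy to stay within standard C, and the TPID values 0x88a8/0x8100 are only examples, not taken from the driver.

/* Sketch: store host-order u16 fields into a raw descriptor buffer in
 * little-endian order, independent of the host's endianness. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_tx_vlan_type_cfg_cmd {
    uint16_t ot_vlan_type;   /* outer-tag TPID, little-endian on the wire */
    uint16_t in_vlan_type;   /* inner-tag TPID, little-endian on the wire */
};

static uint16_t fake_cpu_to_le16(uint16_t v)
{
    uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
    uint16_t le;

    memcpy(&le, b, sizeof(le));   /* byte representation is little-endian */
    return le;
}

int main(void)
{
    uint8_t desc_data[24] = { 0 };                /* raw descriptor payload */
    struct fake_tx_vlan_type_cfg_cmd tx_req = {
        .ot_vlan_type = fake_cpu_to_le16(0x88a8),
        .in_vlan_type = fake_cpu_to_le16(0x8100),
    };

    memcpy(desc_data, &tx_req, sizeof(tx_req));   /* the driver casts instead */
    printf("descriptor bytes: %02x %02x %02x %02x\n",
           desc_data[0], desc_data[1], desc_data[2], desc_data[3]);
    return 0;
}
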