/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeon_ep/
octep_tx.c
     16  static void octep_iq_reset_indices(struct octep_iq *iq)
     18          iq->fill_cnt = 0;
     19          iq->host_write_index = 0;
     20          iq->octep_read_index = 0;
     21          iq->flush_index = 0;
     22          iq->pkts_processed = 0;
     23          iq->pkt_in_done = 0;
     24          atomic_set(&iq->instr_pending, 0);
     30   * @iq: Octeon Tx queue data structure.
     33  int octep_iq_process_completions(struct octep_iq *iq, u1...   (truncated)
    105  octep_iq_free_pending(struct octep_iq *iq)
    181  struct octep_iq *iq;   (local in octep_setup_iq)
    265  octep_free_iq(struct octep_iq *iq)
    [all...]
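For orientation, the excerpt above is the Tx-queue index reset in octep_tx.c: every software counter the ring keeps is zeroed in one place. Below is a minimal stand-alone sketch of the same pattern; only the field names are taken from the excerpt, while the struct name iq_sketch and the exact field types are assumptions.

    #include <linux/types.h>
    #include <linux/atomic.h>

    /* Simplified view of the Tx-queue bookkeeping fields seen above;
     * the real struct octep_iq carries many more members. */
    struct iq_sketch {
        u32 fill_cnt;          /* descriptors filled since the last doorbell */
        u32 host_write_index;  /* next slot the host will write */
        u32 octep_read_index;  /* last slot the device reported reading */
        u32 flush_index;       /* next slot whose buffer can be freed */
        u32 pkts_processed;
        u64 pkt_in_done;
        atomic_t instr_pending;
    };

    /* Mirrors octep_iq_reset_indices(): restart the ring from a clean state. */
    static void iq_sketch_reset(struct iq_sketch *iq)
    {
        iq->fill_cnt = 0;
        iq->host_write_index = 0;
        iq->octep_read_index = 0;
        iq->flush_index = 0;
        iq->pkts_processed = 0;
        iq->pkt_in_done = 0;
        atomic_set(&iq->instr_pending, 0);
    }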
octep_main.c
     59          ioq_vector->iq = oct->iq[i];   (in octep_alloc_ioq_vectors)
    352   * @iq: Octeon Tx queue data structure.
    355  static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
    359          netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
    360          if (iq->pkts_processed) {
    361                  writel(iq->pkts_processed, iq->inst_cnt_reg);
    362                  iq...   (truncated)
    595  octep_iq_full_check(struct octep_iq *iq)
    635  struct octep_iq *iq;   (local in octep_start_xmit)
    775  struct octep_iq *iq = oct->iq[q];   (local in octep_get_stats64)
    [all...]
octep_config.h
     53  #define CFG_GET_IQ_CFG(cfg)             ((cfg)->iq)
     54  #define CFG_GET_IQ_NUM_DESC(cfg)        ((cfg)->iq.num_descs)
     55  #define CFG_GET_IQ_INSTR_TYPE(cfg)      ((cfg)->iq.instr_type)
     56  #define CFG_GET_IQ_PKIND(cfg)           ((cfg)->iq.pkind)
     58  #define CFG_GET_IQ_DB_MIN(cfg)          ((cfg)->iq.db_min)
     59  #define CFG_GET_IQ_INTR_THRESHOLD(cfg)  ((cfg)->iq.intr_threshold)
    187  struct octep_iq_config iq;   (member)
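These accessors wrap the nested config path so callers never spell it out. The sketch below shows how such a config struct and its accessor macros fit together; apart from the macro bodies quoted above, every name here (the *_sketch types, the SKETCH_* macros) is hypothetical.

    #include <linux/types.h>

    /* Assumed shape of the Tx-queue part of the driver configuration;
     * only fields referenced by the macros above are shown. */
    struct octep_iq_config_sketch {
        u16 num_descs;
        u16 instr_type;
        u16 pkind;
        u32 db_min;
        u32 intr_threshold;
    };

    struct octep_config_sketch {
        struct octep_iq_config_sketch iq;  /* the "struct octep_iq_config iq" member above */
        /* rx, msix and link config elided */
    };

    /* Accessors in the style of octep_config.h: callers never name the
     * nested path, so the layout can change behind the macro. */
    #define SKETCH_GET_IQ_NUM_DESC(cfg)  ((cfg)->iq.num_descs)
    #define SKETCH_GET_IQ_DB_MIN(cfg)    ((cfg)->iq.db_min)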
octep_cn9k_pf.c
    229          conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;   (in octep_init_config_cn93_pf)
    230          conf->iq.instr_type = OCTEP_64BYTE_INSTR;
    231          conf->iq.pkind = 0;
    232          conf->iq.db_min = OCTEP_DB_MIN;
    233          conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
    264          struct octep_iq *iq = oct->iq[iq_no];   (local in octep_setup_iq_regs_cn93_pf)
    285                            iq->desc_ring_dma);
    287                            iq->max_count);
    292          iq...   (truncated)
    596  octep_update_iq_read_index_cn93_pf(struct octep_iq *iq)
    [all...]
octep_ethtool.c
    161          struct octep_iq *iq = oct->iq[q];   (local in octep_get_ethtool_stats)
    164          tx_packets += iq->stats.instr_completed;
    165          tx_bytes += iq->stats.bytes_sent;
    166          tx_busy_errors += iq->stats.tx_busy;
    209          struct octep_iq *iq = oct->iq[q];   (local in octep_get_ethtool_stats)
    211          data[i++] = iq->stats.instr_posted;
    212          data[i++] = iq->stats.instr_completed;
    213          data[i++] = iq...   (truncated)
    [all...]
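The ethtool path simply folds per-queue Tx counters into device-wide totals. A small sketch of that accumulation, assuming a stats layout with the field names seen above; the function and struct names are invented for illustration.

    #include <linux/types.h>

    struct iq_stats_sketch {
        u64 instr_posted;
        u64 instr_completed;
        u64 bytes_sent;
        u64 tx_busy;
    };

    /* Walk every Tx queue and fold its counters into device-wide totals,
     * as octep_get_ethtool_stats() does for tx_packets/tx_bytes/tx_busy. */
    static void sum_tx_stats(const struct iq_stats_sketch *per_q, int num_q,
                             u64 *tx_packets, u64 *tx_bytes, u64 *tx_busy_errors)
    {
        int q;

        *tx_packets = *tx_bytes = *tx_busy_errors = 0;
        for (q = 0; q < num_q; q++) {
            *tx_packets += per_q[q].instr_completed;
            *tx_bytes += per_q[q].bytes_sent;
            *tx_busy_errors += per_q[q].tx_busy;
        }
    }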
/kernel/linux/linux-5.10/drivers/crypto/cavium/zip/ |
zip_device.c
     59          return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *   (in zip_cmd_queue_consumed)
     98          spin_lock(&zip_dev->iq[queue].lock);   (in zip_load_instr)
    109          zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
    110          zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
    117          memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
    119          zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
    122          ncb_ptr = zip_dev->iq[queue].sw_head;
    125                  ncb_ptr, zip_dev->iq[queue].sw_head - 16);
    128          zip_dev->iq[queu...   (truncated)
    [all...]
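The ZIP driver keeps a software head/tail pair per command queue and copies one 128-byte instruction (16 x 64-bit words) at the head under the queue lock. A hedged sketch of that pattern follows; the struct is an assumed slice of the real per-queue state, and the byte count in iq_consumed_bytes() is one plausible reading of the truncated expression on line 59.

    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/types.h>

    #define CMD_WORDS 16  /* one instruction = 16 x 64-bit words = 128 B */

    /* Assumed slice of the per-queue state used in the excerpt. */
    struct zip_iq_sketch {
        u64 *sw_head;   /* where the next instruction will be written */
        u64 *sw_tail;   /* start of the region still owned by hardware */
        spinlock_t lock;
    };

    /* Bytes currently occupied between tail and head: pointer difference
     * in 64-bit words, scaled to bytes. */
    static size_t iq_consumed_bytes(const struct zip_iq_sketch *iq)
    {
        return (size_t)(iq->sw_head - iq->sw_tail) * sizeof(u64);
    }

    /* Copy one 128-byte instruction at sw_head and advance it, mirroring
     * the memcpy()/"sw_head += 16" step of zip_load_instr(). */
    static void iq_push_instr(struct zip_iq_sketch *iq, const u64 *instr)
    {
        spin_lock(&iq->lock);
        memcpy(iq->sw_head, instr, CMD_WORDS * sizeof(u64));
        iq->sw_head += CMD_WORDS;
        spin_unlock(&iq->lock);
    }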
zip_mem.c
     59          zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),   (in zip_cmd_qbuf_alloc)
     62          if (!zip->iq[q].sw_head)
     65          memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
     67          zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
     78          zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);   (in zip_cmd_qbuf_free)
     80          free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
/kernel/linux/linux-6.6/drivers/crypto/cavium/zip/ |
zip_device.c
     59          return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *   (in zip_cmd_queue_consumed)
     98          spin_lock(&zip_dev->iq[queue].lock);   (in zip_load_instr)
    109          zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head);
    110          zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail);
    117          memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr,
    119          zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */
    122          ncb_ptr = zip_dev->iq[queue].sw_head;
    125                  ncb_ptr, zip_dev->iq[queue].sw_head - 16);
    128          zip_dev->iq[queu...   (truncated)
    [all...]
zip_mem.c
     59          zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),   (in zip_cmd_qbuf_alloc)
     62          if (!zip->iq[q].sw_head)
     65          memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);
     67          zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);
     78          zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);   (in zip_cmd_qbuf_free)
     80          free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));
/kernel/linux/linux-5.10/drivers/net/ethernet/cavium/liquidio/ |
request_manager.c
     45  struct octeon_instr_queue *iq =   (local in IQ_INSTR_MODE_64B)
     47          return iq->iqcmd_64B;
     60  struct octeon_instr_queue *iq;   (local in octeon_init_instr_queue)
     82          iq = oct->instr_queue[iq_no];
     84          iq->oct_dev = oct;
     86          iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
     87          if (!iq->base_addr) {
     93          iq->max_count = num_descs;
     98          iq...   (truncated)
    168  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];   (local in octeon_delete_instr_queue)
    272  ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
    286  struct octeon_instr_queue *iq;   (local in octeon_ring_doorbell_locked)
    295  __copy_cmd_into_iq(struct octeon_instr_queue *iq, u8 *cmd)
    307  __post_command2(struct octeon_instr_queue *iq, u8 *cmd)
    359  __add_to_request_list(struct octeon_instr_queue *iq, int idx, void *buf, int reqtype)
    368  lio_process_iq_request_list(struct octeon_device *oct, struct octeon_instr_queue *iq, u32 napi_budget)
    445  octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, u32 napi_budget)
    499  struct octeon_instr_queue *iq;   (local in __check_db_timeout)
    546  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];   (local in octeon_send_command)
    689  struct octeon_instr_queue *iq;   (local in octeon_send_soft_command)
    [all...]
cn23xx_vf_regs.h
     70  #define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \
     71          (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
     73  #define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \
     74          (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
     76  #define CN23XX_VF_SLI_IQ_SIZE(iq) \
     77          (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET))
     79  #define CN23XX_VF_SLI_IQ_DOORBELL(iq) \
     80          (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET))
     82  #define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \
     83          (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
    [all...]
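Each per-queue CSR here sits at a fixed stride from a base address, so every macro reduces to base + queue * stride. The sketch below reproduces that shape with placeholder constants; the SKETCH_* names and values are not the real CN23XX register offsets.

    #include <linux/io.h>
    #include <linux/types.h>

    /* Placeholder layout constants; the real CN23XX_VF_* values live in
     * cn23xx_vf_regs.h and are not reproduced here. */
    #define SKETCH_IQ_DOORBELL_START  0x0ULL
    #define SKETCH_IQ_OFFSET          0x20000ULL

    /* Same shape as CN23XX_VF_SLI_IQ_DOORBELL(iq): base CSR plus a fixed
     * per-queue stride. */
    #define SKETCH_IQ_DOORBELL(iq) \
        (SKETCH_IQ_DOORBELL_START + ((iq) * SKETCH_IQ_OFFSET))

    /* Ring the doorbell of queue iq_no by writing the number of newly
     * posted descriptors to its per-queue CSR. */
    static void sketch_ring_doorbell(u8 __iomem *bar0, u32 iq_no, u32 new_descs)
    {
        writel(new_descs, bar0 + SKETCH_IQ_DOORBELL(iq_no));
    }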
cn23xx_vf_device.c
    104  struct octeon_instr_queue *iq;   (local in cn23xx_vf_setup_global_input_regs)
    116          iq = oct->instr_queue[q_no];
    118          if (iq)
    119                  inst_cnt_reg = iq->inst_cnt_reg;
    214  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];   (local in cn23xx_setup_vf_iq_regs)
    219                            iq->base_addr_dma);
    220          octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);
    225          iq->doorbell_reg =
    227          iq->inst_cnt_reg =
    230                  iq_no, iq...   (truncated)
    524  cn23xx_update_read_index(struct octeon_instr_queue *iq)
    [all...]
cn66xx_regs.h
    143  #define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \
    144          (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
    146  #define CN6XXX_SLI_IQ_SIZE(iq) \
    147          (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
    149  #define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \
    150          (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
    152  #define CN6XXX_SLI_IQ_DOORBELL(iq) \
    153          (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
    155  #define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \
    156          (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
    [all...]
octeon_config.h
    121  #define CFG_GET_IQ_CFG(cfg)                    ((cfg)->iq)
    122  #define CFG_GET_IQ_MAX_Q(cfg)                  ((cfg)->iq.max_iqs)
    123  #define CFG_GET_IQ_PENDING_LIST_SIZE(cfg)      ((cfg)->iq.pending_list_size)
    124  #define CFG_GET_IQ_INSTR_TYPE(cfg)             ((cfg)->iq.instr_type)
    125  #define CFG_GET_IQ_DB_MIN(cfg)                 ((cfg)->iq.db_min)
    126  #define CFG_GET_IQ_DB_TIMEOUT(cfg)             ((cfg)->iq.db_timeout)
    128  #define CFG_GET_IQ_INTR_PKT(cfg)               ((cfg)->iq.iq_intr_pkt)
    129  #define CFG_SET_IQ_INTR_PKT(cfg, val)          (cfg)->iq.iq_intr_pkt = val
    410  struct octeon_iq_config iq;   (member)
cn66xx_device.c
    266  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];   (local in lio_cn6xxx_setup_iq_regs)
    272                            iq->base_addr_dma);
    273          octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);
    278          iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
    279          iq->inst_cnt_reg = oct->mmio[0].hw_addr
    282                  iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
    287          iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
    339          mask |= oct->io_qmask.iq;   (in lio_cn6xxx_enable_io_queues)
    449  lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
    [all...]
cn23xx_pf_regs.h
    170  #define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
    171          (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
    173  #define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
    174          (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
    176  #define CN23XX_SLI_IQ_SIZE(iq) \
    177          (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
    179  #define CN23XX_SLI_IQ_DOORBELL(iq) \
    180          (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
    182  #define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
    183          (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
    [all...]
lio_vf_main.c
    122  struct octeon_instr_queue *iq;   (local in pcierror_quiesce_device)
    124          if (!(oct->io_qmask.iq & BIT_ULL(i)))
    126          iq = oct->instr_queue[i];
    128          if (atomic_read(&iq->instr_pending)) {
    129                  spin_lock_bh(&iq->lock);
    130                  iq->fill_cnt = 0;
    131                  iq->octeon_read_index = iq->host_write_index;
    132                  iq->stats.instr_processed +=
    133                          atomic_read(&iq...   (truncated)
    484  struct octeon_instr_queue *iq;   (local in octeon_destroy_resources)
    829  int i, frags, iq;   (local in free_netsgbuf)
    871  int i, frags, iq;   (local in free_netsgbuf_with_resp)
    [all...]
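pcierror_quiesce_device() walks the Tx-queue bitmask and force-completes anything still pending so the device can be torn down. Below is a sketch of that loop under assumed types; the closing atomic_set() is an inferred step that is not visible in the excerpt.

    #include <linux/bitops.h>
    #include <linux/spinlock.h>
    #include <linux/atomic.h>
    #include <linux/types.h>

    /* Assumed minimal queue state for the quiesce pattern in the excerpt. */
    struct lio_iq_sketch {
        spinlock_t lock;
        u32 fill_cnt;
        u32 host_write_index;
        u32 octeon_read_index;
        atomic_t instr_pending;
        u64 instr_processed;  /* stands in for iq->stats.instr_processed */
    };

    /* Walk the Tx queues named in a qmask and drop any requests still
     * pending, mirroring the loop in pcierror_quiesce_device(). */
    static void sketch_quiesce(struct lio_iq_sketch **iqs, int max_iqs, u64 iq_mask)
    {
        int i;

        for (i = 0; i < max_iqs; i++) {
            struct lio_iq_sketch *iq = iqs[i];

            if (!(iq_mask & BIT_ULL(i)) || !iq)
                continue;
            if (!atomic_read(&iq->instr_pending))
                continue;

            spin_lock_bh(&iq->lock);
            iq->fill_cnt = 0;
            /* Treat everything the host posted as consumed by the device. */
            iq->octeon_read_index = iq->host_write_index;
            iq->instr_processed += atomic_read(&iq->instr_pending);
            atomic_set(&iq->instr_pending, 0);
            spin_unlock_bh(&iq->lock);
        }
    }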
/kernel/linux/linux-6.6/drivers/net/ethernet/cavium/liquidio/ |
request_manager.c
     51  struct octeon_instr_queue *iq;   (local in octeon_init_instr_queue)
     73          iq = oct->instr_queue[iq_no];
     75          iq->oct_dev = oct;
     77          iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
     78          if (!iq->base_addr) {
     84          iq->max_count = num_descs;
     89          iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)),
     91          if (!iq...   (truncated)
    159  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];   (local in octeon_delete_instr_queue)
    265  ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
    279  struct octeon_instr_queue *iq;   (local in octeon_ring_doorbell_locked)
    289  __copy_cmd_into_iq(struct octeon_instr_queue *iq, u8 *cmd)
    301  __post_command2(struct octeon_instr_queue *iq, u8 *cmd)
    354  __add_to_request_list(struct octeon_instr_queue *iq, int idx, void *buf, int reqtype)
    363  lio_process_iq_request_list(struct octeon_device *oct, struct octeon_instr_queue *iq, u32 napi_budget)
    441  octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, u32 napi_budget)
    495  struct octeon_instr_queue *iq;   (local in __check_db_timeout)
    542  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];   (local in octeon_send_command)
    687  struct octeon_instr_queue *iq;   (local in octeon_send_soft_command)
    [all...]
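octeon_init_instr_queue() pairs a DMA-coherent descriptor ring (for the hardware) with a vzalloc'd request_list shadow array (so the host remembers what to free per slot). The sketch below shows that allocation pattern using the generic dma_alloc_coherent()/vzalloc_node() APIs; the descriptor size, error-handling details, and all sketch_* names are assumptions, and lio_dma_alloc() is only presumed to wrap a coherent DMA allocation.

    #include <linux/dma-mapping.h>
    #include <linux/vmalloc.h>
    #include <linux/overflow.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define SKETCH_DESC_SIZE 64  /* assuming 64-byte instructions */

    struct sketch_req_entry {
        void *buf;
        int reqtype;
    };

    struct sketch_iq {
        void *base_addr;                       /* descriptor ring, CPU view */
        dma_addr_t base_addr_dma;              /* descriptor ring, device view */
        u32 max_count;
        struct sketch_req_entry *request_list; /* per-slot completion bookkeeping */
    };

    static int sketch_iq_init(struct device *dev, struct sketch_iq *iq,
                              u32 num_descs, int numa_node)
    {
        size_t q_size = (size_t)SKETCH_DESC_SIZE * num_descs;

        /* Hardware-visible ring. */
        iq->base_addr = dma_alloc_coherent(dev, q_size, &iq->base_addr_dma,
                                           GFP_KERNEL);
        if (!iq->base_addr)
            return -ENOMEM;

        iq->max_count = num_descs;

        /* Host-only shadow list, NUMA-local to the queue's node. */
        iq->request_list = vzalloc_node(array_size(num_descs,
                                                   sizeof(*iq->request_list)),
                                        numa_node);
        if (!iq->request_list) {
            dma_free_coherent(dev, q_size, iq->base_addr, iq->base_addr_dma);
            return -ENOMEM;
        }
        return 0;
    }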
cn23xx_vf_regs.h
     70  #define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \
     71          (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
     73  #define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \
     74          (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
     76  #define CN23XX_VF_SLI_IQ_SIZE(iq) \
     77          (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET))
     79  #define CN23XX_VF_SLI_IQ_DOORBELL(iq) \
     80          (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET))
     82  #define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \
     83          (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
    [all...]
cn23xx_vf_device.c
    104  struct octeon_instr_queue *iq;   (local in cn23xx_vf_setup_global_input_regs)
    116          iq = oct->instr_queue[q_no];
    118          if (iq)
    119                  inst_cnt_reg = iq->inst_cnt_reg;
    214  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];   (local in cn23xx_setup_vf_iq_regs)
    219                            iq->base_addr_dma);
    220          octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);
    225          iq->doorbell_reg =
    227          iq->inst_cnt_reg =
    230                  iq_no, iq...   (truncated)
    524  cn23xx_update_read_index(struct octeon_instr_queue *iq)
    [all...]
cn66xx_regs.h
    143  #define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \
    144          (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
    146  #define CN6XXX_SLI_IQ_SIZE(iq) \
    147          (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
    149  #define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \
    150          (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
    152  #define CN6XXX_SLI_IQ_DOORBELL(iq) \
    153          (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
    155  #define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \
    156          (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
    [all...]
octeon_config.h
    121  #define CFG_GET_IQ_CFG(cfg)                    ((cfg)->iq)
    122  #define CFG_GET_IQ_MAX_Q(cfg)                  ((cfg)->iq.max_iqs)
    123  #define CFG_GET_IQ_PENDING_LIST_SIZE(cfg)      ((cfg)->iq.pending_list_size)
    124  #define CFG_GET_IQ_INSTR_TYPE(cfg)             ((cfg)->iq.instr_type)
    125  #define CFG_GET_IQ_DB_MIN(cfg)                 ((cfg)->iq.db_min)
    126  #define CFG_GET_IQ_DB_TIMEOUT(cfg)             ((cfg)->iq.db_timeout)
    128  #define CFG_GET_IQ_INTR_PKT(cfg)               ((cfg)->iq.iq_intr_pkt)
    129  #define CFG_SET_IQ_INTR_PKT(cfg, val)          (cfg)->iq.iq_intr_pkt = val
    410  struct octeon_iq_config iq;   (member)
cn66xx_device.c
    266  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];   (local in lio_cn6xxx_setup_iq_regs)
    272                            iq->base_addr_dma);
    273          octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);
    278          iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
    279          iq->inst_cnt_reg = oct->mmio[0].hw_addr
    282                  iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
    287          iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
    339          mask |= oct->io_qmask.iq;   (in lio_cn6xxx_enable_io_queues)
    449  lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
    [all...]
cn23xx_pf_regs.h
    170  #define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
    171          (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
    173  #define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
    174          (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
    176  #define CN23XX_SLI_IQ_SIZE(iq) \
    177          (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
    179  #define CN23XX_SLI_IQ_DOORBELL(iq) \
    180          (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
    182  #define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
    183          (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
    [all...]
/kernel/linux/linux-6.6/drivers/crypto/marvell/octeontx2/ |
otx2_cptlf.h
    124  struct otx2_cpt_inst_queue *iq;   (local in otx2_cpt_free_instruction_queues)
    128          iq = &lfs->lf[i].iqueue;
    129          if (iq->real_vaddr)
    131                          iq->size,
    132                          iq->real_vaddr,
    133                          iq->real_dma_addr);
    134          iq->real_vaddr = NULL;
    135          iq->vaddr = NULL;
    142  struct otx2_cpt_inst_queue *iq;   (local in otx2_cpt_alloc_instruction_queues)
    149          iq...   (truncated)
    [all...]
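otx2_cpt_free_instruction_queues() releases each queue's coherent buffer only when real_vaddr is set, then clears the pointers so a second call is harmless. A sketch of that guard-then-free pattern follows; the elided call on lines 131-133 is assumed to be dma_free_coherent(), and the struct below is a reduced, hypothetical view of otx2_cpt_inst_queue.

    #include <linux/dma-mapping.h>
    #include <linux/types.h>

    /* Assumed subset of otx2_cpt_inst_queue: a DMA-coherent command buffer
     * plus the aligned pointer the hardware actually uses. */
    struct cpt_iq_sketch {
        void *real_vaddr;          /* as returned by the allocator */
        dma_addr_t real_dma_addr;
        void *vaddr;               /* aligned view inside real_vaddr */
        size_t size;
    };

    /* Free the coherent buffer only if it was allocated, then reset the
     * pointers; mirrors the guarded free seen in the excerpt. */
    static void cpt_iq_sketch_free(struct device *dev, struct cpt_iq_sketch *iq)
    {
        if (iq->real_vaddr)
            dma_free_coherent(dev, iq->size, iq->real_vaddr,
                              iq->real_dma_addr);
        iq->real_vaddr = NULL;
        iq->vaddr = NULL;
    }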