/kernel/linux/linux-6.6/drivers/nvme/target/
fabrics-cmd.c:
   141  u16 sqsize = le16_to_cpu(c->sqsize);  in nvmet_install_queue()  (local)
   146  if (!sqsize) {  in nvmet_install_queue()
   148  req->error_loc = offsetof(struct nvmf_connect_command, sqsize);  in nvmet_install_queue()
   149  req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);  in nvmet_install_queue()
   160  if (sqsize > mqes) {  in nvmet_install_queue()
   161  pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",  in nvmet_install_queue()
   162  sqsize, mqes, ctrl->cntlid);  in nvmet_install_queue()
   163  req->error_loc = offsetof(struct nvmf_connect_command, sqsize);  in nvmet_install_queue()
   164  req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);  in nvmet_install_queue()
   [all...]
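The fabrics-cmd.c hits above show the target-side validation of the Connect command's SQSIZE: a zero value is rejected, and a value above the controller's advertised MQES is rejected, with error_loc pointing at the sqsize field. A minimal user-space sketch of that check, assuming a simplified command struct and return codes (the struct layout and mqes value here are illustrative, not the kernel's definitions):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the fabrics Connect SQE fields (assumption). */
struct connect_cmd {
	uint16_t recfmt;
	uint16_t qid;
	uint16_t sqsize;	/* zero-based queue size, as on the wire */
};

/* Mirrors the shape of the nvmet sqsize validation: reject 0 and values
 * above the controller's advertised MQES (both are zero-based). */
static int validate_sqsize(const struct connect_cmd *c, uint16_t mqes)
{
	if (c->sqsize == 0) {
		fprintf(stderr, "connect rejected: sqsize 0 is invalid\n");
		return -1;
	}
	if (c->sqsize > mqes) {
		fprintf(stderr, "connect rejected: sqsize %u > MQES %u\n",
			c->sqsize, mqes);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct connect_cmd ok  = { .qid = 1, .sqsize = 127 };
	struct connect_cmd bad = { .qid = 1, .sqsize = 2047 };

	printf("ok:  %d\n", validate_sqsize(&ok, 1023));   /* accepted */
	printf("bad: %d\n", validate_sqsize(&bad, 1023));  /* rejected */
	return 0;
}
```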
trace.c:
   157  u16 sqsize = get_unaligned_le16(spc + 4);  in nvmet_trace_fabrics_connect()  (local)
   161  trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",  in nvmet_trace_fabrics_connect()
   162  recfmt, qid, sqsize, cattr, kato);  in nvmet_trace_fabrics_connect()
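The trace helper decodes the Connect SQE's command-specific bytes with unaligned little-endian loads, with sqsize at byte offset 4. A small sketch of the same decode in portable C; get_le16() is a stand-in for the kernel's get_unaligned_le16(), and the recfmt/qid offsets are inferred from the format string in the hits:

```c
#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's get_unaligned_le16(). */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* Command-specific bytes of a Connect SQE, laid out as in the trace
	 * hits above (assumed offsets): recfmt @0, qid @2, sqsize @4. */
	uint8_t spc[8] = {
		0x00, 0x00,	/* recfmt = 0 */
		0x01, 0x00,	/* qid    = 1 */
		0x7f, 0x00,	/* sqsize = 127 (zero-based) */
		0x00, 0x00,
	};

	printf("recfmt=%u, qid=%u, sqsize=%u\n",
	       get_le16(spc), get_le16(spc + 2), get_le16(spc + 4));
	return 0;
}
```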
fc.c:
   134  u16 sqsize;  (member)
   643  for (i = 0; i < queue->sqsize; fod++, i++) {  in nvmet_fc_prep_fcp_iodlist()
   678  for (i = 0; i < queue->sqsize; fod++, i++) {  in nvmet_fc_destroy_fcp_iodlist()
   802  u16 qid, u16 sqsize)  in nvmet_fc_alloc_target_queue()
   810  queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);  in nvmet_fc_alloc_target_queue()
   821  queue->sqsize = sqsize;  in nvmet_fc_alloc_target_queue()
   897  for (i = 0; i < queue->sqsize; fod++, i++) {  in nvmet_fc_delete_target_queue()
  1690  be16_to_cpu(rqst->assoc_cmd.sqsize)))  in nvmet_fc_ls_create_association()
  1701  be16_to_cpu(rqst->assoc_cmd.sqsize));  in nvmet_fc_ls_create_association()
   801  nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, u16 qid, u16 sqsize)  in nvmet_fc_alloc_target_queue()  (argument)
   [all...]
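In the FC target, sqsize also sizes a flexible array of per-command FCP descriptors: the queue is allocated in one shot with struct_size(queue, fod, sqsize) and the prep/teardown loops walk 0..sqsize-1. A compact sketch of that flexible-array pattern, using an invented descriptor type rather than the driver's struct nvmet_fc_fcp_iod:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fcp_iod {			/* placeholder per-command descriptor */
	int active;
};

struct tgt_queue {
	uint16_t qid;
	uint16_t sqsize;
	struct fcp_iod fod[];		/* flexible array, one slot per SQ entry */
};

static struct tgt_queue *alloc_queue(uint16_t qid, uint16_t sqsize)
{
	/* Equivalent of kzalloc(struct_size(queue, fod, sqsize), ...):
	 * header plus sqsize trailing elements in a single allocation. */
	struct tgt_queue *q = calloc(1, sizeof(*q) + sqsize * sizeof(struct fcp_iod));

	if (!q)
		return NULL;
	q->qid = qid;
	q->sqsize = sqsize;
	return q;
}

int main(void)
{
	struct tgt_queue *q = alloc_queue(1, 128);

	if (!q)
		return 1;
	for (uint16_t i = 0; i < q->sqsize; i++)	/* prep loop, as in the hits */
		q->fod[i].active = 0;
	printf("queue %u with %u iods\n", q->qid, q->sqsize);
	free(q);
	return 0;
}
```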
/kernel/linux/linux-5.10/drivers/nvme/target/
fabrics-cmd.c:
   110  u16 sqsize = le16_to_cpu(c->sqsize);  in nvmet_install_queue()  (local)
   120  if (!sqsize) {  in nvmet_install_queue()
   122  req->error_loc = offsetof(struct nvmf_connect_command, sqsize);  in nvmet_install_queue()
   123  req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);  in nvmet_install_queue()
   129  nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);  in nvmet_install_queue()
   130  nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);  in nvmet_install_queue()
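Once the value passes validation, the 5.10 target converts the zero-based wire value back to a one-based depth when setting up the queues (sqsize + 1). A tiny sketch of that convention; sq_setup() below is a stand-in, not the kernel's nvmet_sq_setup():

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for nvmet_sq_setup(): takes a one-based queue depth. */
static void sq_setup(uint16_t qid, uint16_t depth)
{
	printf("qid %u: submission queue depth %u entries\n", qid, depth);
}

int main(void)
{
	uint16_t sqsize = 127;		/* zero-based value from the Connect command */

	sq_setup(1, sqsize + 1);	/* 128 usable entries */
	return 0;
}
```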
trace.c:
   141  u16 sqsize = get_unaligned_le16(spc + 4);  in nvmet_trace_fabrics_connect()  (local)
   145  trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",  in nvmet_trace_fabrics_connect()
   146  recfmt, qid, sqsize, cattr, kato);  in nvmet_trace_fabrics_connect()
fc.c:
   132  u16 sqsize;  (member)
   631  for (i = 0; i < queue->sqsize; fod++, i++) {  in nvmet_fc_prep_fcp_iodlist()
   666  for (i = 0; i < queue->sqsize; fod++, i++) {  in nvmet_fc_destroy_fcp_iodlist()
   790  u16 qid, u16 sqsize)  in nvmet_fc_alloc_target_queue()
   799  queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);  in nvmet_fc_alloc_target_queue()
   813  queue->sqsize = sqsize;  in nvmet_fc_alloc_target_queue()
   900  for (i = 0; i < queue->sqsize; fod++, i++) {  in nvmet_fc_delete_target_queue()
  1666  be16_to_cpu(rqst->assoc_cmd.sqsize)))  in nvmet_fc_ls_create_association()
  1677  be16_to_cpu(rqst->assoc_cmd.sqsize));  in nvmet_fc_ls_create_association()
   789  nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, u16 qid, u16 sqsize)  in nvmet_fc_alloc_target_queue()  (argument)
   [all...]
loop.c:
   608  ctrl->ctrl.sqsize = opts->queue_size - 1;  in nvme_loop_create_ctrl()
/kernel/linux/linux-5.10/include/linux/
nvme-fc.h:
   260  __be16 sqsize;  (member)
   288  __be16 sqsize;  (member)
/kernel/linux/linux-5.10/drivers/nvme/host/
trace.c:
   200  u16 sqsize = get_unaligned_le16(spc + 4);  in nvme_trace_fabrics_connect()  (local)
   204  trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",  in nvme_trace_fabrics_connect()
   205  recfmt, qid, sqsize, cattr, kato);  in nvme_trace_fabrics_connect()
rdma.c:
   781  ctrl->ctrl.sqsize + 1);  in nvme_rdma_alloc_io_queues()
   819  set->queue_depth = nctrl->sqsize + 1;  in nvme_rdma_alloc_tagset()
  1130  if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {  in nvme_rdma_setup_ctrl()
  1132  "queue_size %zu > ctrl sqsize %u, clamping down\n",  in nvme_rdma_setup_ctrl()
  1133  ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);  in nvme_rdma_setup_ctrl()
  1136  if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {  in nvme_rdma_setup_ctrl()
  1138  "sqsize %u > ctrl maxcmd %u, clamping down\n",  in nvme_rdma_setup_ctrl()
  1139  ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);  in nvme_rdma_setup_ctrl()
  1140  ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;  in nvme_rdma_setup_ctrl()
  1927  * is at minimum you make hrqsize sqsize  in nvme_rdma_route_resolved()
   [all...]
fabrics.c:
   383  cmd.connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);  in nvmf_connect_admin_queue()
   453  cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);  in nvmf_connect_io_queue()
tcp.c:
  1652  set->queue_depth = nctrl->sqsize + 1;  in nvme_tcp_alloc_tagset()
  1740  ctrl->sqsize + 1);  in __nvme_tcp_alloc_io_queues()
  2041  if (opts->queue_size > ctrl->sqsize + 1)  in nvme_tcp_setup_ctrl()
  2043  "queue_size %zu > ctrl sqsize %u, clamping down\n",  in nvme_tcp_setup_ctrl()
  2044  opts->queue_size, ctrl->sqsize + 1);  in nvme_tcp_setup_ctrl()
  2046  if (ctrl->sqsize + 1 > ctrl->maxcmd) {  in nvme_tcp_setup_ctrl()
  2048  "sqsize %u > ctrl maxcmd %u, clamping down\n",  in nvme_tcp_setup_ctrl()
  2049  ctrl->sqsize + 1, ctrl->maxcmd);  in nvme_tcp_setup_ctrl()
  2050  ctrl->sqsize = ctrl->maxcmd - 1;  in nvme_tcp_setup_ctrl()
  2543  ctrl->ctrl.sqsize  in nvme_tcp_create_ctrl()
   [all...]
fc.c:
  1216  assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);  in nvme_fc_connect_admin_queue()
  1338  conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);  in nvme_fc_connect_queue()
  2902  ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  in nvme_fc_create_io_queues()
  2906  ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  in nvme_fc_create_io_queues()
  2966  ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  in nvme_fc_recreate_io_queues()
  2970  ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  in nvme_fc_recreate_io_queues()
  3127  if (opts->queue_size > ctrl->ctrl.sqsize + 1) {  in nvme_fc_create_association()
  3128  /* warn if sqsize is lower than queue_size */  in nvme_fc_create_association()
  3130  "queue_size %zu > ctrl sqsize %u, reducing "  in nvme_fc_create_association()
  3131  "to sqsize\  in nvme_fc_create_association()
   [all...]
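On the FC host side the conversion runs the other way: a one-based qsize becomes the zero-based, big-endian sqsize carried in the Create Association / Create Connection LS payloads via cpu_to_be16(qsize - 1). A sketch of that encoding; to_be16() is a local byte-swap helper that assumes a little-endian host, not the kernel macro:

```c
#include <stdint.h>
#include <stdio.h>

/* Local byte-swap helper standing in for cpu_to_be16(); assumes a
 * little-endian host for the purpose of the sketch. */
static uint16_t to_be16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	uint16_t qsize = 128;				/* one-based depth chosen by the host */
	uint16_t sqsize_wire = to_be16(qsize - 1);	/* zero-based, big-endian LS field */

	printf("qsize %u -> sqsize field 0x%04x (byte-swapped host view)\n",
	       qsize, sqsize_wire);
	return 0;
}
```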
nvme.h:
   292  u16 sqsize;  (member)
/kernel/linux/linux-6.6/include/linux/
nvme-fc.h:
   260  __be16 sqsize;  (member)
   288  __be16 sqsize;  (member)
/kernel/linux/linux-6.6/drivers/nvme/host/
rdma.c:
   740  ctrl->ctrl.sqsize + 1);  in nvme_rdma_alloc_io_queues()
  1027  if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {  in nvme_rdma_setup_ctrl()
  1029  "queue_size %zu > ctrl sqsize %u, clamping down\n",  in nvme_rdma_setup_ctrl()
  1030  ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);  in nvme_rdma_setup_ctrl()
  1033  if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {  in nvme_rdma_setup_ctrl()
  1035  "ctrl sqsize %u > max queue size %u, clamping down\n",  in nvme_rdma_setup_ctrl()
  1036  ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);  in nvme_rdma_setup_ctrl()
  1037  ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;  in nvme_rdma_setup_ctrl()
  1040  if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {  in nvme_rdma_setup_ctrl()
  1042  "sqsize  in nvme_rdma_setup_ctrl()
   [all...]
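The RDMA and TCP hosts apply the same clamping pattern: warn when the requested queue_size exceeds sqsize + 1, then shrink sqsize so that sqsize + 1 stays within the transport maximum and the controller's MAXCMD. A sketch of that ordering, with a placeholder limit standing in for NVME_RDMA_MAX_QUEUE_SIZE:

```c
#include <stdio.h>

#define TRANSPORT_MAX_QUEUE_SIZE 128	/* placeholder for NVME_RDMA_MAX_QUEUE_SIZE */

/* Mirrors the clamping shown above: sqsize is zero-based, the limits are
 * one-based, so every comparison uses sqsize + 1. */
static unsigned int clamp_sqsize(unsigned int sqsize, unsigned int queue_size,
				 unsigned int maxcmd)
{
	if (queue_size > sqsize + 1)
		fprintf(stderr, "queue_size %u > ctrl sqsize %u, clamping down\n",
			queue_size, sqsize + 1);

	if (sqsize + 1 > TRANSPORT_MAX_QUEUE_SIZE) {
		fprintf(stderr, "ctrl sqsize %u > max queue size %u, clamping down\n",
			sqsize + 1, TRANSPORT_MAX_QUEUE_SIZE);
		sqsize = TRANSPORT_MAX_QUEUE_SIZE - 1;
	}

	if (sqsize + 1 > maxcmd) {
		fprintf(stderr, "sqsize %u > ctrl maxcmd %u, clamping down\n",
			sqsize + 1, maxcmd);
		sqsize = maxcmd - 1;
	}
	return sqsize;
}

int main(void)
{
	/* Request 512-entry queues from a controller advertising MAXCMD 64
	 * over a transport capped at 128 entries. */
	printf("effective sqsize: %u\n", clamp_sqsize(255, 512, 64));
	return 0;
}
```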
trace.c:
   269  u16 sqsize = get_unaligned_le16(spc + 4);  in nvme_trace_fabrics_connect()  (local)
   273  trace_seq_printf(p, "recfmt=%u, qid=%u, sqsize=%u, cattr=%u, kato=%u",  in nvme_trace_fabrics_connect()
   274  recfmt, qid, sqsize, cattr, kato);  in nvme_trace_fabrics_connect()
tcp.c:
  2036  if (opts->queue_size > ctrl->sqsize + 1)  in nvme_tcp_setup_ctrl()
  2038  "queue_size %zu > ctrl sqsize %u, clamping down\n",  in nvme_tcp_setup_ctrl()
  2039  opts->queue_size, ctrl->sqsize + 1);  in nvme_tcp_setup_ctrl()
  2041  if (ctrl->sqsize + 1 > ctrl->maxcmd) {  in nvme_tcp_setup_ctrl()
  2043  "sqsize %u > ctrl maxcmd %u, clamping down\n",  in nvme_tcp_setup_ctrl()
  2044  ctrl->sqsize + 1, ctrl->maxcmd);  in nvme_tcp_setup_ctrl()
  2045  ctrl->sqsize = ctrl->maxcmd - 1;  in nvme_tcp_setup_ctrl()
  2530  ctrl->ctrl.sqsize = opts->queue_size - 1;  in nvme_tcp_create_ctrl()
fc.c:
  1210  assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);  in nvme_fc_connect_admin_queue()
  1332  conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);  in nvme_fc_connect_queue()
  2921  ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  in nvme_fc_create_io_queues()
  2925  ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  in nvme_fc_create_io_queues()
  2981  ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  in nvme_fc_recreate_io_queues()
  2985  ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);  in nvme_fc_recreate_io_queues()
  3151  ctrl->ctrl.sqsize = opts->queue_size - 1;  in nvme_fc_create_association()
  3517  ctrl->ctrl.sqsize = opts->queue_size - 1;  in nvme_fc_init_ctrl()
fabrics.c:
   403  cmd->connect.sqsize = cpu_to_le16(ctrl->sqsize);  in nvmf_connect_cmd_prep()
   405  cmd->connect.sqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);  in nvmf_connect_cmd_prep()
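When the host builds the Connect command, the admin queue always reports a fixed zero-based depth (NVME_AQ_DEPTH - 1) while I/O queues carry the negotiated ctrl->sqsize, encoded little-endian. A sketch of that selection; AQ_DEPTH and to_le16() are local stand-ins for NVME_AQ_DEPTH and cpu_to_le16():

```c
#include <stdint.h>
#include <stdio.h>

#define AQ_DEPTH 32	/* stand-in for NVME_AQ_DEPTH */

/* Identity on little-endian hosts; stands in for cpu_to_le16(). */
static uint16_t to_le16(uint16_t v)
{
	return v;
}

/* Pick the zero-based sqsize carried in a Connect command. */
static uint16_t connect_sqsize(uint16_t qid, uint16_t ctrl_sqsize)
{
	if (qid)				/* I/O queue */
		return to_le16(ctrl_sqsize);
	return to_le16(AQ_DEPTH - 1);		/* admin queue: fixed depth */
}

int main(void)
{
	printf("admin connect sqsize: %u\n", connect_sqsize(0, 127));
	printf("io    connect sqsize: %u\n", connect_sqsize(1, 127));
	return 0;
}
```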
sysfs.c:
   196  nvme_show_int_function(sqsize);  (variable)
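The controller's sqsize is also exported read-only through sysfs by the nvme_show_int_function() macro, which stamps out one show handler per integer field. A hedged illustration of that field-to-handler macro pattern in plain C; the macro below is invented for the sketch, not the kernel's exact definition:

```c
#include <stdint.h>
#include <stdio.h>

struct ctrl {
	uint16_t sqsize;
	int cntlid;
};

/* Illustrative macro: generate one "show" helper per integer field,
 * in the spirit of nvme_show_int_function(). */
#define show_int_function(field)					\
static int field##_show(const struct ctrl *c, char *buf, size_t len)	\
{									\
	return snprintf(buf, len, "%d\n", (int)c->field);		\
}

show_int_function(sqsize)
show_int_function(cntlid)

int main(void)
{
	struct ctrl c = { .sqsize = 127, .cntlid = 3 };
	char buf[32];

	sqsize_show(&c, buf, sizeof(buf));
	printf("sqsize attr: %s", buf);
	cntlid_show(&c, buf, sizeof(buf));
	printf("cntlid attr: %s", buf);
	return 0;
}
```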
/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/
i40iw_main.c:
   556  u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;  in i40iw_create_cqp()  (local)
   564  cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);  in i40iw_create_cqp()
   567  cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);  in i40iw_create_cqp()
   576  (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),  in i40iw_create_cqp()
   588  cqp_init_info.sq_size = sqsize;  in i40iw_create_cqp()
   611  for (i = 0; i < sqsize; i++) {  in i40iw_create_cqp()
/kernel/linux/linux-6.6/drivers/infiniband/hw/irdma/
hw.c:
   929  u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;  in irdma_create_cqp()  (local)
   937  cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);  in irdma_create_cqp()
   941  cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);  in irdma_create_cqp()
   949  cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,  in irdma_create_cqp()
   967  cqp_init_info.sq_size = sqsize;  in irdma_create_cqp()
  1005  for (i = 0; i < sqsize; i++) {  in irdma_create_cqp()
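In i40iw/irdma, sqsize is a fixed software CQP ring size that dimensions several parallel allocations: the request and scratch arrays (kcalloc) and the WQE ring itself (element size times sqsize, aligned). A user-space sketch of that sizing, with invented element types and an assumed 512-byte alignment in place of the driver's constants:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CQP_SW_SQSIZE 2048	/* stand-in for IRDMA_CQP_SW_SQSIZE_2048 */
#define RING_ALIGN    512	/* assumed alignment for the WQE ring */

struct cqp_sq_wqe { uint8_t raw[64]; };		/* placeholder WQE */
struct cqp_request { int pending; };		/* placeholder request */

static size_t align_up(size_t v, size_t a)
{
	return (v + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int sqsize = CQP_SW_SQSIZE;

	/* One request slot and one scratch slot per SQ entry. */
	struct cqp_request *reqs = calloc(sqsize, sizeof(*reqs));
	void **scratch = calloc(sqsize, sizeof(*scratch));
	/* WQE ring: sqsize entries, rounded up to the assumed alignment. */
	size_t ring_bytes = align_up(sizeof(struct cqp_sq_wqe) * sqsize, RING_ALIGN);
	void *ring = malloc(ring_bytes);

	if (!reqs || !scratch || !ring)
		return 1;
	printf("sq_size=%u, ring=%zu bytes\n", sqsize, ring_bytes);
	free(ring);
	free(scratch);
	free(reqs);
	return 0;
}
```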
/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/
qp.c:
  2121  unsigned int sqsize, rqsize = 0;  in c4iw_create_qp()  (local)
  2153  sqsize = attrs->cap.max_send_wr + 1;  in c4iw_create_qp()
  2154  if (sqsize < 8)  in c4iw_create_qp()
  2155  sqsize = 8;  in c4iw_create_qp()
  2167  qhp->wq.sq.size = sqsize;  in c4iw_create_qp()
  2169  (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *  in c4iw_create_qp()
  2193  attrs->cap.max_send_wr = sqsize - 1;  in c4iw_create_qp()
/kernel/linux/linux-6.6/drivers/infiniband/hw/cxgb4/
qp.c:
  2119  unsigned int sqsize, rqsize = 0;  in c4iw_create_qp()  (local)
  2149  sqsize = attrs->cap.max_send_wr + 1;  in c4iw_create_qp()
  2150  if (sqsize < 8)  in c4iw_create_qp()
  2151  sqsize = 8;  in c4iw_create_qp()
  2157  qhp->wq.sq.size = sqsize;  in c4iw_create_qp()
  2159  (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *  in c4iw_create_qp()
  2183  attrs->cap.max_send_wr = sqsize - 1;  in c4iw_create_qp()
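cxgb4 derives its send-queue size from the verbs capability requested by the caller: one spare slot on top of max_send_wr, a floor of 8 entries, hardware status entries added when sizing queue memory, and max_send_wr reported back as sqsize - 1. A short sketch of that arithmetic with assumed status-entry and WQE sizes:

```c
#include <stdio.h>

#define MIN_SQSIZE	   8	/* floor used in the hits above */
#define EQ_STATUS_ENTRIES  1	/* assumed t4_eq_status_entries value */
#define WQE_BYTES	  64	/* assumed per-entry size, for illustration */

int main(void)
{
	unsigned int max_send_wr = 5;			/* from ib_qp_init_attr.cap */
	unsigned int sqsize = max_send_wr + 1;		/* one spare slot */

	if (sqsize < MIN_SQSIZE)
		sqsize = MIN_SQSIZE;

	/* Queue memory also covers the hardware status entries. */
	unsigned int memsize = (sqsize + EQ_STATUS_ENTRIES) * WQE_BYTES;

	/* What the caller is told it actually got. */
	unsigned int reported_max_send_wr = sqsize - 1;

	printf("sqsize=%u memsize=%u reported max_send_wr=%u\n",
	       sqsize, memsize, reported_max_send_wr);
	return 0;
}
```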