Lines matching defs:fusion in drivers/scsi/megaraid/megaraid_sas_fusion.c (Linux megaraid_sas driver). Each entry is the source line number followed by the matched line.
267 struct fusion_context *fusion;
269 fusion = instance->ctrl_context;
270 return fusion->cmd_list[blk_tag];
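
These three lines are the whole tag-to-command lookup: fusion commands are indexed directly by the block-layer tag, so no free list or search is needed. A reconstruction of the accessor from the matched lines, assuming the driver's own headers for struct megasas_instance, fusion_context and megasas_cmd_fusion, and that blk_tag has already been bounded by the block layer:

/* Constant-time lookup of the fusion command for a block-layer tag. */
static inline struct megasas_cmd_fusion *
megasas_get_cmd_fusion(struct megasas_instance *instance, u32 blk_tag)
{
        struct fusion_context *fusion = instance->ctrl_context;

        return fusion->cmd_list[blk_tag];
}
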
335 * This function is only for fusion controllers.
423 struct fusion_context *fusion = instance->ctrl_context;
426 if (fusion->sense)
427 dma_pool_free(fusion->sense_dma_pool, fusion->sense,
428 fusion->sense_phys_addr);
431 if (fusion->cmd_list) {
433 cmd = fusion->cmd_list[i];
436 dma_pool_free(fusion->sg_dma_pool,
442 kfree(fusion->cmd_list);
445 if (fusion->sg_dma_pool) {
446 dma_pool_destroy(fusion->sg_dma_pool);
447 fusion->sg_dma_pool = NULL;
449 if (fusion->sense_dma_pool) {
450 dma_pool_destroy(fusion->sense_dma_pool);
451 fusion->sense_dma_pool = NULL;
462 if (fusion->req_frames_desc)
464 fusion->request_alloc_sz, fusion->req_frames_desc,
465 fusion->req_frames_desc_phys);
466 if (fusion->io_request_frames)
467 dma_pool_free(fusion->io_request_frames_pool,
468 fusion->io_request_frames,
469 fusion->io_request_frames_phys);
470 if (fusion->io_request_frames_pool) {
471 dma_pool_destroy(fusion->io_request_frames_pool);
472 fusion->io_request_frames_pool = NULL;
485 struct fusion_context *fusion;
490 fusion = instance->ctrl_context;
494 fusion->sg_dma_pool =
499 fusion->sense_dma_pool =
503 if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
509 fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
510 GFP_KERNEL, &fusion->sense_phys_addr);
511 if (!fusion->sense) {
528 if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
530 dma_pool_free(fusion->sense_dma_pool, fusion->sense,
531 fusion->sense_phys_addr);
532 fusion->sense = NULL;
533 dma_pool_destroy(fusion->sense_dma_pool);
535 fusion->sense_dma_pool =
539 if (!fusion->sense_dma_pool) {
544 fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
546 &fusion->sense_phys_addr);
547 if (!fusion->sense) {
558 cmd = fusion->cmd_list[i];
559 cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
563 cmd->sense = (u8 *)fusion->sense + offset;
564 cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
575 cmd = fusion->cmd_list[i];
577 cmd->sense = (u8 *)fusion->sense + offset;
578 cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
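
Lines 494-578 set up per-command scatter-gather chain frames and sense buffers: SG frames come one per command from sg_dma_pool (line 559), while sense buffers are carved out of a single DMA allocation, each command taking a fixed-offset slice (the second loop at 575-578 does the same for the commands beyond the first batch). A sketch of the carve-out, assuming the driver's headers; the helper name is illustrative and the SCSI_SENSE_BUFFERSIZE stride is an assumption, since only the base + offset arithmetic appears in the matched lines:

/* Hand each command a slice of the one large DMA-able sense buffer. */
static void megasas_attach_sense_sketch(struct fusion_context *fusion,
                                        u32 nr_cmds)
{
        struct megasas_cmd_fusion *cmd;
        u32 i, offset;

        for (i = 0; i < nr_cmds; i++) {
                offset = SCSI_SENSE_BUFFERSIZE * i;
                cmd = fusion->cmd_list[i];
                cmd->sense = (u8 *)fusion->sense + offset;
                cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
        }
}
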
589 struct fusion_context *fusion;
591 fusion = instance->ctrl_context;
596 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
600 fusion->cmd_list =
603 if (!fusion->cmd_list) {
610 fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
612 if (!fusion->cmd_list[i]) {
614 kfree(fusion->cmd_list[j]);
615 kfree(fusion->cmd_list);
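
megasas_alloc_cmdlist_fusion (lines 589-615) builds cmd_list as an array of pointers and then kzalloc()s each megasas_cmd_fusion separately, unwinding only what was already allocated if one element fails; keeping the commands individually allocated avoids one very large contiguous allocation. A sketch of the idiom, with the element count (instance->max_mpt_cmds) assumed, since the kcalloc arguments sit on lines that do not match:

static int megasas_alloc_cmdlist_sketch(struct megasas_instance *instance)
{
        struct fusion_context *fusion = instance->ctrl_context;
        u32 i, j;

        fusion->cmd_list = kcalloc(instance->max_mpt_cmds,
                                   sizeof(struct megasas_cmd_fusion *),
                                   GFP_KERNEL);
        if (!fusion->cmd_list)
                return -ENOMEM;

        for (i = 0; i < instance->max_mpt_cmds; i++) {
                fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
                                              GFP_KERNEL);
                if (!fusion->cmd_list[i]) {
                        /* Unwind the elements allocated so far. */
                        for (j = 0; j < i; j++)
                                kfree(fusion->cmd_list[j]);
                        kfree(fusion->cmd_list);
                        fusion->cmd_list = NULL;
                        return -ENOMEM;
                }
        }
        return 0;
}
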
628 struct fusion_context *fusion;
630 fusion = instance->ctrl_context;
633 fusion->io_request_frames_pool =
635 fusion->io_frames_alloc_sz, 16, 0);
637 if (!fusion->io_request_frames_pool) {
643 fusion->io_request_frames =
644 dma_pool_alloc(fusion->io_request_frames_pool,
646 &fusion->io_request_frames_phys);
647 if (!fusion->io_request_frames) {
650 dma_pool_destroy(fusion->io_request_frames_pool);
661 fusion->io_request_frames_phys,
662 fusion->io_frames_alloc_sz)) {
663 dma_pool_free(fusion->io_request_frames_pool,
664 fusion->io_request_frames,
665 fusion->io_request_frames_phys);
666 fusion->io_request_frames = NULL;
667 dma_pool_destroy(fusion->io_request_frames_pool);
669 fusion->io_request_frames_pool =
672 fusion->io_frames_alloc_sz,
673 roundup_pow_of_two(fusion->io_frames_alloc_sz),
676 if (!fusion->io_request_frames_pool) {
682 fusion->io_request_frames =
683 dma_pool_alloc(fusion->io_request_frames_pool,
685 &fusion->io_request_frames_phys);
687 if (!fusion->io_request_frames) {
694 fusion->req_frames_desc =
696 fusion->request_alloc_sz,
697 &fusion->req_frames_desc_phys, GFP_KERNEL);
698 if (!fusion->req_frames_desc) {
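
megasas_alloc_request_fusion (lines 628-698) shows the driver's same-4GB-region guard for the MPT I/O request frames: after the first dma_pool_alloc(), megasas_check_same_4gb_region() rejects a buffer whose start and end differ in their upper 32 address bits (a false return means the buffer straddles a 4 GB boundary, as the sense-buffer test at line 528 shows). On rejection the buffer and pool are discarded and the pool is recreated with its alignment rounded up to a power of two of the allocation size, so the retried allocation cannot cross the boundary. A condensed sketch, assuming the driver's headers; the pool names and GFP flags are assumptions:

static int megasas_alloc_ioreq_frames_sketch(struct megasas_instance *instance)
{
        struct fusion_context *fusion = instance->ctrl_context;
        struct device *dev = &instance->pdev->dev;

        fusion->io_request_frames_pool =
                dma_pool_create("mr_ioreq", dev,
                                fusion->io_frames_alloc_sz, 16, 0);
        if (!fusion->io_request_frames_pool)
                return -ENOMEM;

        fusion->io_request_frames =
                dma_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
                               &fusion->io_request_frames_phys);
        if (!fusion->io_request_frames)
                goto fail;

        if (!megasas_check_same_4gb_region(instance,
                                           fusion->io_request_frames_phys,
                                           fusion->io_frames_alloc_sz)) {
                /* Buffer straddles a 4 GB boundary: rebuild the pool with
                 * power-of-two alignment so the retry is naturally contained.
                 */
                dma_pool_free(fusion->io_request_frames_pool,
                              fusion->io_request_frames,
                              fusion->io_request_frames_phys);
                dma_pool_destroy(fusion->io_request_frames_pool);

                fusion->io_request_frames_pool =
                        dma_pool_create("mr_ioreq_align", dev,
                                        fusion->io_frames_alloc_sz,
                                        roundup_pow_of_two(fusion->io_frames_alloc_sz),
                                        0);
                if (!fusion->io_request_frames_pool)
                        return -ENOMEM;

                fusion->io_request_frames =
                        dma_pool_alloc(fusion->io_request_frames_pool,
                                       GFP_KERNEL,
                                       &fusion->io_request_frames_phys);
                if (!fusion->io_request_frames)
                        goto fail;
        }
        return 0;

fail:
        dma_pool_destroy(fusion->io_request_frames_pool);
        fusion->io_request_frames_pool = NULL;
        return -ENOMEM;
}

The reply descriptor allocation at lines 718-762 and the RDPQ chunks below apply the same check-and-retry.
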
711 struct fusion_context *fusion;
713 fusion = instance->ctrl_context;
718 fusion->reply_frames_desc_pool =
720 fusion->reply_alloc_sz * count, 16, 0);
722 if (!fusion->reply_frames_desc_pool) {
728 fusion->reply_frames_desc[0] =
729 dma_pool_alloc(fusion->reply_frames_desc_pool,
730 GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
731 if (!fusion->reply_frames_desc[0]) {
738 fusion->reply_frames_desc_phys[0],
739 (fusion->reply_alloc_sz * count))) {
740 dma_pool_free(fusion->reply_frames_desc_pool,
741 fusion->reply_frames_desc[0],
742 fusion->reply_frames_desc_phys[0]);
743 fusion->reply_frames_desc[0] = NULL;
744 dma_pool_destroy(fusion->reply_frames_desc_pool);
746 fusion->reply_frames_desc_pool =
749 fusion->reply_alloc_sz * count,
750 roundup_pow_of_two(fusion->reply_alloc_sz * count),
753 if (!fusion->reply_frames_desc_pool) {
759 fusion->reply_frames_desc[0] =
760 dma_pool_alloc(fusion->reply_frames_desc_pool,
762 &fusion->reply_frames_desc_phys[0]);
764 if (!fusion->reply_frames_desc[0]) {
771 reply_desc = fusion->reply_frames_desc[0];
772 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
779 fusion->reply_frames_desc[i + 1] =
780 fusion->reply_frames_desc[i] +
781 (fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
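
megasas_alloc_reply_fusion (lines 711-781) allocates the reply descriptors for all MSI-x vectors in one region (with the same 4 GB retry as above), marks every descriptor unused, and then derives the per-queue base pointers purely by arithmetic: since reply_alloc_sz holds reply_q_depth descriptors (line 1733), queue i + 1 starts reply_q_depth descriptors after queue i. A sketch of the initialization and partitioning, assuming the driver's headers; writing ULLONG_MAX into ->Words is the "unused" pattern the completion path later looks for:

static void megasas_init_reply_queues_sketch(struct fusion_context *fusion,
                                             int count)
{
        union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
        int i;

        /* Mark every descriptor in every queue as not yet posted. */
        reply_desc = fusion->reply_frames_desc[0];
        for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
                reply_desc->Words = cpu_to_le64(ULLONG_MAX);

        /* Split the single allocation into per-MSI-x queue bases. */
        for (i = 0; i < count - 1; i++)
                fusion->reply_frames_desc[i + 1] =
                        fusion->reply_frames_desc[i] +
                        fusion->reply_alloc_sz /
                        sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
}
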
790 struct fusion_context *fusion;
797 fusion = instance->ctrl_context;
798 chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
802 fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
803 array_size, &fusion->rdpq_phys,
805 if (!fusion->rdpq_virt) {
814 fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
817 fusion->reply_frames_desc_pool_align =
824 if (!fusion->reply_frames_desc_pool ||
825 !fusion->reply_frames_desc_pool_align) {
843 dma_pool_alloc(fusion->reply_frames_desc_pool,
864 dma_pool_free(fusion->reply_frames_desc_pool,
869 dma_pool_alloc(fusion->reply_frames_desc_pool_align,
877 fusion->rdpq_tracker[i].dma_pool_ptr =
878 fusion->reply_frames_desc_pool_align;
880 fusion->rdpq_tracker[i].dma_pool_ptr =
881 fusion->reply_frames_desc_pool;
884 fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
885 fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
894 offset = fusion->reply_alloc_sz * i;
895 fusion->rdpq_virt[abs_index].RDPQBaseAddress =
897 fusion->reply_frames_desc_phys[abs_index] =
899 fusion->reply_frames_desc[abs_index] =
902 reply_desc = fusion->reply_frames_desc[abs_index];
903 for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
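
megasas_alloc_rdpq_fusion (lines 790-903) is used when the controller runs in RDPQ mode (instance->is_rdpq): the reply rings are allocated in chunks of RDPQ_MAX_INDEX_IN_ONE_CHUNK queues, each chunk taken either from reply_frames_desc_pool or from the power-of-two-aligned fallback pool, and fusion->rdpq_tracker[] remembers, per chunk, which pool it came from plus its virtual and DMA addresses so megasas_free_rdpq_fusion() (lines 915-933) can return it to the matching pool. A reduced sketch of only that bookkeeping; the helper names and the use_align_pool flag are illustrative stand-ins for the same-4GB-region decision made in the real code:

static void megasas_track_rdpq_chunk(struct fusion_context *fusion, int i,
                                     void *chunk_virt, dma_addr_t chunk_phys,
                                     bool use_align_pool)
{
        /* Record which pool produced this chunk so teardown can pair
         * dma_pool_free() with the right dma_pool.
         */
        fusion->rdpq_tracker[i].dma_pool_ptr = use_align_pool ?
                fusion->reply_frames_desc_pool_align :
                fusion->reply_frames_desc_pool;
        fusion->rdpq_tracker[i].pool_entry_virt = chunk_virt;
        fusion->rdpq_tracker[i].pool_entry_phys = chunk_phys;
}

static void megasas_free_rdpq_chunks_sketch(struct fusion_context *fusion,
                                            int nr_chunks)
{
        int i;

        for (i = 0; i < nr_chunks; i++)
                if (fusion->rdpq_tracker[i].pool_entry_virt)
                        dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
                                      fusion->rdpq_tracker[i].pool_entry_virt,
                                      fusion->rdpq_tracker[i].pool_entry_phys);
}
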
915 struct fusion_context *fusion;
917 fusion = instance->ctrl_context;
920 if (fusion->rdpq_tracker[i].pool_entry_virt)
921 dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
922 fusion->rdpq_tracker[i].pool_entry_virt,
923 fusion->rdpq_tracker[i].pool_entry_phys);
927 dma_pool_destroy(fusion->reply_frames_desc_pool);
928 dma_pool_destroy(fusion->reply_frames_desc_pool_align);
930 if (fusion->rdpq_virt)
933 fusion->rdpq_virt, fusion->rdpq_phys);
939 struct fusion_context *fusion;
941 fusion = instance->ctrl_context;
943 if (fusion->reply_frames_desc[0])
944 dma_pool_free(fusion->reply_frames_desc_pool,
945 fusion->reply_frames_desc[0],
946 fusion->reply_frames_desc_phys[0]);
948 dma_pool_destroy(fusion->reply_frames_desc_pool);
973 struct fusion_context *fusion;
980 fusion = instance->ctrl_context;
996 io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
997 io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
1000 * Add all the commands to command pool (fusion->cmd_pool)
1005 cmd = fusion->cmd_list[i];
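
megasas_alloc_cmds_fusion (lines 973-1005) finishes the setup by pointing every command at its fixed-size MPT I/O request frame. io_req_base skips the first MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE bytes because SMID 0 is reserved by the MPT interface, so command i carries SMID i + 1 and uses the (i + 1)-th frame. A sketch of the attachment loop, assuming the driver's headers; per-command fields beyond index and io_request are omitted and the helper name is illustrative:

static void megasas_attach_io_frames_sketch(struct fusion_context *fusion,
                                            u32 max_mpt_cmds)
{
        struct megasas_cmd_fusion *cmd;
        dma_addr_t io_req_base_phys;
        u8 *io_req_base;
        u32 i, offset;

        /* Frame 0 belongs to the reserved SMID 0. */
        io_req_base = fusion->io_request_frames +
                      MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
        io_req_base_phys = fusion->io_request_frames_phys +
                           MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;

        for (i = 0; i < max_mpt_cmds; i++) {
                cmd = fusion->cmd_list[i];
                offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
                cmd->index = i + 1;      /* SMID, 1-based */
                cmd->io_request = (struct MPI2_RAID_SCSI_IO_REQUEST *)
                                  (io_req_base + offset);
                cmd->io_request_phys_addr = io_req_base_phys + offset;
        }
}
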
1088 struct fusion_context *fusion;
1099 fusion = instance->ctrl_context;
1101 ioc_init_handle = fusion->ioc_init_request_phys;
1102 IOCInitMessage = fusion->ioc_init_request;
1104 cmd = fusion->ioc_init_cmd;
1155 IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
1157 cpu_to_le64(fusion->rdpq_phys) :
1158 cpu_to_le64(fusion->reply_frames_desc_phys[0]);
1161 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
1162 IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
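
megasas_ioc_init_fusion (lines 1088-1162) packages all of the above into the MPI2 IOC INIT message handed to the firmware at init time: the reply queue depth, the reply descriptor base (the RDPQ array when instance->is_rdpq is set, otherwise queue 0's base, matching the ternary at line 1157), the system request frame base, and the upper half of the sense buffer address. A sketch covering only the fields visible in this listing, assuming a zeroed struct MPI2_IOC_INIT_REQUEST from the driver's MPI headers; the real message also sets Function, MsgVersion, frame sizes, the time stamp and more:

static void megasas_fill_ioc_init_sketch(struct megasas_instance *instance,
                                         struct MPI2_IOC_INIT_REQUEST *init)
{
        struct fusion_context *fusion = instance->ctrl_context;

        init->ReplyDescriptorPostQueueDepth =
                cpu_to_le16(fusion->reply_q_depth);
        init->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
                cpu_to_le64(fusion->rdpq_phys) :
                cpu_to_le64(fusion->reply_frames_desc_phys[0]);
        init->SystemRequestFrameBaseAddress =
                cpu_to_le64(fusion->io_request_frames_phys);
        init->SenseBufferAddressHigh =
                cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
}
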
1310 struct fusion_context *fusion = instance->ctrl_context;
1314 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
1315 pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
1404 struct fusion_context *fusion;
1413 fusion = instance->ctrl_context;
1415 if (!fusion) {
1422 size_map_info = fusion->current_map_sz;
1424 ci = (void *) fusion->ld_map[(instance->map_id & 1)];
1425 ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
1433 memset(ci, 0, fusion->max_map_sz);
1465 struct fusion_context *fusion = instance->ctrl_context;
1467 fusion->fast_path_io = 0;
1470 fusion->fast_path_io = 1;
1492 struct fusion_context *fusion;
1507 fusion = instance->ctrl_context;
1509 if (!fusion) {
1514 map = fusion->ld_drv_map[instance->map_id & 1];
1523 fusion->ld_map[(instance->map_id - 1) & 1];
1524 memset(ci, 0, fusion->max_map_sz);
1526 ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
1536 size_map_info = fusion->current_map_sz;
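
Lines 1404-1536 (megasas_get_ld_map_info and megasas_sync_map_info) rely on double-buffered RAID maps: two ld_map[] / ld_map_phys[] slots exist, and the low bit of instance->map_id, which advances as new maps are fetched, selects which slot a given DCMD uses, so the firmware can fill one copy while the driver still works from the other. A tiny sketch of the selection; the helper is purely illustrative:

static void *megasas_current_ld_map_sketch(struct megasas_instance *instance,
                                           dma_addr_t *phys)
{
        struct fusion_context *fusion = instance->ctrl_context;
        u32 idx = instance->map_id & 1;   /* ping-pong between two buffers */

        *phys = fusion->ld_map_phys[idx];
        return fusion->ld_map[idx];
}
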
1639 struct fusion_context *fusion;
1642 fusion = instance->ctrl_context;
1644 fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1647 fusion->ld_map[i] = NULL;
1649 fusion->ld_drv_map[i] = (void *)
1651 fusion->drv_map_pages);
1653 if (!fusion->ld_drv_map[i]) {
1654 fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);
1656 if (!fusion->ld_drv_map[i]) {
1660 fusion->drv_map_sz);
1667 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1668 fusion->max_map_sz,
1669 &fusion->ld_map_phys[i],
1671 if (!fusion->ld_map[i]) {
1683 if (fusion->ld_map[i])
1685 fusion->max_map_sz,
1686 fusion->ld_map[i],
1687 fusion->ld_map_phys[i]);
1692 if (fusion->ld_drv_map[i]) {
1693 if (is_vmalloc_addr(fusion->ld_drv_map[i]))
1694 vfree(fusion->ld_drv_map[i]);
1696 free_pages((ulong)fusion->ld_drv_map[i],
1697 fusion->drv_map_pages);
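
megasas_allocate_raid_maps (lines 1639-1697) uses a common idiom for its large, CPU-only driver map: try __get_free_pages() first and fall back to vzalloc() when contiguous memory is unavailable, which obliges the free path to ask is_vmalloc_addr() which allocator produced the pointer. The same idiom reappears for log_to_span and load_balance_info in megasas_alloc_fusion_context (lines 5304-5329) and their frees (lines 5343-5356). A generic sketch; the GFP flags and helper names are assumptions:

static void *megasas_alloc_big_buf_sketch(size_t size, unsigned int *order_out)
{
        unsigned int order = get_order(size);
        void *buf;

        /* Prefer physically contiguous pages, fall back to vmalloc space. */
        buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!buf)
                buf = vzalloc(size);

        *order_out = order;
        return buf;
}

static void megasas_free_big_buf_sketch(void *buf, unsigned int order)
{
        if (!buf)
                return;
        if (is_vmalloc_addr(buf))
                vfree(buf);
        else
                free_pages((unsigned long)buf, order);
}
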
1714 struct fusion_context *fusion;
1717 fusion = instance->ctrl_context;
1729 fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;
1731 fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
1733 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
1734 (fusion->reply_q_depth);
1735 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
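
megasas_configure_queue_sizes (lines 1714-1735) derives every ring size from the firmware's command count. Line 1729 rounds (max_cmd + 1) up to the next multiple of 16 and doubles it: with, say, max_cmd = 1008, (1008 + 1 + 15) / 16 = 64 in integer arithmetic, so reply_q_depth = 2 * 64 * 16 = 2048 descriptors, and with 8-byte MPI2 reply descriptors reply_alloc_sz (line 1733) comes to 16 KiB per reply queue. request_alloc_sz and io_frames_alloc_sz scale with the command count in the same way; their multipliers sit on continuation lines that do not contain "fusion" and therefore are not shown in this listing.
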
1742 struct fusion_context *fusion;
1745 fusion = instance->ctrl_context;
1766 fusion->ioc_init_cmd = cmd;
1776 struct fusion_context *fusion;
1778 fusion = instance->ctrl_context;
1780 if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
1783 fusion->ioc_init_cmd->frame,
1784 fusion->ioc_init_cmd->frame_phys_addr);
1786 kfree(fusion->ioc_init_cmd);
1798 struct fusion_context *fusion;
1803 fusion = instance->ctrl_context;
1839 fusion->max_sge_in_main_msg =
1843 fusion->max_sge_in_chain =
1848 rounddown_pow_of_two(fusion->max_sge_in_main_msg
1849 + fusion->max_sge_in_chain - 2);
1852 fusion->chain_offset_mfi_pthru =
1855 fusion->chain_offset_io_request =
1863 fusion->last_reply_idx[i] = 0;
1866 * For fusion adapters, 3 commands for IOCTL and 8 commands
1875 atomic_set(&fusion->busy_mq_poll[i], 0);
1916 fusion->fast_path_io = 0;
2022 * @fusion: fusion context
2030 map_cmd_status(struct fusion_context *fusion,
2277 struct fusion_context *fusion;
2279 fusion = instance->ctrl_context;
2283 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
2297 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
2298 (sge_count > fusion->max_sge_in_main_msg)) {
2306 fusion->
2312 fusion->chain_offset_io_request;
2566 struct fusion_context *fusion = instance->ctrl_context;
2569 = fusion->stream_detect_by_ld[device_id];
2638 * @fusion: Fusion context
2647 megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
2708 if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) &&
2736 struct fusion_context *fusion;
2747 fusion = instance->ctrl_context;
2823 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2829 if (!raid || (!fusion->fast_path_io)) {
2856 } else if (fusion->pcie_bw_limitation &&
2883 megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext,
2912 if (fusion->load_balance_info &&
2913 (fusion->load_balance_info[device_id].loadBalanceFlag) &&
2917 &fusion->load_balance_info[device_id],
2989 struct fusion_context *fusion = instance->ctrl_context;
2999 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
3008 if (fusion->fast_path_io && (
3085 struct fusion_context *fusion = instance->ctrl_context;
3086 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
3147 if (fusion->fast_path_io) {
3149 fusion->ld_drv_map[(instance->map_id & 1)];
3305 struct fusion_context *fusion;
3307 fusion = instance->ctrl_context;
3308 p = fusion->req_frames_desc +
3323 struct fusion_context *fusion;
3324 fusion = instance->ctrl_context;
3330 (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
3458 struct fusion_context *fusion;
3464 fusion = instance->ctrl_context;
3467 r1_cmd = fusion->cmd_list[peer_smid - 1];
3487 map_cmd_status(fusion, scmd_local, status, ex_status,
3550 struct fusion_context *fusion;
3563 fusion = instance->ctrl_context;
3571 desc = fusion->reply_frames_desc[MSIxIndex] +
3572 fusion->last_reply_idx[MSIxIndex];
3592 cmd_fusion = fusion->cmd_list[smid - 1];
3615 if (fusion->load_balance_info &&
3619 lbinfo = &fusion->load_balance_info[device_id];
3628 map_cmd_status(fusion, scmd_local, status,
3655 fusion->last_reply_idx[MSIxIndex]++;
3656 if (fusion->last_reply_idx[MSIxIndex] >=
3657 fusion->reply_q_depth)
3658 fusion->last_reply_idx[MSIxIndex] = 0;
3665 if (!fusion->last_reply_idx[MSIxIndex])
3666 desc = fusion->reply_frames_desc[MSIxIndex];
3688 fusion->last_reply_idx[MSIxIndex],
3692 fusion->last_reply_idx[MSIxIndex],
3711 fusion->last_reply_idx[MSIxIndex],
3715 fusion->last_reply_idx[MSIxIndex],
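
complete_cmd_fusion (lines 3550-3715) drains one MSI-x reply queue: it resumes at last_reply_idx[MSIxIndex], maps each descriptor's SMID back to its command via cmd_list[smid - 1], completes it, and advances the index with wrap-around at reply_q_depth, resetting the descriptor pointer to the queue base whenever the index wraps to 0. A skeleton of the ring walk only, assuming the driver's headers; the all-ones Words test stands in for the driver's check of the descriptor's ReplyFlags against the unused type (the value matches what the descriptors were initialized with), and the completion work plus the reply-post host index write-back are omitted:

static void megasas_drain_reply_queue_sketch(struct megasas_instance *instance,
                                             u32 MSIxIndex)
{
        struct fusion_context *fusion = instance->ctrl_context;
        union MPI2_REPLY_DESCRIPTORS_UNION *desc;
        struct megasas_cmd_fusion *cmd_fusion;
        u16 smid;

        desc = fusion->reply_frames_desc[MSIxIndex] +
               fusion->last_reply_idx[MSIxIndex];

        while (desc->Words != cpu_to_le64(ULLONG_MAX)) {
                smid = le16_to_cpu(desc->SCSIIOSuccess.SMID);
                cmd_fusion = fusion->cmd_list[smid - 1];
                /* ... complete cmd_fusion here ... */

                fusion->last_reply_idx[MSIxIndex]++;
                if (fusion->last_reply_idx[MSIxIndex] >= fusion->reply_q_depth)
                        fusion->last_reply_idx[MSIxIndex] = 0;

                /* Wrap the descriptor pointer with the index. */
                if (!fusion->last_reply_idx[MSIxIndex])
                        desc = fusion->reply_frames_desc[MSIxIndex];
                else
                        desc++;
        }
}
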
3730 struct fusion_context *fusion;
3734 fusion = instance->ctrl_context;
3738 if (!atomic_add_unless(&fusion->busy_mq_poll[queue_num], 1, 1))
3742 atomic_dec(&fusion->busy_mq_poll[queue_num]);
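
megasas_blk_mq_poll (lines 3730-3742) uses busy_mq_poll[] as a per-queue try-lock between the interrupt handler and the blk-mq polling path: atomic_add_unless(..., 1, 1) only increments when the counter is still 0, so whichever context wins drains the queue and the loser simply reports no work. A sketch with the actual draining reduced to a placeholder:

static int megasas_poll_queue_sketch(struct megasas_instance *instance,
                                     u32 queue_num)
{
        struct fusion_context *fusion = instance->ctrl_context;
        int num_completed = 0;

        /* atomic_add_unless() returns 0 when the counter already equals 1,
         * i.e. another context is draining this queue, so back off.
         */
        if (!atomic_add_unless(&fusion->busy_mq_poll[queue_num], 1, 1))
                return 0;

        /* ... drain the reply queue here, counting completions ... */

        atomic_dec(&fusion->busy_mq_poll[queue_num]);
        return num_completed;
}
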
3894 struct fusion_context *fusion;
3897 fusion = instance->ctrl_context;
3919 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
3929 io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
4167 /* This function waits for outstanding commands on fusion to complete */
4277 struct fusion_context *fusion;
4280 fusion = instance->ctrl_context;
4285 fusion->last_reply_idx[i] = 0;
4286 reply_desc = fusion->reply_frames_desc[i];
4287 for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
4301 struct fusion_context *fusion;
4310 fusion = instance->ctrl_context;
4316 cmd_fusion = fusion->cmd_list[j];
4401 struct fusion_context *fusion;
4404 fusion = instance->ctrl_context;
4407 cmd_fusion = fusion->cmd_list[i];
4436 struct fusion_context *fusion;
4437 fusion = instance->ctrl_context;
4440 cmd_fusion = fusion->cmd_list[i];
4536 struct fusion_context *fusion = NULL;
4542 fusion = instance->ctrl_context;
4618 scsi_lookup = fusion->cmd_list[smid_task - 1];
4656 * megasas_fusion_smid_lookup : Look for fusion command corresponding to SCSI
4666 struct fusion_context *fusion;
4670 fusion = instance->ctrl_context;
4673 cmd_fusion = fusion->cmd_list[i];
4696 struct fusion_context *fusion;
4701 fusion = instance->ctrl_context;
4707 pd_sync = (void *)fusion->pd_seq_sync
4723 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
4803 * megasas_reset_target_fusion : target reset function for fusion adapters
4903 /* Core fusion reset function */
4909 struct fusion_context *fusion;
4920 fusion = instance->ctrl_context;
4980 dev_warn(&instance->pdev->dev, "resetting fusion "
4990 cmd_fusion = fusion->cmd_list[i];
4993 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
5081 if (fusion->load_balance_info)
5082 memset(fusion->load_balance_info, 0,
5102 memset(fusion->stream_detect_by_ld[j],
5104 fusion->stream_detect_by_ld[j]->mru_bit_map
5288 /* Allocate fusion context */
5292 struct fusion_context *fusion;
5302 fusion = instance->ctrl_context;
5304 fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
5306 fusion->log_to_span =
5308 fusion->log_to_span_pages);
5309 if (!fusion->log_to_span) {
5310 fusion->log_to_span =
5313 if (!fusion->log_to_span) {
5320 fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
5322 fusion->load_balance_info =
5324 fusion->load_balance_info_pages);
5325 if (!fusion->load_balance_info) {
5326 fusion->load_balance_info =
5329 if (!fusion->load_balance_info)
5340 struct fusion_context *fusion = instance->ctrl_context;
5342 if (fusion) {
5343 if (fusion->load_balance_info) {
5344 if (is_vmalloc_addr(fusion->load_balance_info))
5345 vfree(fusion->load_balance_info);
5347 free_pages((ulong)fusion->load_balance_info,
5348 fusion->load_balance_info_pages);
5351 if (fusion->log_to_span) {
5352 if (is_vmalloc_addr(fusion->log_to_span))
5353 vfree(fusion->log_to_span);
5355 free_pages((ulong)fusion->log_to_span,
5356 fusion->log_to_span_pages);
5359 kfree(fusion);