Lines Matching refs:mhba
53 static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
59 static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
106 static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
112 dev_err(&mhba->pdev->dev,
121 dev_err(&mhba->pdev->dev,
130 res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
134 dev_err(&mhba->pdev->dev,
143 dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
151 list_add_tail(&res->entry, &mhba->res_list);
156 static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
160 list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
163 dma_free_coherent(&mhba->pdev->dev, res->size,
170 dev_err(&mhba->pdev->dev,
177 mhba->fw_flag &= ~MVUMI_FW_ALLOC;
182 * @mhba: Adapter soft state
189 static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
198 *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
200 if (*sg_count > mhba->max_sge) {
201 dev_err(&mhba->pdev->dev,
203 *sg_count, mhba->max_sge);
204 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
213 sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
215 m_sg->flags |= 1U << mhba->eot_flag;
217 sgd_inc(mhba, m_sg);
223 static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
233 virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
244 m_sg->flags = 1U << mhba->eot_flag;
245 sgd_setsz(mhba, m_sg, cpu_to_le32(size));
250 static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
257 dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
262 cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
265 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
266 " frame,size = %d.\n", mhba->ib_max_size);
272 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
273 dev_err(&mhba->pdev->dev, "failed to allocate memory"
275 dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
286 static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
296 sgd_getsz(mhba, m_sg, size);
301 dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
304 dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
312 * @mhba: Adapter soft state
316 static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
320 if (likely(!list_empty(&mhba->cmd_pool))) {
321 cmd = list_entry((&mhba->cmd_pool)->next,
325 dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
332 * @mhba: Adapter soft state
335 static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
339 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
344 * @mhba: Adapter soft state
346 static void mvumi_free_cmds(struct mvumi_hba *mhba)
350 while (!list_empty(&mhba->cmd_pool)) {
351 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
354 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
362 * @mhba: Adapter soft state
365 static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
370 for (i = 0; i < mhba->max_io; i++) {
376 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
377 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
378 cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
379 cmd->frame_phys = mhba->ib_frame_phys
380 + i * mhba->ib_max_size;
382 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
389 dev_err(&mhba->pdev->dev,
391 while (!list_empty(&mhba->cmd_pool)) {
392 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
395 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
402 static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
405 struct mvumi_hw_regs *regs = mhba->regs;
407 ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
410 (mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
412 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
413 dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
416 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
417 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
420 return mhba->max_io - atomic_read(&mhba->fw_outstanding);
424 static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
427 if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
429 count = ioread32(mhba->ib_shadow);
435 static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
439 cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
441 if (cur_ib_entry >= mhba->list_num_io) {
442 cur_ib_entry -= mhba->list_num_io;
443 mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
445 mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
446 mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
447 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
448 *ib_entry = mhba->ib_list + cur_ib_entry *
451 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
453 atomic_inc(&mhba->fw_outstanding);
456 static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
458 iowrite32(0xffff, mhba->ib_shadow);
459 iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
462 static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
468 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
471 if (tag > mhba->tag_pool.size) {
472 dev_err(&mhba->pdev->dev, "ob frame data error\n");
475 if (mhba->tag_cmd[tag] == NULL) {
476 dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
478 } else if (mhba->tag_cmd[tag]->request_id != request_id &&
479 mhba->request_id_enabled) {
480 dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
482 mhba->tag_cmd[tag]->request_id);
489 static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
493 struct mvumi_hw_regs *regs = mhba->regs;
497 ob_write_shadow = ioread32(mhba->ob_shadow);
500 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
501 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
504 (mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
505 *assign_obf_end += mhba->list_num_io;
510 static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
514 struct mvumi_hw_regs *regs = mhba->regs;
518 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
519 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
521 *assign_obf_end += mhba->list_num_io;
527 static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
532 struct mvumi_hw_regs *regs = mhba->regs;
534 if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
539 if (cur_obf >= mhba->list_num_io) {
540 cur_obf -= mhba->list_num_io;
541 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
544 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
549 if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
550 mhba->tag_cmd[p_outb_frame->tag] == NULL ||
552 mhba->tag_cmd[p_outb_frame->tag]->request_id))
553 if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
556 if (!list_empty(&mhba->ob_data_list)) {
558 list_first_entry(&mhba->ob_data_list,
564 cur_obf = mhba->list_num_io - 1;
565 mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
571 memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
574 list_add_tail(&ob_data->list, &mhba->free_ob_list);
576 mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
577 mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
578 iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
581 static void mvumi_reset(struct mvumi_hba *mhba)
583 struct mvumi_hw_regs *regs = mhba->regs;
592 static unsigned char mvumi_start(struct mvumi_hba *mhba);
594 static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
596 mhba->fw_state = FW_STATE_ABORT;
597 mvumi_reset(mhba);
599 if (mvumi_start(mhba))
605 static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
607 struct mvumi_hw_regs *regs = mhba->regs;
617 dev_err(&mhba->pdev->dev,
630 static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
635 pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
636 &mhba->pci_base[i]);
640 static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
645 if (mhba->pci_base[i])
646 pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
647 mhba->pci_base[i]);
666 static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
668 mhba->fw_state = FW_STATE_ABORT;
670 iowrite32(0, mhba->regs->reset_enable);
671 iowrite32(0xf, mhba->regs->reset_request);
673 iowrite32(0x10, mhba->regs->reset_enable);
674 iowrite32(0x10, mhba->regs->reset_request);
676 pci_disable_device(mhba->pdev);
678 if (pci_enable_device(mhba->pdev)) {
679 dev_err(&mhba->pdev->dev, "enable device failed\n");
682 if (mvumi_pci_set_master(mhba->pdev)) {
683 dev_err(&mhba->pdev->dev, "set master failed\n");
686 mvumi_restore_bar_addr(mhba);
687 if (mvumi_wait_for_fw(mhba) == FAILED)
690 return mvumi_wait_for_outstanding(mhba);
693 static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
695 return mvumi_wait_for_outstanding(mhba);
700 struct mvumi_hba *mhba;
702 mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
707 return mhba->instancet->reset_host(mhba);
710 static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
718 dev_err(&mhba->pdev->dev,
725 spin_lock_irqsave(mhba->shost->host_lock, flags);
726 mhba->instancet->fire_cmd(mhba, cmd);
727 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
729 wait_event_timeout(mhba->int_cmd_wait_q,
735 spin_lock_irqsave(mhba->shost->host_lock, flags);
737 if (mhba->tag_cmd[cmd->frame->tag]) {
738 mhba->tag_cmd[cmd->frame->tag] = NULL;
739 dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
741 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
744 dev_warn(&mhba->pdev->dev,
748 atomic_dec(&mhba->fw_outstanding);
750 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
755 static void mvumi_release_fw(struct mvumi_hba *mhba)
757 mvumi_free_cmds(mhba);
758 mvumi_release_mem_resource(mhba);
759 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
760 dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
761 mhba->handshake_page, mhba->handshake_page_phys);
762 kfree(mhba->regs);
763 pci_release_regions(mhba->pdev);
766 static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
773 for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
774 if (!(mhba->target_map[device_id / bitcount] &
777 get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
780 dev_err(&mhba->pdev->dev, "failed to get memory"
802 mvumi_issue_blocked_cmd(mhba, cmd);
804 dev_err(&mhba->pdev->dev,
809 mvumi_delete_internal_cmd(mhba, cmd);
830 static void mvumi_hs_build_page(struct mvumi_hba *mhba,
845 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
874 hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
875 hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
877 hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
878 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
879 hs_page4->ib_entry_size = mhba->ib_max_size_setting;
880 hs_page4->ob_entry_size = mhba->ob_max_size_setting;
881 if (mhba->hba_capability
884 &mhba->list_num_io,
887 &mhba->list_num_io,
890 hs_page4->ob_depth = (u8) mhba->list_num_io;
891 hs_page4->ib_depth = (u8) mhba->list_num_io;
898 dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
906 * @mhba: Adapter soft state
908 static int mvumi_init_data(struct mvumi_hba *mhba)
916 if (mhba->fw_flag & MVUMI_FW_ALLOC)
919 tmp_size = mhba->ib_max_size * mhba->max_io;
920 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
921 tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
923 tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
926 res_mgnt = mvumi_alloc_mem_resource(mhba,
929 dev_err(&mhba->pdev->dev,
940 mhba->ib_list = v;
941 mhba->ib_list_phys = p;
942 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
943 v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
944 p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
945 mhba->ib_frame = v;
946 mhba->ib_frame_phys = p;
948 v += mhba->ib_max_size * mhba->max_io;
949 p += mhba->ib_max_size * mhba->max_io;
955 mhba->ib_shadow = v;
956 mhba->ib_shadow_phys = p;
960 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
964 mhba->ob_shadow = v;
965 mhba->ob_shadow_phys = p;
972 mhba->ob_shadow = v;
973 mhba->ob_shadow_phys = p;
983 mhba->ob_list = v;
984 mhba->ob_list_phys = p;
987 tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
990 res_mgnt = mvumi_alloc_mem_resource(mhba,
993 dev_err(&mhba->pdev->dev,
999 for (i = mhba->max_io; i != 0; i--) {
1001 list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1002 virmem += mhba->ob_max_size + sizeof(*ob_pool);
1005 tmp_size = sizeof(unsigned short) * mhba->max_io +
1006 sizeof(struct mvumi_cmd *) * mhba->max_io;
1007 tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1010 res_mgnt = mvumi_alloc_mem_resource(mhba,
1013 dev_err(&mhba->pdev->dev,
1019 mhba->tag_pool.stack = virmem;
1020 mhba->tag_pool.size = mhba->max_io;
1021 tag_init(&mhba->tag_pool, mhba->max_io);
1022 virmem += sizeof(unsigned short) * mhba->max_io;
1024 mhba->tag_cmd = virmem;
1025 virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1027 mhba->target_map = virmem;
1029 mhba->fw_flag |= MVUMI_FW_ALLOC;
1033 mvumi_release_mem_resource(mhba);
1037 static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1046 dev_err(&mhba->pdev->dev, "checksum error\n");
1054 mhba->max_io = hs_page1->max_io_support;
1055 mhba->list_num_io = hs_page1->cl_inout_list_depth;
1056 mhba->max_transfer_size = hs_page1->max_transfer_size;
1057 mhba->max_target_id = hs_page1->max_devices_support;
1058 mhba->hba_capability = hs_page1->capability;
1059 mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1060 mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1062 mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1063 mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1065 dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1068 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1069 mhba->eot_flag = 22;
1071 mhba->eot_flag = 27;
1072 if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1073 mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1076 dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1084 * @mhba: Adapter soft state
1091 static int mvumi_handshake(struct mvumi_hba *mhba)
1095 struct mvumi_hw_regs *regs = mhba->regs;
1097 if (mhba->fw_state == FW_STATE_STARTING)
1102 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1104 mhba->fw_state = FW_STATE_STARTING;
1112 mhba->fw_state = FW_STATE_HANDSHAKING;
1121 iowrite32(lower_32_bits(mhba->handshake_page_phys),
1123 iowrite32(upper_32_bits(mhba->handshake_page_phys),
1134 hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1136 mhba->hba_total_pages =
1139 if (mhba->hba_total_pages == 0)
1140 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1144 if (mvumi_hs_process_page(mhba, hs_header)) {
1148 if (mvumi_init_data(mhba)) {
1154 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1157 if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1160 mvumi_hs_build_page(mhba, hs_header);
1177 iowrite32(mhba->list_num_io, mhba->ib_shadow);
1179 iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1181 iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1184 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1186 iowrite32((mhba->list_num_io-1) |
1188 mhba->ob_shadow);
1189 iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1191 iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1195 mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1197 mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1199 mhba->fw_state = FW_STATE_STARTED;
1203 dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1210 static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1216 mvumi_handshake(mhba);
1218 isr_status = mhba->instancet->read_fw_status_reg(mhba);
1220 if (mhba->fw_state == FW_STATE_STARTED)
1223 dev_err(&mhba->pdev->dev,
1225 mhba->fw_state);
1226 dev_err(&mhba->pdev->dev,
1228 mhba->global_isr, isr_status);
1238 static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1244 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1248 mhba->regs->pciea_to_arm_drbl_reg);
1250 dev_err(&mhba->pdev->dev,
1256 tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1259 mhba->fw_state = FW_STATE_STARTING;
1260 dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1262 if (mvumi_handshake_event(mhba)) {
1263 dev_err(&mhba->pdev->dev,
1265 mhba->fw_state);
1268 } while (mhba->fw_state != FW_STATE_STARTED);
1270 dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1275 static unsigned char mvumi_start(struct mvumi_hba *mhba)
1278 struct mvumi_hw_regs *regs = mhba->regs;
1288 if (mvumi_check_handshake(mhba))
1296 * @mhba: Adapter soft state
1299 static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1328 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
1332 mvumi_return_cmd(mhba, cmd);
1335 static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1349 wake_up(&mhba->int_cmd_wait_q);
1353 static void mvumi_show_event(struct mvumi_hba *mhba,
1358 dev_warn(&mhba->pdev->dev,
1379 static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1385 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1387 dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1393 dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1396 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1398 scsi_add_device(mhba->shost, 0, devid, 0);
1399 dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1403 dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1411 static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1420 cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1441 mvumi_issue_blocked_cmd(mhba, cmd);
1444 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1450 dev_dbg(&mhba->pdev->dev,
1456 mvumi_delete_internal_cmd(mhba, cmd);
1461 static void mvumi_detach_devices(struct mvumi_hba *mhba)
1466 mutex_lock(&mhba->device_lock);
1470 &mhba->shost_dev_list, list) {
1471 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1473 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1477 list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1479 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1485 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1486 sdev = scsi_device_lookup(mhba->shost, 0,
1487 mhba->max_target_id - 1, 0);
1494 mutex_unlock(&mhba->device_lock);
1497 static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1501 sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1508 static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1512 list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1515 dev_err(&mhba->pdev->dev,
1521 if (mhba->pdev->device ==
1523 mvumi_rescan_devices(mhba, id);
1531 static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1536 &mhba->shost_dev_list, list) {
1538 dev_dbg(&mhba->pdev->dev,
1541 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1548 static int mvumi_probe_devices(struct mvumi_hba *mhba)
1556 cmd = mvumi_create_internal_cmd(mhba, 64);
1560 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1561 maxid = mhba->max_target_id;
1563 maxid = mhba->max_target_id - 1;
1566 wwid = mvumi_inquiry(mhba, id, cmd);
1569 mvumi_remove_devices(mhba, id);
1572 found = mvumi_match_devices(mhba, id, wwid);
1574 mvumi_remove_devices(mhba, id);
1578 dev_err(&mhba->pdev->dev,
1588 &mhba->mhba_dev_list);
1589 dev_dbg(&mhba->pdev->dev,
1600 mvumi_delete_internal_cmd(mhba, cmd);
1608 struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1614 if (!atomic_read(&mhba->pnp_count))
1617 atomic_set(&mhba->pnp_count, 0);
1620 mutex_lock(&mhba->device_lock);
1621 ret = mvumi_probe_devices(mhba);
1624 &mhba->mhba_dev_list, list) {
1625 if (mvumi_handle_hotplug(mhba, mv_dev->id,
1627 dev_err(&mhba->pdev->dev,
1636 &mhba->shost_dev_list);
1640 mutex_unlock(&mhba->device_lock);
1645 static void mvumi_proc_msg(struct mvumi_hba *mhba,
1653 if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1658 mutex_lock(&mhba->sas_discovery_mutex);
1663 mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1671 mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1673 mutex_unlock(&mhba->sas_discovery_mutex);
1677 static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1685 dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1692 mvumi_show_event(mhba, param);
1695 mvumi_proc_msg(mhba, buffer);
1699 static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1704 cmd = mvumi_create_internal_cmd(mhba, 512);
1719 mvumi_issue_blocked_cmd(mhba, cmd);
1722 dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1725 mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1727 mvumi_delete_internal_cmd(mhba, cmd);
1736 mvumi_get_event(mu_ev->mhba, mu_ev->event);
1740 static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1746 atomic_inc(&mhba->pnp_count);
1747 wake_up_process(mhba->dm_thread);
1755 mu_ev->mhba = mhba;
1764 static void mvumi_handle_clob(struct mvumi_hba *mhba)
1770 while (!list_empty(&mhba->free_ob_list)) {
1771 pool = list_first_entry(&mhba->free_ob_list,
1774 list_add_tail(&pool->list, &mhba->ob_data_list);
1777 cmd = mhba->tag_cmd[ob_frame->tag];
1779 atomic_dec(&mhba->fw_outstanding);
1780 mhba->tag_cmd[ob_frame->tag] = NULL;
1781 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1783 mvumi_complete_cmd(mhba, cmd, ob_frame);
1785 mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1787 mhba->instancet->fire_cmd(mhba, NULL);
1792 struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1795 spin_lock_irqsave(mhba->shost->host_lock, flags);
1796 if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1797 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1801 if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1802 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1803 mvumi_launch_events(mhba, mhba->isr_status);
1804 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1805 dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1806 mvumi_handshake(mhba);
1811 if (mhba->global_isr & mhba->regs->int_comaout)
1812 mvumi_receive_ob_list_entry(mhba);
1814 mhba->global_isr = 0;
1815 mhba->isr_status = 0;
1816 if (mhba->fw_state == FW_STATE_STARTED)
1817 mvumi_handle_clob(mhba);
1818 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1822 static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1830 if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1831 dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1834 if (tag_is_empty(&mhba->tag_pool)) {
1835 dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1838 mvumi_get_ib_list_entry(mhba, &ib_entry);
1840 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1841 cmd->frame->request_id = mhba->io_seq++;
1843 mhba->tag_cmd[cmd->frame->tag] = cmd;
1846 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1860 static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1867 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1868 count = mhba->instancet->check_ib_list(mhba);
1869 if (list_empty(&mhba->waiting_req_list) || !count)
1873 cmd = list_first_entry(&mhba->waiting_req_list,
1876 result = mvumi_send_command(mhba, cmd);
1882 list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1884 mvumi_send_ib_list_entry(mhba);
1888 } while (!list_empty(&mhba->waiting_req_list) && count--);
1891 mvumi_send_ib_list_entry(mhba);
1896 * @mhba: Adapter soft state
1898 static void mvumi_enable_intr(struct mvumi_hba *mhba)
1901 struct mvumi_hw_regs *regs = mhba->regs;
1911 * @mhba: Adapter soft state
1913 static void mvumi_disable_intr(struct mvumi_hba *mhba)
1916 struct mvumi_hw_regs *regs = mhba->regs;
1927 struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1929 struct mvumi_hw_regs *regs = mhba->regs;
1936 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1947 status ^= mhba->regs->int_comaerr;
1961 mhba->global_isr = status;
1962 mhba->isr_status = isr_status;
1969 * @mhba: Adapter soft state
1971 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
1975 status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
1977 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
2005 struct mvumi_hba *mhba;
2008 mhba = (struct mvumi_hba *) sdev->host->hostdata;
2009 if (sdev->id >= mhba->max_target_id)
2012 mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2018 * @mhba: Adapter soft state
2025 static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2049 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2058 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2086 struct mvumi_hba *mhba;
2091 mhba = (struct mvumi_hba *) shost->hostdata;
2093 cmd = mvumi_get_cmd(mhba);
2099 if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2104 mhba->instancet->fire_cmd(mhba, cmd);
2109 mvumi_return_cmd(mhba, cmd);
2119 struct mvumi_hba *mhba = shost_priv(host);
2122 spin_lock_irqsave(mhba->shost->host_lock, flags);
2124 if (mhba->tag_cmd[cmd->frame->tag]) {
2125 mhba->tag_cmd[cmd->frame->tag] = NULL;
2126 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2131 atomic_dec(&mhba->fw_outstanding);
2136 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
2140 mvumi_return_cmd(mhba, cmd);
2141 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2187 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2192 switch (mhba->pdev->device) {
2194 mhba->mmio = mhba->base_addr[0];
2195 base = mhba->mmio;
2196 if (!mhba->regs) {
2197 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2198 if (mhba->regs == NULL)
2201 regs = mhba->regs;
2246 mhba->mmio = mhba->base_addr[2];
2247 base = mhba->mmio;
2248 if (!mhba->regs) {
2249 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2250 if (mhba->regs == NULL)
2253 regs = mhba->regs;
2307 * @mhba: Adapter soft state
2311 static int mvumi_init_fw(struct mvumi_hba *mhba)
2315 if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2316 dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2319 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2323 switch (mhba->pdev->device) {
2325 mhba->instancet = &mvumi_instance_9143;
2326 mhba->io_seq = 0;
2327 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2328 mhba->request_id_enabled = 1;
2331 mhba->instancet = &mvumi_instance_9580;
2332 mhba->io_seq = 0;
2333 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2336 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2337 mhba->pdev->device);
2338 mhba->instancet = NULL;
2342 dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
2343 mhba->pdev->device);
2344 ret = mvumi_cfg_hw_reg(mhba);
2346 dev_err(&mhba->pdev->dev,
2351 mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
2352 HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
2353 if (!mhba->handshake_page) {
2354 dev_err(&mhba->pdev->dev,
2360 if (mvumi_start(mhba)) {
2364 ret = mvumi_alloc_cmds(mhba);
2371 mvumi_release_mem_resource(mhba);
2372 dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
2373 mhba->handshake_page, mhba->handshake_page_phys);
2375 kfree(mhba->regs);
2377 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2379 pci_release_regions(mhba->pdev);
2386 * @mhba: Adapter soft state
2388 static int mvumi_io_attach(struct mvumi_hba *mhba)
2390 struct Scsi_Host *host = mhba->shost;
2393 unsigned int max_sg = (mhba->ib_max_size + 4 -
2396 host->irq = mhba->pdev->irq;
2397 host->unique_id = mhba->unique_id;
2398 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2399 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2400 host->max_sectors = mhba->max_transfer_size / 512;
2401 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2402 host->max_id = mhba->max_target_id;
2405 ret = scsi_add_host(host, &mhba->pdev->dev);
2407 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2410 mhba->fw_flag |= MVUMI_FW_ATTACH;
2412 mutex_lock(&mhba->sas_discovery_mutex);
2413 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2414 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2418 dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2419 mutex_unlock(&mhba->sas_discovery_mutex);
2423 mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2424 mhba, "mvumi_scanthread");
2425 if (IS_ERR(mhba->dm_thread)) {
2426 dev_err(&mhba->pdev->dev,
2428 ret = PTR_ERR(mhba->dm_thread);
2429 mutex_unlock(&mhba->sas_discovery_mutex);
2432 atomic_set(&mhba->pnp_count, 1);
2433 wake_up_process(mhba->dm_thread);
2435 mutex_unlock(&mhba->sas_discovery_mutex);
2439 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2440 sdev = scsi_device_lookup(mhba->shost, 0,
2441 mhba->max_target_id - 1, 0);
2447 scsi_remove_host(mhba->shost);
2459 struct mvumi_hba *mhba;
2474 host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2480 mhba = shost_priv(host);
2482 INIT_LIST_HEAD(&mhba->cmd_pool);
2483 INIT_LIST_HEAD(&mhba->ob_data_list);
2484 INIT_LIST_HEAD(&mhba->free_ob_list);
2485 INIT_LIST_HEAD(&mhba->res_list);
2486 INIT_LIST_HEAD(&mhba->waiting_req_list);
2487 mutex_init(&mhba->device_lock);
2488 INIT_LIST_HEAD(&mhba->mhba_dev_list);
2489 INIT_LIST_HEAD(&mhba->shost_dev_list);
2490 atomic_set(&mhba->fw_outstanding, 0);
2491 init_waitqueue_head(&mhba->int_cmd_wait_q);
2492 mutex_init(&mhba->sas_discovery_mutex);
2494 mhba->pdev = pdev;
2495 mhba->shost = host;
2496 mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2498 ret = mvumi_init_fw(mhba);
2502 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2503 "mvumi", mhba);
2509 mhba->instancet->enable_intr(mhba);
2510 pci_set_drvdata(pdev, mhba);
2512 ret = mvumi_io_attach(mhba);
2516 mvumi_backup_bar_addr(mhba);
2522 mhba->instancet->disable_intr(mhba);
2523 free_irq(mhba->pdev->irq, mhba);
2525 mvumi_release_fw(mhba);
2539 struct mvumi_hba *mhba;
2541 mhba = pci_get_drvdata(pdev);
2542 if (mhba->dm_thread) {
2543 kthread_stop(mhba->dm_thread);
2544 mhba->dm_thread = NULL;
2547 mvumi_detach_devices(mhba);
2548 host = mhba->shost;
2549 scsi_remove_host(mhba->shost);
2550 mvumi_flush_cache(mhba);
2552 mhba->instancet->disable_intr(mhba);
2553 free_irq(mhba->pdev->irq, mhba);
2554 mvumi_release_fw(mhba);
2566 struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2568 mvumi_flush_cache(mhba);
2573 struct mvumi_hba *mhba = NULL;
2575 mhba = pci_get_drvdata(pdev);
2576 mvumi_flush_cache(mhba);
2578 pci_set_drvdata(pdev, mhba);
2579 mhba->instancet->disable_intr(mhba);
2580 free_irq(mhba->pdev->irq, mhba);
2581 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2593 struct mvumi_hba *mhba = NULL;
2595 mhba = pci_get_drvdata(pdev);
2611 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2614 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2618 if (mvumi_cfg_hw_reg(mhba)) {
2623 mhba->mmio = mhba->base_addr[0];
2624 mvumi_reset(mhba);
2626 if (mvumi_start(mhba)) {
2631 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2632 "mvumi", mhba);
2637 mhba->instancet->enable_intr(mhba);
2642 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
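
Note (not part of the matched lines above): this listing cross-references every line that touches the adapter soft-state pointer "mhba", apparently from the Marvell UMI SCSI driver (drivers/scsi/mvumi.c); each entry is the source line number followed by the matching line. Several entries exercise the adapter's command tag pool (tag_init, tag_is_empty, tag_get_one, tag_release_one). For orientation only, a minimal LIFO tag-stack sketch consistent with those call sites is given below; the struct mvumi_tag field names (stack, top, size) are assumptions made for the example, not quoted from the driver.

struct mvumi_hba;			/* adapter soft state, defined elsewhere */

/* Illustrative sketch only: a LIFO pool of free command tags. */
struct mvumi_tag {
	unsigned short *stack;		/* storage for free tags, allocated by the caller */
	unsigned short top;		/* number of free tags currently on the stack */
	unsigned short size;		/* total number of tags managed by the pool */
};

static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;

	st->size = st->top = size;
	for (i = 0; i < size; i++)	/* start with every tag free */
		st->stack[i] = size - 1 - i;
}

static unsigned char tag_is_empty(struct mvumi_tag *st)
{
	return st->top == 0;		/* no free tag left to hand out */
}

static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	return st->stack[--st->top];	/* pop one free tag */
}

static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
			    unsigned short tag)
{
	st->stack[st->top++] = tag;	/* push the tag back as free */
}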