Lines Matching refs:dd
114 static int mtip_block_initialize(struct driver_data *dd);
142 struct driver_data *dd = pci_get_drvdata(pdev);
144 if (dd->sr)
150 dd->sr = true;
151 if (dd->queue)
152 blk_queue_flag_set(QUEUE_FLAG_DEAD, dd->queue);
154 dev_warn(&dd->pdev->dev,
155 "%s: dd->queue is NULL\n", __func__);
162 static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
165 struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
173 * @dd Pointer to the driver data structure.
179 static int mtip_hba_reset(struct driver_data *dd)
184 writel(HOST_RESET, dd->mmio + HOST_CTL);
187 readl(dd->mmio + HOST_CTL);
196 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
199 } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
202 if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
338 if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
354 for (i = 0; i < port->dd->slot_groups; i++)
361 writel(readl(port->dd->mmio + HOST_IRQ_STAT),
362 port->dd->mmio + HOST_IRQ_STAT);
389 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
397 dev_warn(&port->dd->pdev->dev,
400 if (mtip_hba_reset(port->dd))
401 dev_err(&port->dd->pdev->dev,
408 dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
420 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
434 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
438 dev_warn(&port->dd->pdev->dev,
446 static int mtip_device_reset(struct driver_data *dd)
450 if (mtip_check_surprise_removal(dd->pdev))
453 if (mtip_hba_reset(dd) < 0)
457 mtip_init_port(dd->port);
458 mtip_start_port(dd->port);
461 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
462 dd->mmio + HOST_CTL);
469 static void print_tags(struct driver_data *dd,
481 dev_warn(&dd->pdev->dev,
502 * @dd Pointer to the DRIVER_DATA structure.
507 static void mtip_handle_tfe(struct driver_data *dd)
520 dev_warn(&dd->pdev->dev, "Taskfile error\n");
522 port = dd->port;
525 cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
535 for (group = 0; group < dd->slot_groups; group++) {
538 dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);
553 cmd = mtip_cmd_from_tag(dd, tag);
560 print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
567 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
568 dd->port->log_buf,
569 dd->port->log_buf_dma, 1);
571 dev_warn(&dd->pdev->dev,
575 buf = (unsigned char *)dd->port->log_buf;
577 dev_info(&dd->pdev->dev,
579 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
584 dev_info(&dd->pdev->dev,
586 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
591 set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
592 dev_info(&dd->pdev->dev,
603 for (group = 0; group < dd->slot_groups; group++) {
607 cmd = mtip_cmd_from_tag(dd, tag);
619 dev_warn(&dd->pdev->dev,
646 dev_warn(&port->dd->pdev->dev,
652 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
661 struct driver_data *dd = port->dd;
681 command = mtip_cmd_from_tag(dd, tag);
688 if (atomic_dec_return(&dd->irq_workers_active) == 0)
689 writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
695 static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
697 struct mtip_port *port = dd->port;
698 struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
712 static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
715 dev_warn(&dd->pdev->dev,
717 writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
721 dev_warn(&dd->pdev->dev,
723 writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
727 dev_warn(&dd->pdev->dev,
730 if (mtip_check_surprise_removal(dd->pdev))
734 set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
735 wake_up_interruptible(&dd->port->svc_wait);
741 struct driver_data *dd = (struct driver_data *) data;
742 struct mtip_port *port = dd->port;
748 hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
755 mtip_check_surprise_removal(dd->pdev);
763 WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);
768 twork = &dd->work[i];
774 atomic_set(&dd->irq_workers_active, workers);
777 twork = &dd->work[i];
781 dd->isr_workq,
785 if (likely(dd->work[0].completed))
787 dd->work[0].completed);
799 if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
804 &dd->dd_flag))
807 mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
811 mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
816 writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
833 struct driver_data *dd = instance;
835 return mtip_handle_irq(dd);
864 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
865 clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
885 for (n = 1; n < port->dd->slot_groups; n++)
906 blk_mq_quiesce_queue(port->dd->queue);
918 if (mtip_check_surprise_removal(port->dd->pdev))
926 blk_mq_unquiesce_queue(port->dd->queue);
929 blk_mq_unquiesce_queue(port->dd->queue);
967 struct driver_data *dd = port->dd;
979 dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
983 if (mtip_check_surprise_removal(dd->pdev))
986 rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
1002 dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
1021 dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
1025 if (mtip_check_surprise_removal(dd->pdev) ||
1027 &dd->dd_flag)) {
1028 dev_err(&dd->pdev->dev,
1034 mtip_device_reset(dd); /* recover from timeout issue */
1042 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
1043 mtip_device_reset(dd);
1080 static void mtip_set_timeout(struct driver_data *dd,
1091 *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
1093 *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
1135 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
1182 set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
1184 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
1225 mtip_set_timeout(port->dd, &fis, &timeout, 0);
1238 dev_warn(&port->dd->pdev->dev,
1337 dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
1341 dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
1345 dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
1352 dev_warn(&port->dd->pdev->dev, "Failed to ge SMART data\n");
1364 dev_warn(&port->dd->pdev->dev,
1375 * @dd Pointer to the device data structure.
1382 static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
1384 struct mtip_port *port = dd->port;
1413 dev_info(&port->dd->pdev->dev,
1417 dev_info(&port->dd->pdev->dev,
1421 dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
1423 dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
1427 if (mtip_hw_get_capacity(port->dd, &sectors))
1428 dev_info(&port->dd->pdev->dev,
1433 pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
1445 dev_info(&port->dd->pdev->dev,
1458 static inline void fill_command_sg(struct driver_data *dd,
1472 dev_err(&dd->pdev->dev,
1506 mtip_set_timeout(port->dd, &fis, &to, 0);
1571 buf = dma_alloc_coherent(&port->dd->pdev->dev,
1576 dev_err(&port->dd->pdev->dev,
1596 mtip_set_timeout(port->dd, &fis, &to, 0);
1648 dma_free_coherent(&port->dd->pdev->dev,
1704 static int exec_drive_taskfile(struct driver_data *dd,
1738 outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
1740 if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
1754 inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
1756 if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
1767 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
1770 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
1773 reply = (dd->port->rxfis + RX_FIS_D2H_REG);
1815 dev_warn(&dd->pdev->dev,
1843 mtip_set_timeout(dd, &fis, &timeout, erasemode);
1852 if (mtip_exec_internal_command(dd->port,
1863 task_file_data = readl(dd->port->mmio+PORT_TFDATA);
1866 reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
1869 reply = dd->port->rxfis + RX_FIS_D2H_REG;
1875 dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
1878 dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
1926 dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
1929 dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
1944 * @dd Pointer to the driver data structure.
1954 static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
1960 if (copy_to_user((void __user *)arg, dd->port->identify,
1976 if (exec_drive_command(dd->port,
2000 if (exec_drive_task(dd->port, drive_command))
2021 ret = exec_drive_taskfile(dd, (void __user *) arg,
2044 * @dd Pointer to the driver data structure.
2057 static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
2062 dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
2064 struct mtip_port *port = dd->port;
2072 nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
2106 fill_command_sg(dd, command, nents);
2113 if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
2148 struct driver_data *dd = dev_to_disk(dev)->private_data;
2151 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
2153 else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
2168 struct driver_data *dd, *tmp;
2175 list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
2176 if (dd->pdev) {
2177 if (dd->port &&
2178 dd->port->identify &&
2179 dd->port->identify_valid) {
2181 (char *) (dd->port->identify + 10), 21);
2182 status = *(dd->port->identify + 141);
2188 if (dd->port &&
2189 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2192 dev_name(&dd->pdev->dev),
2198 dev_name(&dd->pdev->dev),
2205 list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
2206 if (dd->pdev) {
2207 if (dd->port &&
2208 dd->port->identify &&
2209 dd->port->identify_valid) {
2211 (char *) (dd->port->identify+10), 21);
2212 status = *(dd->port->identify + 141);
2218 if (dd->port &&
2219 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
2222 dev_name(&dd->pdev->dev),
2228 dev_name(&dd->pdev->dev),
2241 struct driver_data *dd = (struct driver_data *)f->private_data;
2251 dev_err(&dd->pdev->dev,
2270 struct driver_data *dd = (struct driver_data *)f->private_data;
2281 dev_err(&dd->pdev->dev,
2288 for (n = dd->slot_groups-1; n >= 0; n--)
2290 readl(dd->port->s_active[n]));
2295 for (n = dd->slot_groups-1; n >= 0; n--)
2297 readl(dd->port->cmd_issue[n]));
2302 for (n = dd->slot_groups-1; n >= 0; n--)
2304 readl(dd->port->completed[n]));
2308 readl(dd->port->mmio + PORT_IRQ_STAT));
2310 readl(dd->mmio + HOST_IRQ_STAT));
2315 for (n = dd->slot_groups-1; n >= 0; n--) {
2318 dd->port->cmds_to_issue[n/2] >> (32*(n&1));
2320 group_allocated = dd->port->cmds_to_issue[n];
2337 struct driver_data *dd = (struct driver_data *)f->private_data;
2347 dev_err(&dd->pdev->dev,
2353 dd->port->flags);
2354 size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
2355 dd->dd_flag);
2390 * @dd Pointer to the driver data structure.
2397 static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
2399 if (!kobj || !dd)
2403 dev_warn(&dd->pdev->dev,
2411 * @dd Pointer to the driver data structure.
2418 static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
2420 if (!kobj || !dd)
2428 static int mtip_hw_debugfs_init(struct driver_data *dd)
2433 dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
2434 if (IS_ERR_OR_NULL(dd->dfs_node)) {
2435 dev_warn(&dd->pdev->dev,
2437 dd->disk->disk_name);
2438 dd->dfs_node = NULL;
2442 debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops);
2443 debugfs_create_file("registers", 0444, dd->dfs_node, dd,
2449 static void mtip_hw_debugfs_exit(struct driver_data *dd)
2451 debugfs_remove_recursive(dd->dfs_node);
2457 * @dd Pointer to the driver data structure.
2462 static inline void hba_setup(struct driver_data *dd)
2465 hwdata = readl(dd->mmio + HOST_HSORG);
2471 dd->mmio + HOST_HSORG);
2474 static int mtip_device_unaligned_constrained(struct driver_data *dd)
2476 return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
2484 * @dd Pointer to the driver data structure.
2489 static void mtip_detect_product(struct driver_data *dd)
2501 hwdata = readl(dd->mmio + HOST_HSORG);
2503 dd->product_type = MTIP_PRODUCT_UNKNOWN;
2504 dd->slot_groups = 1;
2507 dd->product_type = MTIP_PRODUCT_ASICFPGA;
2510 dev_info(&dd->pdev->dev,
2518 dev_warn(&dd->pdev->dev,
2523 dd->slot_groups = slotgroups;
2527 dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
2533 * @dd Pointer to the DRIVER_DATA structure.
2539 static int mtip_ftl_rebuild_poll(struct driver_data *dd)
2543 dev_warn(&dd->pdev->dev,
2551 &dd->dd_flag)))
2553 if (mtip_check_surprise_removal(dd->pdev))
2556 if (mtip_get_identify(dd->port, NULL) < 0)
2559 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
2564 dev_warn(&dd->pdev->dev,
2570 dev_warn(&dd->pdev->dev,
2573 mtip_block_initialize(dd);
2579 dev_err(&dd->pdev->dev,
2588 struct driver_data *dd = rq->q->queuedata;
2591 dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
2595 atomic_inc(&dd->port->cmd_slot_unal);
2603 struct driver_data *dd = data;
2607 clear_bit(req->tag, dd->port->cmds_to_issue);
2615 struct driver_data *dd = data;
2617 set_bit(req->tag, dd->port->cmds_to_issue);
2633 struct driver_data *dd = (struct driver_data *)data;
2635 unsigned int num_cmd_slots = dd->slot_groups * 32;
2636 struct mtip_port *port = dd->port;
2656 &dd->dd_flag)))
2664 mtip_handle_tfe(dd);
2676 } while (atomic_read(&dd->irq_workers_active) != 0 &&
2679 if (atomic_read(&dd->irq_workers_active) != 0)
2680 dev_warn(&dd->pdev->dev,
2683 blk_mq_quiesce_queue(dd->queue);
2685 blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);
2687 set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
2689 if (mtip_device_reset(dd))
2690 blk_mq_tagset_busy_iter(&dd->tags,
2691 mtip_abort_cmd, dd);
2693 clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
2695 blk_mq_unquiesce_queue(dd->queue);
2730 if (mtip_ftl_rebuild_poll(dd) == 0)
2742 * @dd Pointer to driver_data structure
2747 static void mtip_dma_free(struct driver_data *dd)
2749 struct mtip_port *port = dd->port;
2752 dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
2756 dma_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
2764 * @dd Pointer to driver_data structure
2769 static int mtip_dma_alloc(struct driver_data *dd)
2771 struct mtip_port *port = dd->port;
2775 dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
2782 dma_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
2785 dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
2805 static int mtip_hw_get_identify(struct driver_data *dd)
2811 if (mtip_get_identify(dd->port, NULL) < 0)
2814 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
2816 set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
2819 mtip_dump_identify(dd->port);
2822 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
2823 dd->port->log_buf,
2824 dd->port->log_buf_dma, 1);
2826 dev_warn(&dd->pdev->dev,
2830 buf = (unsigned char *)dd->port->log_buf;
2832 dev_info(&dd->pdev->dev,
2834 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
2837 dev_info(&dd->pdev->dev,
2839 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
2842 dev_info(&dd->pdev->dev,
2844 set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
2850 if (mtip_get_smart_attr(dd->port, 242, &attr242))
2851 dev_warn(&dd->pdev->dev,
2854 dev_info(&dd->pdev->dev,
2864 * @dd Pointer to the driver data structure.
2869 static int mtip_hw_init(struct driver_data *dd)
2875 dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
2877 mtip_detect_product(dd);
2878 if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
2883 hba_setup(dd);
2885 dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
2886 dd->numa_node);
2887 if (!dd->port) {
2888 dev_err(&dd->pdev->dev,
2895 dd->work[i].port = dd->port;
2898 if (mtip_device_unaligned_constrained(dd))
2899 dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
2901 dd->unal_qdepth = 0;
2903 atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth);
2907 spin_lock_init(&dd->port->cmd_issue_lock[i]);
2910 dd->port->mmio = dd->mmio + PORT_OFFSET;
2911 dd->port->dd = dd;
2914 rv = mtip_dma_alloc(dd);
2919 for (i = 0; i < dd->slot_groups; i++) {
2920 dd->port->s_active[i] =
2921 dd->port->mmio + i*0x80 + PORT_SCR_ACT;
2922 dd->port->cmd_issue[i] =
2923 dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
2924 dd->port->completed[i] =
2925 dd->port->mmio + i*0x80 + PORT_SDBV;
2930 while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
2934 if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
2936 dev_warn(&dd->pdev->dev,
2942 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
2944 dev_warn(&dd->pdev->dev,
2952 if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
2953 if (mtip_hba_reset(dd) < 0) {
2954 dev_err(&dd->pdev->dev,
2961 writel(readl(dd->mmio + HOST_IRQ_STAT),
2962 dd->mmio + HOST_IRQ_STAT);
2965 mtip_init_port(dd->port);
2966 mtip_start_port(dd->port);
2969 rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED,
2970 dev_driver_string(&dd->pdev->dev), dd);
2972 dev_err(&dd->pdev->dev,
2973 "Unable to allocate IRQ %d\n", dd->pdev->irq);
2976 irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));
2979 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
2980 dd->mmio + HOST_CTL);
2982 init_waitqueue_head(&dd->port->svc_wait);
2984 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
2993 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
2994 dd->mmio + HOST_CTL);
2997 irq_set_affinity_hint(dd->pdev->irq, NULL);
2998 free_irq(dd->pdev->irq, dd);
3001 mtip_deinit_port(dd->port);
3002 mtip_dma_free(dd);
3006 kfree(dd->port);
3011 static int mtip_standby_drive(struct driver_data *dd)
3015 if (dd->sr || !dd->port)
3021 if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
3022 !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
3023 !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
3024 rv = mtip_standby_immediate(dd->port);
3026 dev_warn(&dd->pdev->dev,
3035 * @dd Pointer to the driver data structure.
3040 static int mtip_hw_exit(struct driver_data *dd)
3042 if (!dd->sr) {
3044 mtip_deinit_port(dd->port);
3047 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
3048 dd->mmio + HOST_CTL);
3052 irq_set_affinity_hint(dd->pdev->irq, NULL);
3053 free_irq(dd->pdev->irq, dd);
3057 mtip_dma_free(dd);
3060 kfree(dd->port);
3061 dd->port = NULL;
3072 * @dd Pointer to the driver data structure.
3077 static int mtip_hw_shutdown(struct driver_data *dd)
3083 mtip_standby_drive(dd);
3094 * @dd Pointer to the driver data structure.
3100 static int mtip_hw_suspend(struct driver_data *dd)
3106 if (mtip_standby_drive(dd) != 0) {
3107 dev_err(&dd->pdev->dev,
3113 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
3114 dd->mmio + HOST_CTL);
3115 mtip_deinit_port(dd->port);
3126 * @dd Pointer to the driver data structure.
3132 static int mtip_hw_resume(struct driver_data *dd)
3135 hba_setup(dd);
3138 if (mtip_hba_reset(dd) != 0) {
3139 dev_err(&dd->pdev->dev,
3148 mtip_init_port(dd->port);
3149 mtip_start_port(dd->port);
3152 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
3153 dd->mmio + HOST_CTL);
3207 struct driver_data *dd = dev->bd_disk->private_data;
3212 if (!dd)
3215 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
3222 return mtip_hw_ioctl(dd, cmd, arg);
3245 struct driver_data *dd = dev->bd_disk->private_data;
3250 if (!dd)
3253 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
3282 ret = exec_drive_taskfile(dd, (void __user *) arg,
3299 return mtip_hw_ioctl(dd, cmd, arg);
3325 struct driver_data *dd = dev->bd_disk->private_data;
3328 if (!dd)
3331 if (!(mtip_hw_get_capacity(dd, &capacity))) {
3332 dev_warn(&dd->pdev->dev,
3346 struct driver_data *dd;
3349 dd = (struct driver_data *) dev->bd_disk->private_data;
3351 if (dd) {
3353 &dd->dd_flag)) {
3383 static inline bool is_se_active(struct driver_data *dd)
3385 if (unlikely(test_bit(MTIP_PF_SE_ACTIVE_BIT, &dd->port->flags))) {
3386 if (dd->port->ic_pause_timer) {
3387 unsigned long to = dd->port->ic_pause_timer +
3391 &dd->port->flags);
3392 clear_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
3393 dd->port->ic_pause_timer = 0;
3394 wake_up_interruptible(&dd->port->svc_wait);
3403 static inline bool is_stopped(struct driver_data *dd, struct request *rq)
3405 if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO)))
3408 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
3410 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
3412 if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) &&
3415 if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
3417 if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
3426 struct driver_data *dd = hctx->queue->queuedata;
3429 if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
3441 if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) >= 0)
3450 struct driver_data *dd = hctx->queue->queuedata;
3454 dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
3457 if (mtip_commands_active(dd->port))
3461 if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
3480 mtip_issue_non_ncq_command(dd->port, rq->tag);
3487 struct driver_data *dd = hctx->queue->queuedata;
3497 if (is_se_active(dd) || is_stopped(dd, rq))
3502 mtip_hw_submit_io(dd, rq, cmd, hctx);
3509 struct driver_data *dd = set->driver_data;
3515 dma_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, cmd->command,
3522 struct driver_data *dd = set->driver_data;
3525 cmd->command = dma_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
3537 struct driver_data *dd = req->q->queuedata;
3547 if (test_bit(req->tag, dd->port->cmds_to_issue))
3550 if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
3553 wake_up_interruptible(&dd->port->svc_wait);
3572 * @dd Pointer to the driver data structure.
3577 static int mtip_block_initialize(struct driver_data *dd)
3584 if (dd->disk)
3587 if (mtip_hw_init(dd)) {
3592 dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
3593 if (dd->disk == NULL) {
3594 dev_err(&dd->pdev->dev,
3607 dd->disk->disk_name,
3612 dd->disk->major = dd->major;
3613 dd->disk->first_minor = index * MTIP_MAX_MINORS;
3614 dd->disk->minors = MTIP_MAX_MINORS;
3615 dd->disk->fops = &mtip_block_ops;
3616 dd->disk->private_data = dd;
3617 dd->index = index;
3619 mtip_hw_debugfs_init(dd);
3621 memset(&dd->tags, 0, sizeof(dd->tags));
3622 dd->tags.ops = &mtip_mq_ops;
3623 dd->tags.nr_hw_queues = 1;
3624 dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
3625 dd->tags.reserved_tags = 1;
3626 dd->tags.cmd_size = sizeof(struct mtip_cmd);
3627 dd->tags.numa_node = dd->numa_node;
3628 dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
3629 dd->tags.driver_data = dd;
3630 dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
3632 rv = blk_mq_alloc_tag_set(&dd->tags);
3634 dev_err(&dd->pdev->dev,
3640 dd->queue = blk_mq_init_queue(&dd->tags);
3641 if (IS_ERR(dd->queue)) {
3642 dev_err(&dd->pdev->dev,
3648 dd->disk->queue = dd->queue;
3649 dd->queue->queuedata = dd;
3653 wait_for_rebuild = mtip_hw_get_identify(dd);
3655 dev_err(&dd->pdev->dev,
3669 blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
3670 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
3671 blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
3672 blk_queue_physical_block_size(dd->queue, 4096);
3673 blk_queue_max_hw_sectors(dd->queue, 0xffff);
3674 blk_queue_max_segment_size(dd->queue, 0x400000);
3675 dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
3676 blk_queue_io_min(dd->queue, 4096);
3679 if (!(mtip_hw_get_capacity(dd, &capacity))) {
3680 dev_warn(&dd->pdev->dev,
3685 set_capacity(dd->disk, capacity);
3688 device_add_disk(&dd->pdev->dev, dd->disk, NULL);
3690 dd->bdev = bdget_disk(dd->disk, 0);
3695 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
3697 mtip_hw_sysfs_init(dd, kobj);
3701 if (dd->mtip_svc_handler) {
3702 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
3707 dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
3708 dd, dd->numa_node,
3711 if (IS_ERR(dd->mtip_svc_handler)) {
3712 dev_err(&dd->pdev->dev, "service thread failed to start\n");
3713 dd->mtip_svc_handler = NULL;
3717 wake_up_process(dd->mtip_svc_handler);
3724 bdput(dd->bdev);
3725 dd->bdev = NULL;
3728 del_gendisk(dd->disk);
3732 blk_cleanup_queue(dd->queue);
3734 blk_mq_free_tag_set(&dd->tags);
3736 mtip_hw_debugfs_exit(dd);
3741 put_disk(dd->disk);
3744 mtip_hw_exit(dd); /* De-initialize the protocol layer. */
3764 * @dd Pointer to the driver data structure.
3769 static int mtip_block_remove(struct driver_data *dd)
3773 mtip_hw_debugfs_exit(dd);
3775 if (dd->mtip_svc_handler) {
3776 set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
3777 wake_up_interruptible(&dd->port->svc_wait);
3778 kthread_stop(dd->mtip_svc_handler);
3782 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
3783 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
3785 mtip_hw_sysfs_exit(dd, kobj);
3790 if (!dd->sr) {
3795 if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
3796 mtip_standby_drive(dd);
3799 dev_info(&dd->pdev->dev, "device %s surprise removal\n",
3800 dd->disk->disk_name);
3802 blk_freeze_queue_start(dd->queue);
3803 blk_mq_quiesce_queue(dd->queue);
3804 blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
3805 blk_mq_unquiesce_queue(dd->queue);
3811 if (dd->bdev) {
3812 bdput(dd->bdev);
3813 dd->bdev = NULL;
3815 if (dd->disk) {
3816 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
3817 del_gendisk(dd->disk);
3818 if (dd->disk->queue) {
3819 blk_cleanup_queue(dd->queue);
3820 blk_mq_free_tag_set(&dd->tags);
3821 dd->queue = NULL;
3823 put_disk(dd->disk);
3825 dd->disk = NULL;
3827 ida_free(&rssd_index_ida, dd->index);
3830 mtip_hw_exit(dd);
3842 * @dd Pointer to the driver data structure.
3847 static int mtip_block_shutdown(struct driver_data *dd)
3849 mtip_hw_shutdown(dd);
3852 if (dd->disk) {
3853 dev_info(&dd->pdev->dev,
3854 "Shutting down %s ...\n", dd->disk->disk_name);
3856 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
3857 del_gendisk(dd->disk);
3858 if (dd->disk->queue) {
3859 blk_cleanup_queue(dd->queue);
3860 blk_mq_free_tag_set(&dd->tags);
3862 put_disk(dd->disk);
3863 dd->disk = NULL;
3864 dd->queue = NULL;
3867 ida_free(&rssd_index_ida, dd->index);
3871 static int mtip_block_suspend(struct driver_data *dd)
3873 dev_info(&dd->pdev->dev,
3874 "Suspending %s ...\n", dd->disk->disk_name);
3875 mtip_hw_suspend(dd);
3879 static int mtip_block_resume(struct driver_data *dd)
3881 dev_info(&dd->pdev->dev, "Resuming %s ...\n",
3882 dd->disk->disk_name);
3883 mtip_hw_resume(dd);
3937 static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
3949 dev_info(&dd->pdev->dev,
3961 static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
3970 mtip_disable_link_opts(dd, pdev->bus->self);
3981 mtip_disable_link_opts(dd,
4001 struct driver_data *dd = NULL;
4021 dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
4022 if (dd == NULL) {
4029 pci_set_drvdata(pdev, dd);
4051 dd->major = mtip_major;
4052 dd->instance = instance;
4053 dd->pdev = pdev;
4054 dd->numa_node = my_node;
4056 INIT_LIST_HEAD(&dd->online_list);
4057 INIT_LIST_HEAD(&dd->remove_list);
4059 memset(dd->workq_name, 0, 32);
4060 snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
4062 dd->isr_workq = create_workqueue(dd->workq_name);
4063 if (!dd->isr_workq) {
4064 dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
4071 node_mask = cpumask_of_node(dd->numa_node);
4080 dd->numa_node,
4082 nr_cpus_node(dd->numa_node),
4087 dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
4089 cpu_to_node(dd->isr_binding), dd->isr_binding);
4092 dd->work[0].cpu_binding = dd->isr_binding;
4093 dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
4094 dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
4095 dd->work[3].cpu_binding = dd->work[0].cpu_binding;
4096 dd->work[4].cpu_binding = dd->work[1].cpu_binding;
4097 dd->work[5].cpu_binding = dd->work[2].cpu_binding;
4098 dd->work[6].cpu_binding = dd->work[2].cpu_binding;
4099 dd->work[7].cpu_binding = dd->work[1].cpu_binding;
4105 if (dd->work[i].cpu_binding == cpu) {
4114 INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
4115 INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
4116 INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
4117 INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
4118 INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
4119 INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
4120 INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
4121 INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
4131 mtip_fix_ero_nosnoop(dd, pdev);
4134 rv = mtip_block_initialize(dd);
4147 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
4153 list_add(&dd->online_list, &online_list);
4162 if (dd->isr_workq) {
4163 flush_workqueue(dd->isr_workq);
4164 destroy_workqueue(dd->isr_workq);
4165 drop_cpu(dd->work[0].cpu_binding);
4166 drop_cpu(dd->work[1].cpu_binding);
4167 drop_cpu(dd->work[2].cpu_binding);
4173 kfree(dd);
4189 struct driver_data *dd = pci_get_drvdata(pdev);
4192 set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
4195 list_del_init(&dd->online_list);
4196 list_add(&dd->remove_list, &removing_list);
4200 synchronize_irq(dd->pdev->irq);
4206 } while (atomic_read(&dd->irq_workers_active) != 0 &&
4209 if (!dd->sr)
4210 fsync_bdev(dd->bdev);
4212 if (atomic_read(&dd->irq_workers_active) != 0) {
4213 dev_warn(&dd->pdev->dev,
4217 blk_set_queue_dying(dd->queue);
4218 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
4221 mtip_block_remove(dd);
4223 if (dd->isr_workq) {
4224 flush_workqueue(dd->isr_workq);
4225 destroy_workqueue(dd->isr_workq);
4226 drop_cpu(dd->work[0].cpu_binding);
4227 drop_cpu(dd->work[1].cpu_binding);
4228 drop_cpu(dd->work[2].cpu_binding);
4234 list_del_init(&dd->remove_list);
4237 kfree(dd);
4253 struct driver_data *dd = pci_get_drvdata(pdev);
4255 if (!dd) {
4261 set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
4264 rv = mtip_block_suspend(dd);
4294 struct driver_data *dd;
4296 dd = pci_get_drvdata(pdev);
4297 if (!dd) {
4322 rv = mtip_block_resume(dd);
4327 clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
4340 struct driver_data *dd = pci_get_drvdata(pdev);
4341 if (dd)
4342 mtip_block_shutdown(dd);