Lines matching refs:hpriv — each entry below is the source line number followed by the matching line.
438 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
439 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
440 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
441 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
442 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
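
The five macros above (source lines 438-442) all test bits in hpriv->hp_flags, which is how the driver distinguishes chip generations and bus variants everywhere else in this listing. A minimal standalone sketch of that flag-testing pattern; the bit values and the stripped-down struct below are placeholders, not the driver's real definitions:

#include <stdio.h>

/* Hypothetical bit assignments for the sketch; the real MV_HP_* values are
 * defined elsewhere in the driver and are not part of this match list. */
#define MV_HP_GEN_I	(1 << 6)
#define MV_HP_GEN_II	(1 << 7)
#define MV_HP_GEN_IIE	(1 << 8)

/* Stripped-down stand-in for the driver's private-data struct. */
struct mv_host_priv {
	unsigned int hp_flags;		/* host-wide capability/quirk bits */
};

#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

int main(void)
{
	struct mv_host_priv priv = { .hp_flags = MV_HP_GEN_IIE };

	/* Exactly one generation bit is expected to be set per host. */
	printf("GEN_I=%d GEN_II=%d GEN_IIE=%d\n",
	       !!IS_GEN_I(&priv), !!IS_GEN_II(&priv), !!IS_GEN_IIE(&priv));
	return 0;
}
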
577 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
579 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
580 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
584 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
604 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
606 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
607 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
611 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
614 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
616 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
617 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
621 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
622 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
624 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
628 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
631 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
634 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
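
The prototypes at source lines 577-631 are per-chip-family implementations (mv5_*, mv6_*, mv_soc_*) of a small set of hardware hooks, which the chip-identification code later installs into hpriv->ops (see source lines 3771-3883) and generic code calls through the pointer (e.g. source lines 3584 and 3944-3953). A hedged sketch of that dispatch pattern; the struct name, the trailing parameters (the continuation lines are not in this match list), and the stub bodies are assumptions, and the driver's real table carries more hooks than shown:

#include <stdio.h>

#define __iomem			/* sparse annotation; empty outside the kernel */

struct mv_host_priv;

/* Per-generation hardware hooks, inferred from the prototypes above. */
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);		/* trailing arg assumed */
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);	/* trailing arg assumed */
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
};

struct mv_host_priv {
	const struct mv_hw_ops *ops;	/* selected once, at chip-id time */
};

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	(void)hpriv; (void)mmio;
	printf("Gen-I LED setup\n");	/* stand-in body */
}

static const struct mv_hw_ops mv5xxx_ops = {
	.enable_leds	= mv5_enable_leds,
	/* .phy_errata, .read_preamp, .reset_flash filled in the same way */
};

int main(void)
{
	struct mv_host_priv hpriv = { .ops = &mv5xxx_ops };

	/* Callers stay generation-agnostic and go through the table: */
	if (hpriv.ops->enable_leds)
		hpriv.ops->enable_leds(&hpriv, NULL);
	return 0;
}
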
922 struct mv_host_priv *hpriv = host->private_data;
923 return hpriv->base;
993 struct mv_host_priv *hpriv,
1023 static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1037 writelfl(mask, hpriv->main_irq_mask_addr);
1043 struct mv_host_priv *hpriv = host->private_data;
1046 old_mask = hpriv->main_irq_mask;
1049 hpriv->main_irq_mask = new_mask;
1050 mv_write_main_irq_mask(new_mask, hpriv);
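
Source lines 1023-1050 show the main-IRQ-mask helpers: the driver keeps a software copy of the last mask it wrote (hpriv->main_irq_mask), skips the MMIO access when nothing changed, and flushes the write with writelfl(), the driver's flushing write helper (modeled here as a write followed by a read-back). A userspace model of that cache-and-flush pattern; note the real setter takes the ata_host and digs hpriv out of it:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;		/* stand-in for the MMIO mask register */

/* Modeled after the driver's flushing write: write, then read back. */
static void writelfl(uint32_t val, volatile uint32_t *addr)
{
	*addr = val;
	(void)*addr;
}

struct mv_host_priv {
	uint32_t main_irq_mask;			/* last value written to hardware */
	volatile uint32_t *main_irq_mask_addr;
};

/* Simplified: the real helper is handed the ata_host, not hpriv directly. */
static void mv_set_main_irq_mask(struct mv_host_priv *hpriv,
				 uint32_t disable_bits, uint32_t enable_bits)
{
	uint32_t old_mask = hpriv->main_irq_mask;
	uint32_t new_mask = (old_mask & ~disable_bits) | enable_bits;

	if (new_mask != old_mask) {		/* skip the MMIO write if unchanged */
		hpriv->main_irq_mask = new_mask;
		writelfl(new_mask, hpriv->main_irq_mask_addr);
	}
}

int main(void)
{
	struct mv_host_priv hpriv = { .main_irq_mask_addr = &fake_reg };

	mv_set_main_irq_mask(&hpriv, 0, 0x3);	/* enable two sources */
	mv_set_main_irq_mask(&hpriv, 0, 0x3);	/* no-op: mask unchanged */
	printf("cached=0x%x reg=0x%x\n",
	       (unsigned)hpriv.main_irq_mask, (unsigned)fake_reg);
	return 0;
}
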
1071 struct mv_host_priv *hpriv = ap->host->private_data;
1085 if (IS_GEN_IIE(hpriv))
1094 struct mv_host_priv *hpriv = host->private_data;
1095 void __iomem *mmio = hpriv->base, *hc_mmio;
1098 unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1117 if (is_dual_hc && !IS_GEN_I(hpriv)) {
1174 struct mv_host_priv *hpriv = ap->host->private_data;
1178 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1358 struct mv_host_priv *hpriv = link->ap->host->private_data;
1376 if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
1505 struct mv_host_priv *hpriv = ap->host->private_data;
1509 old = readl(hpriv->base + GPIO_PORT_CTL);
1515 writel(new, hpriv->base + GPIO_PORT_CTL);
1559 struct mv_host_priv *hpriv = host->private_data;
1563 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1565 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1574 struct mv_host_priv *hpriv = host->private_data;
1579 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1583 for (port = 0; port < hpriv->n_ports; port++) {
1591 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1601 struct mv_host_priv *hpriv = ap->host->private_data;
1609 if (IS_GEN_I(hpriv))
1612 else if (IS_GEN_II(hpriv)) {
1616 } else if (IS_GEN_IIE(hpriv)) {
1638 if (!IS_SOC(hpriv))
1641 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1645 if (IS_SOC(hpriv)) {
1663 struct mv_host_priv *hpriv = ap->host->private_data;
1668 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1672 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1681 if (tag == 0 || !IS_GEN_I(hpriv))
1682 dma_pool_free(hpriv->sg_tbl_pool,
1703 struct mv_host_priv *hpriv = ap->host->private_data;
1713 pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1717 pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1722 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1729 if (tag == 0 || !IS_GEN_I(hpriv)) {
1730 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
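
Source lines 1663-1730 cover per-port buffer setup and teardown; note the guard `if (tag == 0 || !IS_GEN_I(hpriv))` on both the dma_pool_alloc at 1729-1730 and the dma_pool_free at 1681-1682, i.e. Gen-I hosts allocate and free only the tag-0 scatter/gather table. A kernel-context sketch of that guarded loop; only the guard is visible in this match list, so the aliasing of the remaining tags to the tag-0 table is an assumption about how the full source completes the branch:

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

#define SKETCH_MAX_TAGS	32		/* placeholder queue depth */

struct sketch_port_priv {
	void *sg_tbl[SKETCH_MAX_TAGS];
	dma_addr_t sg_tbl_dma[SKETCH_MAX_TAGS];
};

static int sketch_alloc_sg_tbls(struct dma_pool *sg_tbl_pool,
				struct sketch_port_priv *pp, bool is_gen_i)
{
	int tag;

	for (tag = 0; tag < SKETCH_MAX_TAGS; tag++) {
		if (tag == 0 || !is_gen_i) {
			/* One table per tag on Gen-II/IIE, one in total on Gen-I. */
			pp->sg_tbl[tag] = dma_pool_alloc(sg_tbl_pool, GFP_KERNEL,
							 &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				return -ENOMEM;
		} else {
			/* Assumed: remaining Gen-I tags alias the tag-0 table. */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;
}
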
2400 struct mv_host_priv *hpriv = ap->host->private_data;
2412 if (IS_GEN_II(hpriv))
2639 struct mv_host_priv *hpriv = ap->host->private_data;
2654 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2674 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2711 if (IS_GEN_I(hpriv)) {
2799 struct mv_host_priv *hpriv = ap->host->private_data;
2816 if (IS_GEN_I(hpriv)) {
2882 struct mv_host_priv *hpriv = host->private_data;
2883 void __iomem *mmio = hpriv->base, *hc_mmio;
2890 for (port = 0; port < hpriv->n_ports; port++) {
2925 if ((port + p) >= hpriv->n_ports)
2947 struct mv_host_priv *hpriv = host->private_data;
2954 err_cause = readl(mmio + hpriv->irq_cause_offset);
2961 writelfl(0, mmio + hpriv->irq_cause_offset);
3002 struct mv_host_priv *hpriv = host->private_data;
3004 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
3011 mv_write_main_irq_mask(0, hpriv);
3013 main_irq_cause = readl(hpriv->main_irq_cause_addr);
3014 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
3020 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3021 handled = mv_pci_error(host, hpriv->base);
3028 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
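
Source lines 3002-3028 are from the top-level interrupt handler: mask the controller (the 0 written at 3011, which the full source appears to tie to the using_msi flag computed at 3004), read the cause register, keep only the bits the driver had enabled (cause & hpriv->main_irq_mask), dispatch, then restore the saved mask. A userspace model of that mask/handle/unmask shape, with a fake register file and the per-port and PCI-error dispatch reduced to a flag:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t main_irq_cause_reg = 0x5;	/* fake cause register */
static uint32_t main_irq_mask_reg;		/* fake mask register */

struct mv_host_priv {
	uint32_t main_irq_mask;		/* software copy of the enabled sources */
	bool using_msi;
};

static void write_main_irq_mask(uint32_t mask, struct mv_host_priv *hpriv)
{
	(void)hpriv;
	main_irq_mask_reg = mask;
}

/* Shape of the handler suggested by source lines 3002-3028. */
static int isr_sketch(struct mv_host_priv *hpriv)
{
	int handled = 0;
	uint32_t cause, pending;

	if (hpriv->using_msi)			/* block retriggers while in here */
		write_main_irq_mask(0, hpriv);

	cause = main_irq_cause_reg;			/* readl(main_irq_cause_addr) */
	pending = cause & hpriv->main_irq_mask;		/* drop sources never enabled */
	if (pending)
		handled = 1;		/* real code dispatches per HC/port here */

	if (hpriv->using_msi)			/* restore the saved mask */
		write_main_irq_mask(hpriv->main_irq_mask, hpriv);

	return handled;
}

int main(void)
{
	struct mv_host_priv hpriv = { .main_irq_mask = 0x1, .using_msi = true };

	printf("handled=%d, hw mask=0x%x\n",
	       isr_sketch(&hpriv), (unsigned)main_irq_mask_reg);
	return 0;
}
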
3054 struct mv_host_priv *hpriv = link->ap->host->private_data;
3055 void __iomem *mmio = hpriv->base;
3068 struct mv_host_priv *hpriv = link->ap->host->private_data;
3069 void __iomem *mmio = hpriv->base;
3096 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3101 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3109 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
3110 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
3113 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3126 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3132 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3147 tmp |= hpriv->signal[port].pre;
3148 tmp |= hpriv->signal[port].amps;
3155 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3160 mv_reset_channel(hpriv, mmio, port);
3179 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3200 struct mv_host_priv *hpriv = host->private_data;
3205 mv5_reset_hc_port(hpriv, mmio,
3208 mv5_reset_one_hc(hpriv, mmio, hc);
3218 struct mv_host_priv *hpriv = host->private_data;
3229 ZERO(hpriv->irq_cause_offset);
3230 ZERO(hpriv->irq_mask_offset);
3238 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3242 mv5_reset_flash(hpriv, mmio);
3314 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3322 hpriv->signal[idx].amps = 0x7 << 8;
3323 hpriv->signal[idx].pre = 0x1 << 5;
3330 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3331 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3334 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3339 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3344 u32 hp_flags = hpriv->hp_flags;
3374 if (IS_SOC(hpriv))
3384 if (IS_GEN_IIE(hpriv))
3402 m2 |= hpriv->signal[port].amps;
3403 m2 |= hpriv->signal[port].pre;
3407 if (IS_GEN_IIE(hpriv)) {
3417 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3423 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3432 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3433 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3438 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3443 mv_reset_channel(hpriv, mmio, port);
3463 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3479 struct mv_host_priv *hpriv = host->private_data;
3482 for (port = 0; port < hpriv->n_ports; port++)
3483 mv_soc_reset_hc_port(hpriv, mmio, port);
3485 mv_soc_reset_one_hc(hpriv, mmio);
3490 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3501 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3539 static bool soc_is_65n(struct mv_host_priv *hpriv)
3541 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3558 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3571 if (!IS_GEN_I(hpriv)) {
3584 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3586 if (IS_GEN_I(hpriv))
3622 struct mv_host_priv *hpriv = ap->host->private_data;
3624 void __iomem *mmio = hpriv->base;
3629 mv_reset_channel(hpriv, mmio, ap->port_no);
3645 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3666 struct mv_host_priv *hpriv = ap->host->private_data;
3669 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3725 struct mv_host_priv *hpriv = host->private_data;
3726 void __iomem *mmio = hpriv->base;
3729 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3739 struct mv_host_priv *hpriv = host->private_data;
3740 void __iomem *mmio = hpriv->base;
3753 struct mv_host_priv *hpriv = host->private_data;
3754 void __iomem *mmio = hpriv->base;
3766 struct mv_host_priv *hpriv = host->private_data;
3767 u32 hp_flags = hpriv->hp_flags;
3771 hpriv->ops = &mv5xxx_ops;
3791 hpriv->ops = &mv5xxx_ops;
3811 hpriv->ops = &mv6xxx_ops;
3863 hpriv->ops = &mv6xxx_ops;
3880 if (soc_is_65n(hpriv))
3881 hpriv->ops = &mv_soc_65n_ops;
3883 hpriv->ops = &mv_soc_ops;
3893 hpriv->hp_flags = hp_flags;
3895 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3896 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
3897 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3899 hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3900 hpriv->irq_mask_offset = PCI_IRQ_MASK;
3901 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
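
Source lines 3893-3901 end the chip-identification path by committing the accumulated hp_flags and selecting either the PCIe or the conventional-PCI interrupt register offsets; later code (e.g. source lines 2954-2961 and 3974-3979) then uses hpriv->irq_cause_offset / irq_mask_offset without caring which bus variant it runs on. A small sketch of that pick-once-use-generically idea, with placeholder offset values:

#include <stdint.h>
#include <stdio.h>

/* Placeholder offsets; the real PCIE_IRQ_* / PCI_IRQ_* constants are
 * defined elsewhere in the driver. */
#define SKETCH_PCIE_IRQ_CAUSE	0x1900
#define SKETCH_PCIE_IRQ_MASK	0x1910
#define SKETCH_PCI_IRQ_CAUSE	0x1d58
#define SKETCH_PCI_IRQ_MASK	0x1d5c

struct sketch_host_priv {
	int is_pcie;
	uint32_t irq_cause_offset;
	uint32_t irq_mask_offset;
};

static void sketch_pick_irq_regs(struct sketch_host_priv *hpriv)
{
	if (hpriv->is_pcie) {
		hpriv->irq_cause_offset = SKETCH_PCIE_IRQ_CAUSE;
		hpriv->irq_mask_offset  = SKETCH_PCIE_IRQ_MASK;
	} else {
		hpriv->irq_cause_offset = SKETCH_PCI_IRQ_CAUSE;
		hpriv->irq_mask_offset  = SKETCH_PCI_IRQ_MASK;
	}
}

int main(void)
{
	struct sketch_host_priv hpriv = { .is_pcie = 1 };

	sketch_pick_irq_regs(&hpriv);
	printf("cause@0x%x mask@0x%x\n",
	       (unsigned)hpriv.irq_cause_offset, (unsigned)hpriv.irq_mask_offset);
	return 0;
}
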
3920 struct mv_host_priv *hpriv = host->private_data;
3921 void __iomem *mmio = hpriv->base;
3923 rc = mv_chip_id(host, hpriv->board_idx);
3927 if (IS_SOC(hpriv)) {
3928 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3929 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
3931 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3932 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
3936 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3944 if (hpriv->ops->read_preamp)
3945 hpriv->ops->read_preamp(hpriv, port, mmio);
3947 rc = hpriv->ops->reset_hc(host, mmio, n_hc);
3951 hpriv->ops->reset_flash(hpriv, mmio);
3952 hpriv->ops->reset_bus(host, mmio);
3953 hpriv->ops->enable_leds(hpriv, mmio);
3974 if (!IS_SOC(hpriv)) {
3976 writelfl(0, mmio + hpriv->irq_cause_offset);
3979 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3993 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3995 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3997 if (!hpriv->crqb_pool)
4000 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
4002 if (!hpriv->crpb_pool)
4005 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
4007 if (!hpriv->sg_tbl_pool)
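
Source lines 3993-4007 create three device-managed DMA pools (command request queue, command response queue, scatter/gather tables) that the per-port code draws from at source lines 1713-1730 and returns to at 1668-1682. A kernel-context sketch of the first two pool creations; the sizes and the size-equals-alignment choice are placeholders, since the real arguments sit on continuation lines this match list does not show:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>

/* Placeholder sizes; the real MV_CRQB_Q_SZ / MV_CRPB_Q_SZ / MV_SG_TBL_SZ
 * are defined elsewhere in the driver. */
#define SKETCH_CRQB_Q_SZ	1024
#define SKETCH_CRPB_Q_SZ	256

struct sketch_host_priv {
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
};

static int sketch_create_dma_pools(struct sketch_host_priv *hpriv,
				   struct device *dev)
{
	/* dmam_* pools are device-managed: they are torn down automatically
	 * when the device is unbound, so no explicit destroy path is needed. */
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, SKETCH_CRQB_Q_SZ,
					    SKETCH_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, SKETCH_CRPB_Q_SZ,
					    SKETCH_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;
	return 0;
}

/* Per-port buffers then come from the pools and go back to them, exactly as
 * in the matches at source lines 1713 and 1668:
 *   pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
 *   ...
 *   dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
 */
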
4013 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
4019 writel(0, hpriv->base + WINDOW_CTRL(i));
4020 writel(0, hpriv->base + WINDOW_BASE(i));
4029 hpriv->base + WINDOW_CTRL(i));
4030 writel(cs->base, hpriv->base + WINDOW_BASE(i));
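
Source lines 4013-4030 program the controller's MBus address-decode windows from the SoC's DRAM layout: clear every window first, then write one control/base register pair per populated chip-select. A userspace model of that clear-then-program loop; the window count, register file, descriptor layout, and control-word encoding are placeholders for the sketch, not the driver's real encoding:

#include <stdint.h>
#include <stdio.h>

#define NUM_WINDOWS 4			/* placeholder window count */

static uint32_t window_ctrl[NUM_WINDOWS];	/* stand-ins for WINDOW_CTRL(i) */
static uint32_t window_base[NUM_WINDOWS];	/* stand-ins for WINDOW_BASE(i) */

struct dram_cs {
	uint32_t base;
	uint32_t size;		/* bytes, power of two */
	uint32_t attr;		/* target attribute bits */
};

static void conf_mbus_windows(const struct dram_cs *cs, int num_cs)
{
	int i;

	/* Disable every window first so stale decodes never overlap new ones. */
	for (i = 0; i < NUM_WINDOWS; i++) {
		window_ctrl[i] = 0;
		window_base[i] = 0;
	}

	/* Then program one window per populated chip-select. */
	for (i = 0; i < num_cs && i < NUM_WINDOWS; i++) {
		/* Hypothetical encoding: size mask, attribute, enable bit. */
		window_ctrl[i] = ((cs[i].size - 1) & 0xffff0000) |
				 (cs[i].attr << 8) | 1;
		window_base[i] = cs[i].base;
	}
}

int main(void)
{
	struct dram_cs cs[1] = { { .base = 0, .size = 1u << 28, .attr = 0xe } };

	conf_mbus_windows(cs, 1);
	printf("win0 ctrl=0x%08x base=0x%08x\n",
	       (unsigned)window_ctrl[0], (unsigned)window_base[0]);
	return 0;
}
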
4049 struct mv_host_priv *hpriv;
4100 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4102 if (!host || !hpriv)
4104 hpriv->port_clks = devm_kcalloc(&pdev->dev,
4107 if (!hpriv->port_clks)
4109 hpriv->port_phys = devm_kcalloc(&pdev->dev,
4112 if (!hpriv->port_phys)
4114 host->private_data = hpriv;
4115 hpriv->board_idx = chip_soc;
4118 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4120 if (!hpriv->base)
4123 hpriv->base -= SATAHC0_REG_BASE;
4125 hpriv->clk = clk_get(&pdev->dev, NULL);
4126 if (IS_ERR(hpriv->clk))
4129 clk_prepare_enable(hpriv->clk);
4134 hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4135 if (!IS_ERR(hpriv->port_clks[port]))
4136 clk_prepare_enable(hpriv->port_clks[port]);
4139 hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4141 if (IS_ERR(hpriv->port_phys[port])) {
4142 rc = PTR_ERR(hpriv->port_phys[port]);
4143 hpriv->port_phys[port] = NULL;
4148 hpriv->n_ports = port;
4151 phy_power_on(hpriv->port_phys[port]);
4155 hpriv->n_ports = n_ports;
4162 mv_conf_mbus_windows(hpriv, dram);
4164 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4175 hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
4190 if (!IS_ERR(hpriv->clk)) {
4191 clk_disable_unprepare(hpriv->clk);
4192 clk_put(hpriv->clk);
4194 for (port = 0; port < hpriv->n_ports; port++) {
4195 if (!IS_ERR(hpriv->port_clks[port])) {
4196 clk_disable_unprepare(hpriv->port_clks[port]);
4197 clk_put(hpriv->port_clks[port]);
4199 phy_power_off(hpriv->port_phys[port]);
4216 struct mv_host_priv *hpriv = host->private_data;
4220 if (!IS_ERR(hpriv->clk)) {
4221 clk_disable_unprepare(hpriv->clk);
4222 clk_put(hpriv->clk);
4225 if (!IS_ERR(hpriv->port_clks[port])) {
4226 clk_disable_unprepare(hpriv->port_clks[port]);
4227 clk_put(hpriv->port_clks[port]);
4229 phy_power_off(hpriv->port_phys[port]);
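
Source lines 4125-4252 show the acquire/release symmetry for the optional host clock, the per-port clocks, and the per-port PHYs: whatever probe clk_prepare_enable()s or phy_power_on()s is undone, behind the same IS_ERR() guards, in both the probe error path and the remove path. A kernel-context sketch of the host-clock half of that pattern; the PHY half has the same shape with devm_phy_optional_get()/phy_power_on() on the way in and phy_power_off() on the way out:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct clk *sketch_get_host_clk(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	/* The clock is optional: a missing clock leaves an ERR_PTR behind,
	 * and every later use is guarded by the same IS_ERR() test. */
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);
	return clk;
}

static void sketch_put_host_clk(struct clk *clk)
{
	if (!IS_ERR(clk)) {
		clk_disable_unprepare(clk);
		clk_put(clk);
	}
}
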
4250 struct mv_host_priv *hpriv = host->private_data;
4257 mv_conf_mbus_windows(hpriv, dram);
4328 struct mv_host_priv *hpriv = host->private_data;
4343 if (IS_GEN_I(hpriv))
4345 else if (IS_GEN_II(hpriv))
4347 else if (IS_GEN_IIE(hpriv))
4354 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4371 struct mv_host_priv *hpriv;
4380 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4381 if (!host || !hpriv)
4383 host->private_data = hpriv;
4384 hpriv->n_ports = n_ports;
4385 hpriv->board_idx = board_idx;
4398 hpriv->base = host->iomap[MV_PRIMARY_BAR];
4406 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4412 void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4413 unsigned int offset = port_mmio - hpriv->base;
4426 hpriv->hp_flags |= MV_HP_FLAG_MSI;
4434 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);