Lines Matching defs:mgbe

11 	"rx-pcs", "tx", "tx-pcs", "mac-divider", "mac", "mgbe", "ptp-ref", "mac"
57 struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(dev);
64 clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
66 return reset_control_assert(mgbe->rst_mac);
71 struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(dev);
75 err = clk_bulk_prepare_enable(ARRAY_SIZE(mgbe_clks), mgbe->clks);
79 err = reset_control_deassert(mgbe->rst_mac);
84 writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
87 writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
89 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
91 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
93 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
96 err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL, value,
100 dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n");
101 clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
107 clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
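Lines 57-107 trace the runtime PM pair: the suspend side gates the bulk clocks and asserts the MAC reset, while the resume side re-enables the clocks, releases the reset, re-arms the wrapper interrupt and AXI stream ID, and waits for the UPHY TX lane with readl_poll_timeout(). A minimal reconstruction of that shape follows, assuming only the register and macro names visible in the listing; the callback names, the TX_EN bit and the poll interval/timeout are illustrative placeholders, and the initial XPCS_WRAP_UPHY_STATUS check (line 89) is omitted for brevity:

/* Sketch reconstructed from the fragments above; not the verbatim driver. */
static int tegra_mgbe_suspend(struct device *dev)
{
	struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);

	return reset_control_assert(mgbe->rst_mac);
}

static int tegra_mgbe_resume(struct device *dev)
{
	struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(dev);
	u32 value;
	int err;

	err = clk_bulk_prepare_enable(ARRAY_SIZE(mgbe_clks), mgbe->clks);
	if (err < 0)
		return err;

	err = reset_control_deassert(mgbe->rst_mac);
	if (err < 0)
		goto disable_clks;

	/* Re-arm the wrapper interrupt and restore the AXI stream ID. */
	writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
	writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);

	/* Kick UPHY TX lane initialization; TX_EN is a placeholder bit name. */
	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
	value |= TX_EN;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);

	/* The init bit is assumed to self-clear; timeouts are illustrative. */
	err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL, value,
				 (value & TX_EN) == 0, 1000, 1000 * 1000);
	if (err < 0) {
		dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n");
		goto disable_clks;
	}

	return 0;

disable_clks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
	return err;
}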
114 struct tegra_mgbe *mgbe = (struct tegra_mgbe *)mgbe_data;
118 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
120 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
122 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
124 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
126 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
128 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
130 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
132 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
134 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
136 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
138 err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL, value,
142 dev_err(mgbe->dev, "timeout waiting for RX calibration to become enabled\n");
146 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
148 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
150 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
152 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
154 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
156 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
158 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
160 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
162 err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
166 dev_err(mgbe->dev, "timeout waiting for link to become ready\n");
171 writel(value, mgbe->xpcs + XPCS_WRAP_IRQ_STATUS);
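Lines 114-171 are a chain of read-modify-write steps on XPCS_WRAP_UPHY_RX_CONTROL, each a readl()/writel() pair, capped by two polls: one on XPCS_WRAP_UPHY_RX_CONTROL for RX calibration to finish and one on XPCS_WRAP_IRQ_STATUS for link-ready, which is then acknowledged by writing the value back (line 171). A condensed sketch of those two building blocks; the helper names and the RX_CAL_EN/LINK_UP bit names are placeholders, not taken from the listing:

/* One read-modify-write step on the RX lane control register. */
static void mgbe_uphy_rx_update(struct tegra_mgbe *mgbe, u32 set, u32 clear)
{
	u32 value;

	value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
	value &= ~clear;
	value |= set;
	writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
}

/* Wait for calibration to complete, then for the link-ready latch. */
static int mgbe_uphy_rx_wait(struct tegra_mgbe *mgbe)
{
	u32 value;
	int err;

	err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL, value,
				 (value & RX_CAL_EN) == 0, 1000, 1000 * 1000);
	if (err < 0) {
		dev_err(mgbe->dev, "timeout waiting for RX calibration to become enabled\n");
		return err;
	}

	err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
				 (value & LINK_UP) != 0, 1000, 1000 * 1000);
	if (err < 0) {
		dev_err(mgbe->dev, "timeout waiting for link to become ready\n");
		return err;
	}

	/* Acknowledge the latched status by writing it back (per line 171). */
	writel(value, mgbe->xpcs + XPCS_WRAP_IRQ_STATUS);

	return 0;
}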
178 struct tegra_mgbe *mgbe = (struct tegra_mgbe *)mgbe_data;
181 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
183 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
185 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
187 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
189 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
191 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
193 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
195 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
197 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
199 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
206 struct tegra_mgbe *mgbe;
210 mgbe = devm_kzalloc(&pdev->dev, sizeof(*mgbe), GFP_KERNEL);
211 if (!mgbe)
214 mgbe->dev = &pdev->dev;
222 mgbe->hv = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
223 if (IS_ERR(mgbe->hv))
224 return PTR_ERR(mgbe->hv);
226 mgbe->regs = devm_platform_ioremap_resource_byname(pdev, "mac");
227 if (IS_ERR(mgbe->regs))
228 return PTR_ERR(mgbe->regs);
230 mgbe->xpcs = devm_platform_ioremap_resource_byname(pdev, "xpcs");
231 if (IS_ERR(mgbe->xpcs))
232 return PTR_ERR(mgbe->xpcs);
234 res.addr = mgbe->regs;
237 mgbe->clks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(mgbe_clks),
238 sizeof(*mgbe->clks), GFP_KERNEL);
239 if (!mgbe->clks)
243 mgbe->clks[i].id = mgbe_clks[i];
245 err = devm_clk_bulk_get(mgbe->dev, ARRAY_SIZE(mgbe_clks), mgbe->clks);
249 err = clk_bulk_prepare_enable(ARRAY_SIZE(mgbe_clks), mgbe->clks);
254 mgbe->rst_mac = devm_reset_control_get(&pdev->dev, "mac");
255 if (IS_ERR(mgbe->rst_mac)) {
256 err = PTR_ERR(mgbe->rst_mac);
260 err = reset_control_assert(mgbe->rst_mac);
266 err = reset_control_deassert(mgbe->rst_mac);
271 mgbe->rst_pcs = devm_reset_control_get(&pdev->dev, "pcs");
272 if (IS_ERR(mgbe->rst_pcs)) {
273 err = PTR_ERR(mgbe->rst_pcs);
277 err = reset_control_assert(mgbe->rst_pcs);
283 err = reset_control_deassert(mgbe->rst_pcs);
296 plat->bsp_priv = mgbe;
312 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_STATUS);
314 value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
316 writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL);
319 err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_UPHY_HW_INIT_CTRL, value,
323 dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n");
336 writel(MAC_SBD_INTR, mgbe->regs + MGBE_WRAP_COMMON_INTR_ENABLE);
339 writel(MGBE_SID, mgbe->hv + MGBE_WRAP_AXI_ASID0_CTRL);
352 clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
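Lines 206-296 cover the probe-side resource acquisition: allocate the per-device context, map the three named register regions, build the clock bulk table from mgbe_clks, and take the "mac" and "pcs" resets before stashing the context in plat->bsp_priv; lines 312-352 then repeat the UPHY TX bring-up, interrupt enable and ASID setup already seen in the resume path. A trimmed sketch of the resource-acquisition half, using only names visible above; the reset cycling at lines 260-283 and the stmmac platform-data glue (res.addr, plat->bsp_priv) are deliberately left out:

static int tegra_mgbe_probe(struct platform_device *pdev)
{
	struct tegra_mgbe *mgbe;
	unsigned int i;
	int err;

	mgbe = devm_kzalloc(&pdev->dev, sizeof(*mgbe), GFP_KERNEL);
	if (!mgbe)
		return -ENOMEM;

	mgbe->dev = &pdev->dev;

	/* Three named register regions: wrapper/hypervisor, MAC and XPCS. */
	mgbe->hv = devm_platform_ioremap_resource_byname(pdev, "hypervisor");
	if (IS_ERR(mgbe->hv))
		return PTR_ERR(mgbe->hv);

	mgbe->regs = devm_platform_ioremap_resource_byname(pdev, "mac");
	if (IS_ERR(mgbe->regs))
		return PTR_ERR(mgbe->regs);

	mgbe->xpcs = devm_platform_ioremap_resource_byname(pdev, "xpcs");
	if (IS_ERR(mgbe->xpcs))
		return PTR_ERR(mgbe->xpcs);

	/* Bulk clock table built from the mgbe_clks name array (line 11). */
	mgbe->clks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(mgbe_clks),
				  sizeof(*mgbe->clks), GFP_KERNEL);
	if (!mgbe->clks)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(mgbe_clks); i++)
		mgbe->clks[i].id = mgbe_clks[i];

	err = devm_clk_bulk_get(mgbe->dev, ARRAY_SIZE(mgbe_clks), mgbe->clks);
	if (err < 0)
		return err;

	err = clk_bulk_prepare_enable(ARRAY_SIZE(mgbe_clks), mgbe->clks);
	if (err < 0)
		return err;

	/* "mac" and "pcs" resets; later failures must unwind the clocks. */
	mgbe->rst_mac = devm_reset_control_get(&pdev->dev, "mac");
	if (IS_ERR(mgbe->rst_mac)) {
		err = PTR_ERR(mgbe->rst_mac);
		goto disable_clks;
	}

	mgbe->rst_pcs = devm_reset_control_get(&pdev->dev, "pcs");
	if (IS_ERR(mgbe->rst_pcs)) {
		err = PTR_ERR(mgbe->rst_pcs);
		goto disable_clks;
	}

	return 0;

disable_clks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
	return err;
}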
359 struct tegra_mgbe *mgbe = get_stmmac_bsp_priv(&pdev->dev);
361 clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
367 { .compatible = "nvidia,tegra234-mgbe", },
378 .name = "tegra-mgbe",