Lines matching refs: cbphy

107 struct intel_combo_phy *cbphy = iphy->parent;
108 u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
114 return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
120 struct intel_combo_phy *cbphy = iphy->parent;
121 u32 mask = BIT(cbphy->id * 2 + iphy->id);
127 return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
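
The two helpers above (107-114 and 120-127) are the per-lane gating pair: the first toggles the lane's clock-disable bit in the HSIO regmap, the second its PCIe pad-disable bit in the chiptop regmap. A minimal sketch of the first, consistent with the upstream drivers/phy/intel/phy-intel-combo.c driver these fragments appear to come from; the function name, the bool set parameter, and the 0-enables/1-disables polarity are inferred, not shown in the listing:

static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
{
        struct intel_combo_phy *cbphy = iphy->parent;
        /* Two clock-disable bits per PHY mode, one per lane */
        u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
        u32 val;

        /* Assumed register polarity: 0 is enable, 1 is disable */
        val = set ? 0 : mask;

        return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
                                  mask, val);
}

The helper at 120-127 has the same shape but keys the bit off cbphy->id and writes PAD_DIS_CFG through cbphy->syscfg instead.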
144 struct intel_combo_phy *cbphy = iphy->parent;
151 if (cbphy->aggr_mode != PHY_DL_MODE)
154 return phy_cfg(&cbphy->iphy[PHY_1]);
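
Lines 144-154 show the dual-lane fan-out: a configuration callback runs on the calling lane and, when the ComboPhy is aggregated, is repeated for PHY_1. Sketch, with the name intel_cbphy_iphy_cfg and the function-pointer parameter inferred:

static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
                                int (*phy_cfg)(struct intel_cbphy_iphy *))
{
        struct intel_combo_phy *cbphy = iphy->parent;
        int ret;

        ret = phy_cfg(iphy);
        if (ret)
                return ret;

        /* Single-lane mode: only the calling lane is configured */
        if (cbphy->aggr_mode != PHY_DL_MODE)
                return 0;

        /* Dual-lane mode: mirror the configuration onto the second lane */
        return phy_cfg(&cbphy->iphy[PHY_1]);
}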
159 struct intel_combo_phy *cbphy = iphy->parent;
164 dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
168 if (cbphy->init_cnt)
171 combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
182 struct intel_combo_phy *cbphy = iphy->parent;
187 dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
191 if (cbphy->init_cnt)
194 combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
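
Lines 159-171 and 182-194 are a symmetric enable/disable pair for the PCIe pad reference clock: each flips the per-lane pad bit first, then touches the shared PCIE_PHY_CLK_PAD bit in PCIE_PHY_GEN_CTRL only when no other lane holds the PHY initialized (init_cnt is zero). A sketch of the enable side; the function name, the refclk-cfg helper call, and the settle delay are assumptions:

static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
{
        struct intel_combo_phy *cbphy = iphy->parent;
        int ret;

        ret = intel_cbphy_pcie_refclk_cfg(iphy, true);  /* assumed helper */
        if (ret) {
                dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
                return ret;
        }

        /* The shared pad bit is only touched by the first user */
        if (cbphy->init_cnt)
                return 0;

        combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
                               PCIE_PHY_CLK_PAD, 0);

        /* assumed: short delay for the clock PLL to stabilize */
        usleep_range(50, 100);

        return 0;
}

The disable side at 182-194 mirrors this, writing PCIE_PHY_CLK_PAD back into the same field.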
200 static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
203 enum aggregated_mode aggr = cbphy->aggr_mode;
204 struct device *dev = cbphy->dev;
208 mode = cbphy->phy_mode;
231 ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
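
intel_cbphy_set_mode() (200-231) maps the (phy_mode, aggr_mode) pair to a combo-mode value and writes it to REG_COMBO_MODE in the HSIO regmap. A sketch of the mapping the fragments imply; the intel_combo_mode value names and the SATA dual-lane rejection are assumptions consistent with the listed pieces:

static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
{
        enum intel_combo_mode cb_mode;
        enum aggregated_mode aggr = cbphy->aggr_mode;
        struct device *dev = cbphy->dev;
        enum intel_phy_mode mode;
        int ret;

        mode = cbphy->phy_mode;

        switch (mode) {
        case PHY_PCIE_MODE:
                cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
                break;
        case PHY_XPCS_MODE:
                cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
                break;
        case PHY_SATA_MODE:
                /* assumed: SATA cannot be aggregated into one dual-lane link */
                if (aggr == PHY_DL_MODE)
                        return -EINVAL;
                cb_mode = SATA0_SATA1_MODE;
                break;
        default:
                return -EINVAL;
        }

        ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
        if (ret)
                dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);

        return ret;
}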
238 static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
240 reset_control_assert(cbphy->core_rst);
241 reset_control_assert(cbphy->phy_rst);
244 static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
246 reset_control_deassert(cbphy->core_rst);
247 reset_control_deassert(cbphy->phy_rst);
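
Lines 238-247 are the shared reset helpers: assert and deassert always act on core_rst and phy_rst as a pair. The deassert side presumably needs a short settle delay before the mode write at 273; a sketch with that assumed delay:

static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
{
        reset_control_deassert(cbphy->core_rst);
        reset_control_deassert(cbphy->phy_rst);
        /* assumed: give the reset release time to propagate */
        usleep_range(10, 20);
}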
254 struct intel_combo_phy *cbphy = iphy->parent;
257 if (!cbphy->init_cnt) {
258 ret = clk_prepare_enable(cbphy->core_clk);
260 dev_err(cbphy->dev, "Clock enable failed!\n");
264 ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
266 dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
267 cbphy->clk_rate);
271 intel_cbphy_rst_assert(cbphy);
272 intel_cbphy_rst_deassert(cbphy);
273 ret = intel_cbphy_set_mode(cbphy);
280 dev_err(cbphy->dev, "Failed enabling PHY core\n");
286 dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
297 clk_disable_unprepare(cbphy->core_clk);
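
Lines 254-297 form the power-on path. The first caller (init_cnt == 0) brings up and rates the core clock, pulses the shared resets, and latches the combo mode; every caller then enables its own lane and releases its app reset, unwinding the clock on failure (the label at 297). Sketch; the function name, the lane-enable call, and the app-reset step are inferred from the error strings at 280 and 286:

static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
{
        struct intel_combo_phy *cbphy = iphy->parent;
        int ret;

        if (!cbphy->init_cnt) {
                ret = clk_prepare_enable(cbphy->core_clk);
                if (ret) {
                        dev_err(cbphy->dev, "Clock enable failed!\n");
                        return ret;
                }

                ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
                if (ret) {
                        dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
                                cbphy->clk_rate);
                        goto clk_err;
                }

                /* Pulse the shared resets, then latch the combo mode */
                intel_cbphy_rst_assert(cbphy);
                intel_cbphy_rst_deassert(cbphy);
                ret = intel_cbphy_set_mode(cbphy);
                if (ret)
                        goto clk_err;
        }

        ret = intel_cbphy_iphy_enable(iphy, true);      /* assumed call */
        if (ret) {
                dev_err(cbphy->dev, "Failed enabling PHY core\n");
                goto clk_err;
        }

        ret = reset_control_deassert(iphy->app_rst);    /* assumed step */
        if (ret) {
                dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
                        cbphy->id, iphy->id);
                goto clk_err;
        }

        return 0;

clk_err:
        clk_disable_unprepare(cbphy->core_clk);
        return ret;
}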
304 struct intel_combo_phy *cbphy = iphy->parent;
309 dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
316 dev_err(cbphy->dev, "Failed disabling PHY core\n");
320 if (cbphy->init_cnt)
323 clk_disable_unprepare(cbphy->core_clk);
324 intel_cbphy_rst_assert(cbphy);
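
The power-off path at 304-324 is the mirror image: assert the lane's app reset, disable the lane, and only when this was the last user stop the core clock and assert the shared resets. Sketch, names inferred as above:

static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
{
        struct intel_combo_phy *cbphy = iphy->parent;
        int ret;

        ret = reset_control_assert(iphy->app_rst);      /* assumed step */
        if (ret) {
                dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
                        cbphy->id, iphy->id);
                return ret;
        }

        ret = intel_cbphy_iphy_enable(iphy, false);     /* assumed call */
        if (ret) {
                dev_err(cbphy->dev, "Failed disabling PHY core\n");
                return ret;
        }

        /* Other lanes are still up: keep the clock and shared resets */
        if (cbphy->init_cnt)
                return 0;

        clk_disable_unprepare(cbphy->core_clk);
        intel_cbphy_rst_assert(cbphy);

        return 0;
}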
332 struct intel_combo_phy *cbphy = iphy->parent;
335 mutex_lock(&cbphy->lock);
340 if (cbphy->phy_mode == PHY_PCIE_MODE) {
346 cbphy->init_cnt++;
349 mutex_unlock(&cbphy->lock);
357 struct intel_combo_phy *cbphy = iphy->parent;
360 mutex_lock(&cbphy->lock);
361 cbphy->init_cnt--;
362 if (cbphy->phy_mode == PHY_PCIE_MODE) {
371 mutex_unlock(&cbphy->lock);
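
Lines 332-371 are the phy_ops init/exit callbacks. Both lanes share state, so everything runs under cbphy->lock with init_cnt counting live lanes; in PCIe mode the pad refclk is switched in the same sequence. Sketch of the init side, using the helper names assumed in the sketches above (the intel_cbphy_init name is also inferred):

static int intel_cbphy_init(struct phy *phy)
{
        struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
        struct intel_combo_phy *cbphy = iphy->parent;
        int ret;

        mutex_lock(&cbphy->lock);
        ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
        if (ret)
                goto err;

        if (cbphy->phy_mode == PHY_PCIE_MODE) {
                ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
                if (ret)
                        goto err;
        }

        cbphy->init_cnt++;

err:
        mutex_unlock(&cbphy->lock);
        return ret;
}

The exit side at 357-371 decrements init_cnt first (line 361), so the power-off helpers can see whether they were the last user.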
379 struct intel_combo_phy *cbphy = iphy->parent;
380 void __iomem *cr_base = cbphy->cr_base;
383 if (cbphy->phy_mode != PHY_XPCS_MODE)
395 dev_err(cbphy->dev, "RX Adaptation failed!\n");
397 dev_dbg(cbphy->dev, "RX Adaptation success!\n");
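
The calibrate path at 379-397 only applies in XPCS mode: it kicks auto RX adaptation through the CR address space and polls for the acknowledge bit. The mode gate at 383 and the two log lines are from the listing; the CR register macros, field masks, and poll parameters in this sketch are assumptions:

static int intel_cbphy_calibrate(struct phy *phy)
{
        struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
        struct intel_combo_phy *cbphy = iphy->parent;
        void __iomem *cr_base = cbphy->cr_base;
        int val, ret, id;

        if (cbphy->phy_mode != PHY_XPCS_MODE)
                return 0;

        id = iphy->id;

        /* Trigger auto RX adaptation */
        combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
                               ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
        /* Poll for the adaptation acknowledge */
        ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
                                 val, val & RX_ADAPT_ACK_BIT, 10, 5000);
        if (ret)
                dev_err(cbphy->dev, "RX Adaptation failed!\n");
        else
                dev_dbg(cbphy->dev, "RX Adaptation success!\n");

        return ret;
}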
406 static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
408 struct device *dev = cbphy->dev;
415 cbphy->core_clk = devm_clk_get(dev, NULL);
416 if (IS_ERR(cbphy->core_clk))
417 return dev_err_probe(dev, PTR_ERR(cbphy->core_clk),
420 cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
421 if (IS_ERR(cbphy->core_rst))
422 return dev_err_probe(dev, PTR_ERR(cbphy->core_rst),
425 cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
426 if (IS_ERR(cbphy->phy_rst))
427 return dev_err_probe(dev, PTR_ERR(cbphy->phy_rst),
430 cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
431 if (IS_ERR(cbphy->iphy[0].app_rst))
432 return dev_err_probe(dev, PTR_ERR(cbphy->iphy[0].app_rst),
435 cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
436 if (IS_ERR(cbphy->iphy[1].app_rst))
437 return dev_err_probe(dev, PTR_ERR(cbphy->iphy[1].app_rst),
440 cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
441 if (IS_ERR(cbphy->app_base))
442 return PTR_ERR(cbphy->app_base);
444 cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
445 if (IS_ERR(cbphy->cr_base))
446 return PTR_ERR(cbphy->cr_base);
458 cbphy->id = ref.args[0];
459 cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
467 cbphy->bid = ref.args[0];
468 cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
477 cbphy->phy_mode = PHY_PCIE_MODE;
481 cbphy->phy_mode = PHY_SATA_MODE;
485 cbphy->phy_mode = PHY_XPCS_MODE;
493 cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];
496 cbphy->aggr_mode = PHY_DL_MODE;
498 cbphy->aggr_mode = PHY_SL_MODE;
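
intel_cbphy_fwnode_parse() (406-498) gathers every external dependency: the core clock, three optional resets, two named MMIO regions, two syscon-style regmap references whose argument carries an instance id (id at 458, bid at 467), the PHY mode, and the aggregation flag. The listing elides the property names; a sketch of the regmap-reference step inside the parser, where dev is the local from 408 and "intel,syscfg" is an assumed property name:

        struct fwnode_reference_args ref;
        int ret;

        /* assumed property name; one cell carries the instance id */
        ret = fwnode_property_get_reference_args(dev_fwnode(dev), "intel,syscfg",
                                                 NULL, 1, 0, &ref);
        if (ret < 0)
                return ret;

        cbphy->id = ref.args[0];
        cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
        fwnode_handle_put(ref.fwnode);

The HSIO reference at 467-468 is parsed the same way into cbphy->bid and cbphy->hsiocfg, and the selected phy_mode indexes the clock-rate table at 493.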
513 struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
527 if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
532 return cbphy->iphy[iphy_id].phy;
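
Lines 513-532 belong to the provider's of_xlate callback: it resolves the requested lane from the phandle argument and refuses PHY_1 when the ComboPhy runs aggregated, since that lane has no phy of its own. Sketch; the function name and argument validation are inferred:

static struct phy *intel_cbphy_xlate(struct device *dev,
                                     struct of_phandle_args *args)
{
        struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
        u32 iphy_id = args->args[0];    /* assumed: args_count checked first */

        if (iphy_id >= PHY_MAX_NUM)
                return ERR_PTR(-EINVAL);

        /* In dual-lane mode the second lane is owned by the first */
        if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1)
                return ERR_PTR(-EINVAL);

        return cbphy->iphy[iphy_id].phy;
}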
535 static int intel_cbphy_create(struct intel_combo_phy *cbphy)
538 struct device *dev = cbphy->dev;
543 iphy = &cbphy->iphy[i];
544 iphy->parent = cbphy;
548 if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
562 dev_set_drvdata(dev, cbphy);
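
intel_cbphy_create() (535-562) walks both lanes, wires each iphy back to its parent, and skips phy creation for PHY_1 in dual-lane mode (the condition at 548) before registering the provider. Sketch; devm_phy_create, the intel_cbphy_ops table, and the provider registration follow the generic phy framework and are not shown in the listing:

static int intel_cbphy_create(struct intel_combo_phy *cbphy)
{
        struct phy_provider *phy_provider;
        struct device *dev = cbphy->dev;
        struct intel_cbphy_iphy *iphy;
        int i;

        for (i = 0; i < PHY_MAX_NUM; i++) {
                iphy = &cbphy->iphy[i];
                iphy->parent = cbphy;
                iphy->id = i;

                /* In dual-lane mode the second lane gets no phy of its own */
                if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
                        continue;

                /* assumed: driver's phy_ops with init/exit/calibrate */
                iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
                if (IS_ERR(iphy->phy))
                        return PTR_ERR(iphy->phy);

                phy_set_drvdata(iphy->phy, iphy);
        }

        dev_set_drvdata(dev, cbphy);
        phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);

        return PTR_ERR_OR_ZERO(phy_provider);
}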
573 struct intel_combo_phy *cbphy;
576 cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
577 if (!cbphy)
580 cbphy->dev = dev;
581 cbphy->init_cnt = 0;
582 mutex_init(&cbphy->lock);
583 ret = intel_cbphy_fwnode_parse(cbphy);
587 platform_set_drvdata(pdev, cbphy);
589 return intel_cbphy_create(cbphy);
594 struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);
596 intel_cbphy_rst_assert(cbphy);
597 clk_disable_unprepare(cbphy->core_clk);
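
Probe (573-589) and remove (594-597) bracket the lifecycle: probe allocates the state, initializes the lock, parses the firmware node, and creates the phys; remove quiesces the hardware. Sketch of the remove side; the function name is inferred, both listed statements appear verbatim:

static int intel_cbphy_remove(struct platform_device *pdev)
{
        struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);

        /* Quiesce the PHY: hold it in reset, then gate its clock */
        intel_cbphy_rst_assert(cbphy);
        clk_disable_unprepare(cbphy->core_clk);

        return 0;
}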