Lines matching refs: res
239 union qcom_pcie_resources res;
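
Line 239 is the declaration that ties the rest of the matches together. These results appear to come from the Qualcomm DesignWare PCIe controller driver (drivers/pci/controller/dwc/pcie-qcom.c in the Linux kernel): struct qcom_pcie keeps one resource bundle per controller generation, overlaid in a union because any given device only ever uses one variant. Reconstructed from the accessors in the matches below (v1_0_0, v2_1_0, ...), the union looks roughly like this:

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

Each qcom_pcie_get_resources_*() writes into its own member, so the per-variant init/deinit pairs further down always read back the same view they populated.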
301 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
307 res->supplies[0].supply = "vdda";
308 res->supplies[1].supply = "vdda_phy";
309 res->supplies[2].supply = "vdda_refclk";
310 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
311 res->supplies);
315 res->clks[0].id = "iface";
316 res->clks[1].id = "core";
317 res->clks[2].id = "phy";
318 res->clks[3].id = "aux";
319 res->clks[4].id = "ref";
322 ret = devm_clk_bulk_get(dev, 3, res->clks);
327 ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
331 res->resets[0].id = "pci";
332 res->resets[1].id = "axi";
333 res->resets[2].id = "ahb";
334 res->resets[3].id = "por";
335 res->resets[4].id = "phy";
336 res->resets[5].id = "ext";
339 res->num_resets = is_apq ? 5 : 6;
340 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
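
Lines 301-340 are the 2_1_0 acquisition path: three regulators fetched in one bulk call, five clocks split into a required triple and an optional pair, and a reset count that depends on the SoC (is_apq). A minimal self-contained sketch of the required/optional clock split, with a hypothetical helper name:

#include <linux/clk.h>
#include <linux/device.h>

/* Sketch only, not the driver's exact code: required IDs sit at the front
 * of the array so one bulk call covers them; the optional tail is fetched
 * separately, and any missing optional clock becomes a NULL handle that
 * later clk_bulk_* calls silently skip.
 */
static int sketch_get_clks(struct device *dev, struct clk_bulk_data clks[5])
{
	int ret;

	clks[0].id = "iface";	/* required */
	clks[1].id = "core";	/* required */
	clks[2].id = "phy";	/* required */
	clks[3].id = "aux";	/* optional */
	clks[4].id = "ref";	/* optional */

	ret = devm_clk_bulk_get(dev, 3, clks);	/* fails if any of the 3 is absent */
	if (ret < 0)
		return ret;

	return devm_clk_bulk_get_optional(dev, 2, clks + 3);
}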
349 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
351 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
352 reset_control_bulk_assert(res->num_resets, res->resets);
356 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
361 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
367 ret = reset_control_bulk_assert(res->num_resets, res->resets);
373 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
379 ret = reset_control_bulk_deassert(res->num_resets, res->resets);
382 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
391 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
403 ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
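
Lines 361-403 are the matching power-up sequence, where the ordering is the point: the resets are held asserted while the supplies ramp, released afterwards, and the clocks only start once the block is out of reset. Condensed from the matches above (the register writes between these calls are omitted):

	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret)
		return ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret)
		return ret;

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret) {
		/* undo the only step that succeeded so far */
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);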
451 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
456 res->vdda = devm_regulator_get(dev, "vdda");
457 if (IS_ERR(res->vdda))
458 return PTR_ERR(res->vdda);
460 res->clks[0].id = "iface";
461 res->clks[1].id = "aux";
462 res->clks[2].id = "master_bus";
463 res->clks[3].id = "slave_bus";
465 ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
469 res->core = devm_reset_control_get_exclusive(dev, "core");
470 return PTR_ERR_OR_ZERO(res->core);
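
The 1_0_0 variant (lines 451-470) uses single handles rather than bulk arrays for its regulator and reset. Worth noting is the PTR_ERR_OR_ZERO() tail, which folds the usual IS_ERR() check into the return statement; condensed from the matches:

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	/* ...bulk clock get elided... */

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);	/* 0 if valid, PTR_ERR() if not */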
475 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
477 reset_control_assert(res->core);
478 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
479 regulator_disable(res->vdda);
484 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
489 ret = reset_control_deassert(res->core);
495 ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
501 ret = regulator_enable(res->vdda);
510 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
512 reset_control_assert(res->core);
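
Lines 484-512 bring the 1_0_0 resources up in one order and, when a later step fails, tear them down in exact reverse. Written out with the kernel's usual goto-unwind idiom it would look like the sketch below; the matches only show the calls, so the labels are hypothetical:

	ret = reset_control_deassert(res->core);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		goto err_assert_reset;

	ret = regulator_enable(res->vdda);
	if (ret)
		goto err_disable_clks;

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_assert_reset:
	reset_control_assert(res->core);
	return ret;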
546 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
551 res->supplies[0].supply = "vdda";
552 res->supplies[1].supply = "vddpe-3v3";
553 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
554 res->supplies);
558 res->clks[0].id = "aux";
559 res->clks[1].id = "cfg";
560 res->clks[2].id = "bus_master";
561 res->clks[3].id = "bus_slave";
563 ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
572 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
574 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
575 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
580 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
585 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
591 ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
594 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
633 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
639 res->clks[0].id = "aux";
640 res->clks[1].id = "master_bus";
641 res->clks[2].id = "slave_bus";
642 res->clks[3].id = "iface";
645 res->num_clks = is_ipq ? 3 : 4;
647 ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
651 res->resets[0].id = "axi_m";
652 res->resets[1].id = "axi_s";
653 res->resets[2].id = "axi_m_sticky";
654 res->resets[3].id = "pipe_sticky";
655 res->resets[4].id = "pwr";
656 res->resets[5].id = "ahb";
657 res->resets[6].id = "pipe";
658 res->resets[7].id = "axi_m_vmid";
659 res->resets[8].id = "axi_s_xpu";
660 res->resets[9].id = "parf";
661 res->resets[10].id = "phy";
662 res->resets[11].id = "phy_ahb";
664 res->num_resets = is_ipq ? 12 : 6;
666 ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
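
The 2_4_0 tables (lines 633-666) absorb SoC differences by ordering each array so the entries common to every variant come first and then passing a per-variant count to the bulk API: IPQ parts drop the trailing "iface" clock but need six extra resets. Condensed, with the is_ipq test assumed to be a compatible-string check:

	res->num_clks = is_ipq ? 3 : 4;		/* IPQ variants have no "iface" clock */
	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->num_resets = is_ipq ? 12 : 6;	/* entries 6-11 are IPQ-only */
	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);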
675 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
677 reset_control_bulk_assert(res->num_resets, res->resets);
678 clk_bulk_disable_unprepare(res->num_clks, res->clks);
683 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
688 ret = reset_control_bulk_assert(res->num_resets, res->resets);
696 ret = reset_control_bulk_deassert(res->num_resets, res->resets);
704 ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
706 reset_control_bulk_assert(res->num_resets, res->resets);
715 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
720 res->clks[0].id = "iface";
721 res->clks[1].id = "axi_m";
722 res->clks[2].id = "axi_s";
723 res->clks[3].id = "ahb";
724 res->clks[4].id = "aux";
726 ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
730 res->rst[0].id = "axi_m";
731 res->rst[1].id = "axi_s";
732 res->rst[2].id = "pipe";
733 res->rst[3].id = "axi_m_sticky";
734 res->rst[4].id = "sticky";
735 res->rst[5].id = "ahb";
736 res->rst[6].id = "sleep";
738 ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
747 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
749 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
754 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
759 ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
767 ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
779 ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
792 reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
837 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
844 res->rst = devm_reset_control_array_get_exclusive(dev);
845 if (IS_ERR(res->rst))
846 return PTR_ERR(res->rst);
848 res->supplies[0].supply = "vdda";
849 res->supplies[1].supply = "vddpe-3v3";
850 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
851 res->supplies);
856 res->clks[idx++].id = "aux";
857 res->clks[idx++].id = "cfg";
858 res->clks[idx++].id = "bus_master";
859 res->clks[idx++].id = "bus_slave";
860 res->clks[idx++].id = "slave_q2a";
864 ret = devm_clk_bulk_get(dev, num_clks, res->clks);
868 res->clks[idx++].id = "tbu";
869 res->clks[idx++].id = "ddrss_sf_tbu";
870 res->clks[idx++].id = "aggre0";
871 res->clks[idx++].id = "aggre1";
872 res->clks[idx++].id = "noc_aggr";
873 res->clks[idx++].id = "noc_aggr_4";
874 res->clks[idx++].id = "noc_aggr_south_sf";
875 res->clks[idx++].id = "cnoc_qx";
876 res->clks[idx++].id = "sleep";
877 res->clks[idx++].id = "cnoc_sf_axi";
880 res->num_clks = idx;
882 ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
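
qcom_pcie_get_resources_2_7_0() (lines 837-882) builds its clock table with a running index instead of fixed slots: the five required IDs go in first, the boundary is recorded, the SoC-specific optional IDs are appended, and the array is fetched in two slices. The bookkeeping assignments are not among the matches, so the sketch below fills them in as assumptions (num_opt_clks in the real code should equal idx - num_clks):

	unsigned int idx = 0, num_clks;

	res->clks[idx++].id = "aux";	/* required group, see lines 856-860 */
	/* ... */
	num_clks = idx;			/* assumed: boundary after the required IDs */

	ret = devm_clk_bulk_get(dev, num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->clks[idx++].id = "tbu";	/* optional group, see lines 868-877 */
	/* ... */
	res->num_clks = idx;

	ret = devm_clk_bulk_get_optional(dev, idx - num_clks, res->clks + num_clks);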
891 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
897 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
903 ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
907 ret = reset_control_assert(res->rst);
915 ret = reset_control_deassert(res->rst);
955 clk_bulk_disable_unprepare(res->num_clks, res->clks);
957 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
971 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
973 clk_bulk_disable_unprepare(res->num_clks, res->clks);
975 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1056 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1061 res->clks[0].id = "iface";
1062 res->clks[1].id = "axi_m";
1063 res->clks[2].id = "axi_s";
1064 res->clks[3].id = "axi_bridge";
1065 res->clks[4].id = "rchng";
1067 ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
1071 res->rst = devm_reset_control_array_get_exclusive(dev);
1072 if (IS_ERR(res->rst))
1073 return PTR_ERR(res->rst);
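
Unlike the older variants, 2_7_0 and 2_9_0 (lines 844 and 1071) never name their resets: devm_reset_control_array_get_exclusive() bundles every reset listed in the device-tree node into a single handle, and one assert/deassert pair then toggles them all. Sketch, with an assumed settle delay since the actual value is not among the matches:

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	/* later, during init: each call acts on every bundled reset */
	ret = reset_control_assert(res->rst);
	if (ret)
		return ret;

	usleep_range(2000, 2500);	/* assumed; pick per SoC requirements */

	ret = reset_control_deassert(res->rst);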
1080 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1082 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
1087 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1091 ret = reset_control_assert(res->rst);
1103 ret = reset_control_deassert(res->rst);
1111 return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
1453 struct resource *res;
1503 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
1504 if (res) {
1505 pcie->mhi = devm_ioremap_resource(dev, res);
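
Finally, lines 1453-1505 sit in the probe path and are the one use of a plain struct resource: the "mhi" register region is optional, so a NULL lookup result is silently skipped and only an existing resource is mapped. Condensed from the matches (the unwind beyond returning PTR_ERR is assumed):

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi))
			return PTR_ERR(pcie->mhi);	/* assumed error path */
	}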