author     Linus Torvalds <torvalds@linux-foundation.org>  2024-07-24 22:11:28 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-07-24 22:11:28 +0200
commit     c33ffdb70cc6df4105160f991288e7d2567d7ffa (patch)
tree       08a60884121a7096638f4687c655f61738d12c73 /drivers/phy
parent     Merge tag 'soundwire-6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/g... (diff)
parent     phy: airoha: Add dtime and Rx AEQ IO registers (diff)
Merge tag 'phy-for-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy
Pull phy updates from Vinod Koul:
"New Support
- Samsung Exynos gs101 drd combo phy
- Qualcomm SC8180x USB uniphy, IPQ9574 QMP PCIe phy
- Airoha EN7581 PCIe phy
- Freescale i.MX8Q HSIO SerDes phy
- Starfive jh7110 dphy tx
Updates:
- Resume support for j721e-wiz driver
- Updates to Exynos usbdrd driver
- Support for optional power domains in g12a usb2-phy driver
- Debugfs support and updates to zynqmp driver"
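
All of the providers listed above sit behind the kernel's generic PHY framework, so the consumer side is unchanged. For orientation, a minimal consumer-side sketch — the "pcie-phy" lookup name and the probe function are hypothetical and not taken from any of these drivers:

    #include <linux/phy/phy.h>
    #include <linux/platform_device.h>

    static int example_controller_probe(struct platform_device *pdev)
    {
            struct phy *phy;
            int ret;

            /* look up the PHY by a (hypothetical) phy-names entry in DT */
            phy = devm_phy_get(&pdev->dev, "pcie-phy");
            if (IS_ERR(phy))
                    return PTR_ERR(phy);

            ret = phy_init(phy);            /* provider's .init callback */
            if (ret)
                    return ret;

            ret = phy_power_on(phy);        /* provider's .power_on callback */
            if (ret) {
                    phy_exit(phy);
                    return ret;
            }

            return 0;
    }

Teardown mirrors this with phy_power_off() followed by phy_exit().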
* tag 'phy-for-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy: (56 commits)
phy: airoha: Add dtime and Rx AEQ IO registers
dt-bindings: phy: airoha: Add dtime and Rx AEQ IO registers
dt-bindings: phy: rockchip-emmc-phy: Convert to dtschema
dt-bindings: phy: qcom,qmp-usb: fix spelling error
phy: exynos5-usbdrd: support Exynos USBDRD 3.1 combo phy (HS & SS)
phy: exynos5-usbdrd: convert Vbus supplies to regulator_bulk
phy: exynos5-usbdrd: convert (phy) register access clock to clk_bulk
phy: exynos5-usbdrd: convert core clocks to clk_bulk
phy: exynos5-usbdrd: support isolating HS and SS ports independently
dt-bindings: phy: samsung,usb3-drd-phy: add gs101 compatible
phy: core: Fix documentation of of_phy_get
phy: starfive: Correct the dphy configure process
phy: zynqmp: Add debugfs support
phy: zynqmp: Take the phy mutex in xlate
phy: zynqmp: Only wait for PLL lock "primary" instances
phy: zynqmp: Store instance instead of type
phy: zynqmp: Enable reference clock correctly
phy: cadence-torrent: Check return value on register read
phy: Fix the cacography in phy-exynos5250-usb2.c
phy: phy-rockchip-samsung-hdptx: Select CONFIG_MFD_SYSCON
...
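
One item above, "phy: cadence-torrent: Check return value on register read", fixes a recurring hazard: acting on a polled register value without checking whether regmap_read_poll_timeout() actually succeeded. A minimal sketch of the corrected pattern — the register offset and ready bit are placeholders, not the Torrent register map:

    #include <linux/bits.h>
    #include <linux/regmap.h>
    #include <linux/types.h>

    #define EXAMPLE_STATUS_REG      0x04    /* placeholder offset */
    #define EXAMPLE_READY           BIT(0)  /* placeholder status bit */

    static int example_wait_ready(struct regmap *regmap)
    {
            u32 val;
            int ret;

            /* poll every 10us, give up after 1000us; -ETIMEDOUT on timeout */
            ret = regmap_read_poll_timeout(regmap, EXAMPLE_STATUS_REG, val,
                                           val & EXAMPLE_READY, 10, 1000);
            if (ret)
                    return ret;     /* bail out instead of programming the PHY further */

            return 0;
    }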
Diffstat (limited to 'drivers/phy')
26 files changed, 4435 insertions, 397 deletions
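
The cadence-torrent portion of the diff below also gains system sleep support through noirq device PM ops. A stripped-down sketch of that wiring — the driver name and callback bodies are placeholders, and probe/remove are omitted:

    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int example_phy_suspend_noirq(struct device *dev)
    {
            /* assert resets and gate reference clocks here */
            return 0;
    }

    static int example_phy_resume_noirq(struct device *dev)
    {
            /* re-enable clocks and reprogram the PHY here */
            return 0;
    }

    static DEFINE_NOIRQ_DEV_PM_OPS(example_phy_pm_ops,
                                   example_phy_suspend_noirq,
                                   example_phy_resume_noirq);

    static struct platform_driver example_phy_driver = {
            /* .probe/.remove omitted in this sketch */
            .driver = {
                    .name   = "example-phy",
                    .pm     = pm_sleep_ptr(&example_phy_pm_ops),
            },
    };

pm_sleep_ptr() turns the .pm pointer into NULL when CONFIG_PM_SLEEP is disabled, so the callbacks can be discarded on non-PM builds.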
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 787354b849c7..dfab1c66b3e5 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -72,6 +72,16 @@ config PHY_CAN_TRANSCEIVER functional modes using gpios and sets the attribute max link rate, for CAN drivers. +config PHY_AIROHA_PCIE + tristate "Airoha PCIe-PHY Driver" + depends on ARCH_AIROHA || COMPILE_TEST + depends on OF + select GENERIC_PHY + help + Say Y here to add support for Airoha PCIe PHY driver. + This driver create the basic PHY instance and provides initialize + callback for PCIe GEN3 port. + source "drivers/phy/allwinner/Kconfig" source "drivers/phy/amlogic/Kconfig" source "drivers/phy/broadcom/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 868a220ed0f6..5fcbce5f9ab1 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o obj-$(CONFIG_PHY_XGENE) += phy-xgene.o obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o +obj-$(CONFIG_PHY_AIROHA_PCIE) += phy-airoha-pcie.o obj-y += allwinner/ \ amlogic/ \ broadcom/ \ diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c index 269564bdf687..5213c75b6da6 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb2.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb2.c @@ -162,4 +162,5 @@ static struct platform_driver bcm_ns_usb2_driver = { }; module_platform_driver(bcm_ns_usb2_driver); +MODULE_DESCRIPTION("Broadcom Northstar USB 2.0 PHY Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c index 2c8b1b7dda5b..9f995e156f75 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c @@ -240,5 +240,6 @@ static struct mdio_driver bcm_ns_usb3_mdio_driver = { mdio_module_driver(bcm_ns_usb3_mdio_driver); +MODULE_DESCRIPTION("Broadcom Northstar USB 3.0 PHY Driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, bcm_ns_usb3_id_table); diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index 95924a09960c..56ce82a47f88 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -360,6 +360,7 @@ struct cdns_torrent_phy { enum cdns_torrent_ref_clk ref_clk1_rate; struct cdns_torrent_inst phys[MAX_NUM_LANES]; int nsubnodes; + int already_configured; const struct cdns_torrent_data *init_data; struct regmap *regmap_common_cdb; struct regmap *regmap_phy_pcs_common_cdb; @@ -1156,6 +1157,9 @@ static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy, ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK, read_val, (read_val & mask) == value, 0, POLL_TIMEOUT_US); + if (ret) + return ret; + cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000); ndelay(100); @@ -1594,6 +1598,9 @@ static int cdns_torrent_dp_configure(struct phy *phy, struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); int ret; + if (cdns_phy->already_configured) + return 0; + ret = cdns_torrent_dp_verify_config(inst, &opts->dp); if (ret) { dev_err(&phy->dev, "invalid params for phy configure\n"); @@ -1629,6 +1636,12 @@ static int cdns_torrent_phy_on(struct phy *phy) u32 read_val; int ret; + if (cdns_phy->already_configured) { + /* Give 5ms to 10ms delay for the PIPE clock to be stable */ + usleep_range(5000, 10000); + return 0; + } + if (cdns_phy->nsubnodes == 1) { /* Take the PHY lane 
group out of reset */ reset_control_deassert(inst->lnk_rst); @@ -2307,6 +2320,9 @@ static int cdns_torrent_phy_init(struct phy *phy) u32 num_regs; int i, j; + if (cdns_phy->already_configured) + return 0; + if (cdns_phy->nsubnodes > 1) { if (phy_type == TYPE_DP) return cdns_torrent_dp_multilink_init(cdns_phy, inst, phy); @@ -2444,19 +2460,6 @@ static const struct phy_ops cdns_torrent_phy_ops = { .owner = THIS_MODULE, }; -static int cdns_torrent_noop_phy_on(struct phy *phy) -{ - /* Give 5ms to 10ms delay for the PIPE clock to be stable */ - usleep_range(5000, 10000); - - return 0; -} - -static const struct phy_ops noop_ops = { - .power_on = cdns_torrent_noop_phy_on, - .owner = THIS_MODULE, -}; - static int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) { @@ -2678,7 +2681,7 @@ static int cdns_torrent_clk_register(struct cdns_torrent_phy *cdns_phy) return 0; } -static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy) +static int cdns_torrent_of_get_reset(struct cdns_torrent_phy *cdns_phy) { struct device *dev = cdns_phy->dev; @@ -2699,20 +2702,29 @@ static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy) return 0; } +static int cdns_torrent_of_get_clk(struct cdns_torrent_phy *cdns_phy) +{ + /* refclk: Input reference clock for PLL0 */ + cdns_phy->clk = devm_clk_get(cdns_phy->dev, "refclk"); + if (IS_ERR(cdns_phy->clk)) + return dev_err_probe(cdns_phy->dev, PTR_ERR(cdns_phy->clk), + "phy ref clock not found\n"); + + /* refclk1: Input reference clock for PLL1 */ + cdns_phy->clk1 = devm_clk_get_optional(cdns_phy->dev, "pll1_refclk"); + if (IS_ERR(cdns_phy->clk1)) + return dev_err_probe(cdns_phy->dev, PTR_ERR(cdns_phy->clk1), + "phy PLL1 ref clock not found\n"); + + return 0; +} + static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy) { - struct device *dev = cdns_phy->dev; unsigned long ref_clk1_rate; unsigned long ref_clk_rate; int ret; - /* refclk: Input reference clock for PLL0 */ - cdns_phy->clk = devm_clk_get(dev, "refclk"); - if (IS_ERR(cdns_phy->clk)) { - dev_err(dev, "phy ref clock not found\n"); - return PTR_ERR(cdns_phy->clk); - } - ret = clk_prepare_enable(cdns_phy->clk); if (ret) { dev_err(cdns_phy->dev, "Failed to prepare ref clock: %d\n", ret); @@ -2745,14 +2757,6 @@ static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy) goto disable_clk; } - /* refclk1: Input reference clock for PLL1 */ - cdns_phy->clk1 = devm_clk_get_optional(dev, "pll1_refclk"); - if (IS_ERR(cdns_phy->clk1)) { - dev_err(dev, "phy PLL1 ref clock not found\n"); - ret = PTR_ERR(cdns_phy->clk1); - goto disable_clk; - } - if (cdns_phy->clk1) { ret = clk_prepare_enable(cdns_phy->clk1); if (ret) { @@ -2807,7 +2811,6 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) struct device_node *child; int ret, subnodes, node = 0, i; u32 total_num_lanes = 0; - int already_configured; u8 init_dp_regmap = 0; u32 phy_type; @@ -2846,13 +2849,17 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) if (ret) return ret; - regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &already_configured); + ret = cdns_torrent_of_get_reset(cdns_phy); + if (ret) + goto clk_cleanup; - if (!already_configured) { - ret = cdns_torrent_reset(cdns_phy); - if (ret) - goto clk_cleanup; + ret = cdns_torrent_of_get_clk(cdns_phy); + if (ret) + goto clk_cleanup; + + regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &cdns_phy->already_configured); + if (!cdns_phy->already_configured) { ret = cdns_torrent_clk(cdns_phy); if (ret) goto clk_cleanup; @@ -2932,10 +2939,7 @@ static int 
cdns_torrent_phy_probe(struct platform_device *pdev) of_property_read_u32(child, "cdns,ssc-mode", &cdns_phy->phys[node].ssc_mode); - if (!already_configured) - gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops); - else - gphy = devm_phy_create(dev, child, &noop_ops); + gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops); if (IS_ERR(gphy)) { ret = PTR_ERR(gphy); goto put_child; @@ -3018,7 +3022,7 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) goto put_lnk_rst; } - if (cdns_phy->nsubnodes > 1 && !already_configured) { + if (cdns_phy->nsubnodes > 1 && !cdns_phy->already_configured) { ret = cdns_torrent_phy_configure_multilink(cdns_phy); if (ret) goto put_lnk_rst; @@ -3074,6 +3078,82 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev) cdns_torrent_clk_cleanup(cdns_phy); } +/* SGMII and QSGMII link configuration */ +static struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = { + {0x0002, PHY_PLL_CFG} +}; + +static struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = { + {0x0003, XCVR_DIAG_HSCLK_DIV}, + {0x0113, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = { + .reg_pairs = sgmii_qsgmii_link_cmn_regs, + .num_regs = ARRAY_SIZE(sgmii_qsgmii_link_cmn_regs), +}; + +static struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = { + .reg_pairs = sgmii_qsgmii_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sgmii_qsgmii_xcvr_diag_ln_regs), +}; + +static int cdns_torrent_phy_suspend_noirq(struct device *dev) +{ + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(dev); + int i; + + reset_control_assert(cdns_phy->phy_rst); + reset_control_assert(cdns_phy->apb_rst); + for (i = 0; i < cdns_phy->nsubnodes; i++) + reset_control_assert(cdns_phy->phys[i].lnk_rst); + + if (cdns_phy->already_configured) + cdns_phy->already_configured = 0; + else { + clk_disable_unprepare(cdns_phy->clk1); + clk_disable_unprepare(cdns_phy->clk); + } + + return 0; +} + +static int cdns_torrent_phy_resume_noirq(struct device *dev) +{ + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(dev); + int node = cdns_phy->nsubnodes; + int ret, i; + + ret = cdns_torrent_clk(cdns_phy); + if (ret) + return ret; + + /* Enable APB */ + reset_control_deassert(cdns_phy->apb_rst); + + if (cdns_phy->nsubnodes > 1) { + ret = cdns_torrent_phy_configure_multilink(cdns_phy); + if (ret) + goto put_lnk_rst; + } + + return 0; + +put_lnk_rst: + for (i = 0; i < node; i++) + reset_control_assert(cdns_phy->phys[i].lnk_rst); + reset_control_assert(cdns_phy->apb_rst); + + clk_disable_unprepare(cdns_phy->clk1); + clk_disable_unprepare(cdns_phy->clk); + + return ret; +} + +static DEFINE_NOIRQ_DEV_PM_OPS(cdns_torrent_phy_pm_ops, + cdns_torrent_phy_suspend_noirq, + cdns_torrent_phy_resume_noirq); + /* USB and DP link configuration */ static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = { {0x0002, PHY_PLL_CFG}, @@ -4043,7 +4123,8 @@ static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = { {0x04A2, TX_PSC_A2}, {0x04A2, TX_PSC_A3}, {0x0000, TX_TXCC_CPOST_MULT_00}, - {0x00B3, DRV_DIAG_TX_DRV} + {0x00B3, DRV_DIAG_TX_DRV}, + {0x0002, XCVR_DIAG_PSC_OVRD} }; static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = { @@ -4052,7 +4133,8 @@ static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = { {0x04A2, TX_PSC_A3}, {0x0000, TX_TXCC_CPOST_MULT_00}, {0x00B3, DRV_DIAG_TX_DRV}, - {0x4000, XCVR_DIAG_RXCLK_CTRL}, + {0x0002, XCVR_DIAG_PSC_OVRD}, + {0x4000, XCVR_DIAG_RXCLK_CTRL} }; static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = { @@ -4219,7 +4301,8 
@@ static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = { {0x04A2, TX_PSC_A3}, {0x0000, TX_TXCC_CPOST_MULT_00}, {0x0011, TX_TXCC_MGNFS_MULT_100}, - {0x0003, DRV_DIAG_TX_DRV} + {0x0003, DRV_DIAG_TX_DRV}, + {0x0002, XCVR_DIAG_PSC_OVRD} }; static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = { @@ -4229,7 +4312,8 @@ static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = { {0x0000, TX_TXCC_CPOST_MULT_00}, {0x0011, TX_TXCC_MGNFS_MULT_100}, {0x0003, DRV_DIAG_TX_DRV}, - {0x4000, XCVR_DIAG_RXCLK_CTRL}, + {0x0002, XCVR_DIAG_PSC_OVRD}, + {0x4000, XCVR_DIAG_RXCLK_CTRL} }; static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = { @@ -4541,11 +4625,13 @@ static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &usb_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_SGMII), &sgmii_qsgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &usb_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals}, @@ -4575,11 +4661,13 @@ static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_SGMII), &sgmii_qsgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals}, @@ -4635,6 +4723,8 @@ static struct cdns_torrent_vals_entry cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, @@ -4645,6 +4735,8 @@ static struct cdns_torrent_vals_entry cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, 
CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, @@ -4713,6 +4805,8 @@ static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals}, @@ -4723,6 +4817,8 @@ static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, @@ -4791,6 +4887,8 @@ static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, @@ -4801,6 +4899,8 @@ static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, @@ -4905,6 +5005,8 @@ static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] 
= { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, @@ -4915,6 +5017,8 @@ static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, @@ -5017,6 +5121,8 @@ static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, @@ -5027,6 +5133,8 @@ static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, @@ -5095,6 +5203,8 @@ static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, @@ -5105,6 +5215,8 @@ static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, @@ -5173,6 +5285,8 @@ static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, @@ -5183,6 +5297,8 @@ static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, @@ -5275,6 +5391,7 @@ static struct platform_driver cdns_torrent_phy_driver = { .driver = { .name = "cdns-torrent-phy", .of_match_table = cdns_torrent_phy_of_match, + .pm = pm_sleep_ptr(&cdns_torrent_phy_pm_ops), } }; module_platform_driver(cdns_torrent_phy_driver); diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig index 45aaaea14fb4..dcd9acff6d01 100644 --- a/drivers/phy/freescale/Kconfig +++ b/drivers/phy/freescale/Kconfig @@ -35,12 +35,19 @@ config PHY_FSL_IMX8M_PCIE Enable this to add support for the PCIE PHY as found on i.MX8M family of SOCs. +config PHY_FSL_IMX8QM_HSIO + tristate "Freescale i.MX8QM HSIO PHY" + depends on OF && HAS_IOMEM + select GENERIC_PHY + help + Enable this to add support for the HSIO PHY as found on + i.MX8QM family of SOCs. + config PHY_FSL_SAMSUNG_HDMI_PHY tristate "Samsung HDMI PHY support" depends on OF && HAS_IOMEM && COMMON_CLK help Enable this to add support for the Samsung HDMI PHY in i.MX8MP. 
- endif config PHY_FSL_LYNX_28G diff --git a/drivers/phy/freescale/Makefile b/drivers/phy/freescale/Makefile index c4386bfdb853..658eac7d0a62 100644 --- a/drivers/phy/freescale/Makefile +++ b/drivers/phy/freescale/Makefile @@ -3,5 +3,6 @@ obj-$(CONFIG_PHY_FSL_IMX8MQ_USB) += phy-fsl-imx8mq-usb.o obj-$(CONFIG_PHY_MIXEL_LVDS_PHY) += phy-fsl-imx8qm-lvds-phy.o obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY) += phy-fsl-imx8-mipi-dphy.o obj-$(CONFIG_PHY_FSL_IMX8M_PCIE) += phy-fsl-imx8m-pcie.o +obj-$(CONFIG_PHY_FSL_IMX8QM_HSIO) += phy-fsl-imx8qm-hsio.o obj-$(CONFIG_PHY_FSL_LYNX_28G) += phy-fsl-lynx-28g.o obj-$(CONFIG_PHY_FSL_SAMSUNG_HDMI_PHY) += phy-fsl-samsung-hdmi.o diff --git a/drivers/phy/freescale/phy-fsl-imx8qm-hsio.c b/drivers/phy/freescale/phy-fsl-imx8qm-hsio.c new file mode 100644 index 000000000000..5dca93cd325c --- /dev/null +++ b/drivers/phy/freescale/phy-fsl-imx8qm-hsio.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2024 NXP + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pci_regs.h> +#include <linux/phy/phy.h> +#include <linux/phy/pcie.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/phy/phy.h> +#include <dt-bindings/phy/phy-imx8-pcie.h> + +#define MAX_NUM_LANE 3 +#define LANE_NUM_CLKS 5 + +/* Parameters for the waiting for PCIe PHY PLL to lock */ +#define PHY_INIT_WAIT_USLEEP_MAX 10 +#define PHY_INIT_WAIT_TIMEOUT (1000 * PHY_INIT_WAIT_USLEEP_MAX) + +/* i.MX8Q HSIO registers */ +#define HSIO_CTRL0 0x0 +#define HSIO_APB_RSTN_0 BIT(0) +#define HSIO_APB_RSTN_1 BIT(1) +#define HSIO_PIPE_RSTN_0_MASK GENMASK(25, 24) +#define HSIO_PIPE_RSTN_1_MASK GENMASK(27, 26) +#define HSIO_MODE_MASK GENMASK(20, 17) +#define HSIO_MODE_PCIE 0x0 +#define HSIO_MODE_SATA 0x4 +#define HSIO_DEVICE_TYPE_MASK GENMASK(27, 24) +#define HSIO_EPCS_TXDEEMP BIT(5) +#define HSIO_EPCS_TXDEEMP_SEL BIT(6) +#define HSIO_EPCS_PHYRESET_N BIT(7) +#define HSIO_RESET_N BIT(12) + +#define HSIO_IOB_RXENA BIT(0) +#define HSIO_IOB_TXENA BIT(1) +#define HSIO_IOB_A_0_TXOE BIT(2) +#define HSIO_IOB_A_0_M1M0_2 BIT(4) +#define HSIO_IOB_A_0_M1M0_MASK GENMASK(4, 3) +#define HSIO_PHYX1_EPCS_SEL BIT(12) +#define HSIO_PCIE_AB_SELECT BIT(13) + +#define HSIO_PHY_STS0 0x4 +#define HSIO_LANE0_TX_PLL_LOCK BIT(4) +#define HSIO_LANE1_TX_PLL_LOCK BIT(12) + +#define HSIO_CTRL2 0x8 +#define HSIO_LTSSM_ENABLE BIT(4) +#define HSIO_BUTTON_RST_N BIT(21) +#define HSIO_PERST_N BIT(22) +#define HSIO_POWER_UP_RST_N BIT(23) + +#define HSIO_PCIE_STS0 0xc +#define HSIO_PM_REQ_CORE_RST BIT(19) + +#define HSIO_REG48_PMA_STATUS 0x30 +#define HSIO_REG48_PMA_RDY BIT(7) + +struct imx_hsio_drvdata { + int lane_num; +}; + +struct imx_hsio_lane { + u32 ctrl_index; + u32 ctrl_off; + u32 idx; + u32 phy_off; + u32 phy_type; + const char * const *clk_names; + struct clk_bulk_data clks[LANE_NUM_CLKS]; + struct imx_hsio_priv *priv; + struct phy *phy; + enum phy_mode phy_mode; +}; + +struct imx_hsio_priv { + void __iomem *base; + struct device *dev; + struct mutex lock; + const char *hsio_cfg; + const char *refclk_pad; + u32 open_cnt; + struct regmap *phy; + struct regmap *ctrl; + struct regmap *misc; + const struct imx_hsio_drvdata *drvdata; + struct imx_hsio_lane lane[MAX_NUM_LANE]; +}; + +static const char * const lan0_pcie_clks[] = {"apb_pclk0", "pclk0", "ctl0_crr", + "phy0_crr", "misc_crr"}; +static const char * const lan1_pciea_clks[] = {"apb_pclk1", "pclk1", 
"ctl0_crr", + "phy0_crr", "misc_crr"}; +static const char * const lan1_pcieb_clks[] = {"apb_pclk1", "pclk1", "ctl1_crr", + "phy0_crr", "misc_crr"}; +static const char * const lan2_pcieb_clks[] = {"apb_pclk2", "pclk2", "ctl1_crr", + "phy1_crr", "misc_crr"}; +static const char * const lan2_sata_clks[] = {"pclk2", "epcs_tx", "epcs_rx", + "phy1_crr", "misc_crr"}; + +static const struct regmap_config regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static int imx_hsio_init(struct phy *phy) +{ + int ret, i; + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + struct device *dev = priv->dev; + + /* Assign clocks refer to different modes */ + switch (lane->phy_type) { + case PHY_TYPE_PCIE: + lane->phy_mode = PHY_MODE_PCIE; + if (lane->ctrl_index == 0) { /* PCIEA */ + lane->ctrl_off = 0; + lane->phy_off = 0; + + for (i = 0; i < LANE_NUM_CLKS; i++) { + if (lane->idx == 0) + lane->clks[i].id = lan0_pcie_clks[i]; + else + lane->clks[i].id = lan1_pciea_clks[i]; + } + } else { /* PCIEB */ + if (lane->idx == 0) { /* i.MX8QXP */ + lane->ctrl_off = 0; + lane->phy_off = 0; + } else { + /* + * On i.MX8QM, only second or third lane can be + * bound to PCIEB. + */ + lane->ctrl_off = SZ_64K; + if (lane->idx == 1) + lane->phy_off = 0; + else /* the third lane is bound to PCIEB */ + lane->phy_off = SZ_64K; + } + + for (i = 0; i < LANE_NUM_CLKS; i++) { + if (lane->idx == 1) + lane->clks[i].id = lan1_pcieb_clks[i]; + else if (lane->idx == 2) + lane->clks[i].id = lan2_pcieb_clks[i]; + else /* i.MX8QXP only has PCIEB, idx is 0 */ + lane->clks[i].id = lan0_pcie_clks[i]; + } + } + break; + case PHY_TYPE_SATA: + /* On i.MX8QM, only the third lane can be bound to SATA */ + lane->phy_mode = PHY_MODE_SATA; + lane->ctrl_off = SZ_128K; + lane->phy_off = SZ_64K; + + for (i = 0; i < LANE_NUM_CLKS; i++) + lane->clks[i].id = lan2_sata_clks[i]; + break; + default: + return -EINVAL; + } + + /* Fetch clocks and enable them */ + ret = devm_clk_bulk_get(dev, LANE_NUM_CLKS, lane->clks); + if (ret) + return ret; + ret = clk_bulk_prepare_enable(LANE_NUM_CLKS, lane->clks); + if (ret) + return ret; + + /* allow the clocks to stabilize */ + usleep_range(200, 500); + return 0; +} + +static int imx_hsio_exit(struct phy *phy) +{ + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + + clk_bulk_disable_unprepare(LANE_NUM_CLKS, lane->clks); + + return 0; +} + +static void imx_hsio_pcie_phy_resets(struct phy *phy) +{ + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, + HSIO_BUTTON_RST_N); + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, + HSIO_PERST_N); + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, + HSIO_POWER_UP_RST_N); + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, + HSIO_BUTTON_RST_N); + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, + HSIO_PERST_N); + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, + HSIO_POWER_UP_RST_N); + + if (lane->idx == 1) { + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, + HSIO_APB_RSTN_1); + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, + HSIO_PIPE_RSTN_1_MASK); + } else { + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, + HSIO_APB_RSTN_0); + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, + HSIO_PIPE_RSTN_0_MASK); + } +} + +static void imx_hsio_sata_phy_resets(struct phy *phy) +{ + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct 
imx_hsio_priv *priv = lane->priv; + + /* clear PHY RST, then set it */ + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, + HSIO_EPCS_PHYRESET_N); + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, + HSIO_EPCS_PHYRESET_N); + + /* CTRL RST: SET -> delay 1 us -> CLEAR -> SET */ + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, HSIO_RESET_N); + udelay(1); + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, + HSIO_RESET_N); + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, HSIO_RESET_N); +} + +static void imx_hsio_configure_clk_pad(struct phy *phy) +{ + bool pll = false; + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + + if (strncmp(priv->refclk_pad, "output", 6) == 0) { + pll = true; + regmap_update_bits(priv->misc, HSIO_CTRL0, + HSIO_IOB_A_0_TXOE | HSIO_IOB_A_0_M1M0_MASK, + HSIO_IOB_A_0_TXOE | HSIO_IOB_A_0_M1M0_2); + } else { + regmap_update_bits(priv->misc, HSIO_CTRL0, + HSIO_IOB_A_0_TXOE | HSIO_IOB_A_0_M1M0_MASK, + 0); + } + + regmap_update_bits(priv->misc, HSIO_CTRL0, HSIO_IOB_RXENA, + pll ? 0 : HSIO_IOB_RXENA); + regmap_update_bits(priv->misc, HSIO_CTRL0, HSIO_IOB_TXENA, + pll ? HSIO_IOB_TXENA : 0); +} + +static void imx_hsio_pre_set(struct phy *phy) +{ + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + + if (strncmp(priv->hsio_cfg, "pciea-x2-pcieb", 14) == 0) { + regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PCIE_AB_SELECT); + } else if (strncmp(priv->hsio_cfg, "pciea-x2-sata", 13) == 0) { + regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PHYX1_EPCS_SEL); + } else if (strncmp(priv->hsio_cfg, "pciea-pcieb-sata", 16) == 0) { + regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PCIE_AB_SELECT); + regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PHYX1_EPCS_SEL); + } + + imx_hsio_configure_clk_pad(phy); +} + +static int imx_hsio_pcie_power_on(struct phy *phy) +{ + int ret; + u32 val, addr, cond; + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + + imx_hsio_pcie_phy_resets(phy); + + /* Toggle apb_pclk to make sure PM_REQ_CORE_RST is cleared. 
*/ + clk_disable_unprepare(lane->clks[0].clk); + mdelay(1); + ret = clk_prepare_enable(lane->clks[0].clk); + if (ret) { + dev_err(priv->dev, "unable to enable phy apb_pclk\n"); + return ret; + } + + addr = lane->ctrl_off + HSIO_PCIE_STS0; + cond = HSIO_PM_REQ_CORE_RST; + ret = regmap_read_poll_timeout(priv->ctrl, addr, val, + (val & cond) == 0, + PHY_INIT_WAIT_USLEEP_MAX, + PHY_INIT_WAIT_TIMEOUT); + if (ret) + dev_err(priv->dev, "HSIO_PM_REQ_CORE_RST is set\n"); + return ret; +} + +static int imx_hsio_sata_power_on(struct phy *phy) +{ + int ret; + u32 val, cond; + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, HSIO_APB_RSTN_0); + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, + HSIO_EPCS_TXDEEMP); + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, + HSIO_EPCS_TXDEEMP_SEL); + + imx_hsio_sata_phy_resets(phy); + + cond = HSIO_REG48_PMA_RDY; + ret = read_poll_timeout(readb, val, ((val & cond) == cond), + PHY_INIT_WAIT_USLEEP_MAX, + PHY_INIT_WAIT_TIMEOUT, false, + priv->base + HSIO_REG48_PMA_STATUS); + if (ret) + dev_err(priv->dev, "PHY calibration is timeout\n"); + else + dev_dbg(priv->dev, "PHY calibration is done\n"); + + return ret; +} + +static int imx_hsio_power_on(struct phy *phy) +{ + int ret; + u32 val, cond; + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + + scoped_guard(mutex, &priv->lock) { + if (!priv->open_cnt) + imx_hsio_pre_set(phy); + priv->open_cnt++; + } + + if (lane->phy_mode == PHY_MODE_PCIE) + ret = imx_hsio_pcie_power_on(phy); + else /* SATA */ + ret = imx_hsio_sata_power_on(phy); + if (ret) + return ret; + + /* Polling to check the PHY is ready or not. */ + if (lane->idx == 1) + cond = HSIO_LANE1_TX_PLL_LOCK; + else + /* + * Except the phy_off, the bit-offset of lane2 is same to lane0. + * Merge the lane0 and lane2 bit-operations together. 
+ */ + cond = HSIO_LANE0_TX_PLL_LOCK; + + ret = regmap_read_poll_timeout(priv->phy, lane->phy_off + HSIO_PHY_STS0, + val, ((val & cond) == cond), + PHY_INIT_WAIT_USLEEP_MAX, + PHY_INIT_WAIT_TIMEOUT); + if (ret) { + dev_err(priv->dev, "IMX8Q PHY%d PLL lock timeout\n", lane->idx); + return ret; + } + dev_dbg(priv->dev, "IMX8Q PHY%d PLL is locked\n", lane->idx); + + return ret; +} + +static int imx_hsio_power_off(struct phy *phy) +{ + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + + scoped_guard(mutex, &priv->lock) { + priv->open_cnt--; + if (priv->open_cnt == 0) { + regmap_clear_bits(priv->misc, HSIO_CTRL0, + HSIO_PCIE_AB_SELECT); + regmap_clear_bits(priv->misc, HSIO_CTRL0, + HSIO_PHYX1_EPCS_SEL); + + if (lane->phy_mode == PHY_MODE_PCIE) { + regmap_clear_bits(priv->ctrl, + lane->ctrl_off + HSIO_CTRL2, + HSIO_BUTTON_RST_N); + regmap_clear_bits(priv->ctrl, + lane->ctrl_off + HSIO_CTRL2, + HSIO_PERST_N); + regmap_clear_bits(priv->ctrl, + lane->ctrl_off + HSIO_CTRL2, + HSIO_POWER_UP_RST_N); + } else { + regmap_clear_bits(priv->ctrl, + lane->ctrl_off + HSIO_CTRL0, + HSIO_EPCS_TXDEEMP); + regmap_clear_bits(priv->ctrl, + lane->ctrl_off + HSIO_CTRL0, + HSIO_EPCS_TXDEEMP_SEL); + regmap_clear_bits(priv->ctrl, + lane->ctrl_off + HSIO_CTRL0, + HSIO_RESET_N); + } + + if (lane->idx == 1) { + regmap_clear_bits(priv->phy, + lane->phy_off + HSIO_CTRL0, + HSIO_APB_RSTN_1); + regmap_clear_bits(priv->phy, + lane->phy_off + HSIO_CTRL0, + HSIO_PIPE_RSTN_1_MASK); + } else { + /* + * Except the phy_off, the bit-offset of lane2 is same + * to lane0. Merge the lane0 and lane2 bit-operations + * together. + */ + regmap_clear_bits(priv->phy, + lane->phy_off + HSIO_CTRL0, + HSIO_APB_RSTN_0); + regmap_clear_bits(priv->phy, + lane->phy_off + HSIO_CTRL0, + HSIO_PIPE_RSTN_0_MASK); + } + } + } + + return 0; +} + +static int imx_hsio_set_mode(struct phy *phy, enum phy_mode mode, + int submode) +{ + u32 val; + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + + if (lane->phy_mode != mode) + return -EINVAL; + + val = (mode == PHY_MODE_PCIE) ? HSIO_MODE_PCIE : HSIO_MODE_SATA; + val = FIELD_PREP(HSIO_MODE_MASK, val); + regmap_update_bits(priv->phy, lane->phy_off + HSIO_CTRL0, + HSIO_MODE_MASK, val); + + switch (submode) { + case PHY_MODE_PCIE_RC: + val = FIELD_PREP(HSIO_DEVICE_TYPE_MASK, PCI_EXP_TYPE_ROOT_PORT); + break; + case PHY_MODE_PCIE_EP: + val = FIELD_PREP(HSIO_DEVICE_TYPE_MASK, PCI_EXP_TYPE_ENDPOINT); + break; + default: /* Support only PCIe EP and RC now. */ + return 0; + } + if (submode) + regmap_update_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, + HSIO_DEVICE_TYPE_MASK, val); + + return 0; +} + +static int imx_hsio_set_speed(struct phy *phy, int speed) +{ + struct imx_hsio_lane *lane = phy_get_drvdata(phy); + struct imx_hsio_priv *priv = lane->priv; + + regmap_update_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, + HSIO_LTSSM_ENABLE, + speed ? 
HSIO_LTSSM_ENABLE : 0); + return 0; +} + +static const struct phy_ops imx_hsio_ops = { + .init = imx_hsio_init, + .exit = imx_hsio_exit, + .power_on = imx_hsio_power_on, + .power_off = imx_hsio_power_off, + .set_mode = imx_hsio_set_mode, + .set_speed = imx_hsio_set_speed, + .owner = THIS_MODULE, +}; + +static const struct imx_hsio_drvdata imx8qxp_hsio_drvdata = { + .lane_num = 0x1, +}; + +static const struct imx_hsio_drvdata imx8qm_hsio_drvdata = { + .lane_num = 0x3, +}; + +static const struct of_device_id imx_hsio_of_match[] = { + {.compatible = "fsl,imx8qm-hsio", .data = &imx8qm_hsio_drvdata}, + {.compatible = "fsl,imx8qxp-hsio", .data = &imx8qxp_hsio_drvdata}, + { }, +}; +MODULE_DEVICE_TABLE(of, imx_hsio_of_match); + +static struct phy *imx_hsio_xlate(struct device *dev, + const struct of_phandle_args *args) +{ + struct imx_hsio_priv *priv = dev_get_drvdata(dev); + int idx = args->args[0]; + int phy_type = args->args[1]; + int ctrl_index = args->args[2]; + + if (idx < 0 || idx >= priv->drvdata->lane_num) + return ERR_PTR(-EINVAL); + priv->lane[idx].idx = idx; + priv->lane[idx].phy_type = phy_type; + priv->lane[idx].ctrl_index = ctrl_index; + + return priv->lane[idx].phy; +} + +static int imx_hsio_probe(struct platform_device *pdev) +{ + int i; + void __iomem *off; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct imx_hsio_priv *priv; + struct phy_provider *provider; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + priv->dev = &pdev->dev; + priv->drvdata = of_device_get_match_data(dev); + + /* Get HSIO configuration mode */ + if (of_property_read_string(np, "fsl,hsio-cfg", &priv->hsio_cfg)) + priv->hsio_cfg = "pciea-pcieb-sata"; + /* Get PHY refclk pad mode */ + if (of_property_read_string(np, "fsl,refclk-pad-mode", + &priv->refclk_pad)) + priv->refclk_pad = NULL; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + off = devm_platform_ioremap_resource_byname(pdev, "phy"); + priv->phy = devm_regmap_init_mmio(dev, off, ®map_config); + if (IS_ERR(priv->phy)) + return dev_err_probe(dev, PTR_ERR(priv->phy), + "unable to find phy csr registers\n"); + + off = devm_platform_ioremap_resource_byname(pdev, "ctrl"); + priv->ctrl = devm_regmap_init_mmio(dev, off, ®map_config); + if (IS_ERR(priv->ctrl)) + return dev_err_probe(dev, PTR_ERR(priv->ctrl), + "unable to find ctrl csr registers\n"); + + off = devm_platform_ioremap_resource_byname(pdev, "misc"); + priv->misc = devm_regmap_init_mmio(dev, off, ®map_config); + if (IS_ERR(priv->misc)) + return dev_err_probe(dev, PTR_ERR(priv->misc), + "unable to find misc csr registers\n"); + + for (i = 0; i < priv->drvdata->lane_num; i++) { + struct imx_hsio_lane *lane = &priv->lane[i]; + struct phy *phy; + + phy = devm_phy_create(&pdev->dev, NULL, &imx_hsio_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + lane->priv = priv; + lane->phy = phy; + lane->idx = i; + phy_set_drvdata(phy, lane); + } + + dev_set_drvdata(dev, priv); + dev_set_drvdata(&pdev->dev, priv); + + provider = devm_of_phy_provider_register(&pdev->dev, imx_hsio_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static struct platform_driver imx_hsio_driver = { + .probe = imx_hsio_probe, + .driver = { + .name = "imx8qm-hsio-phy", + .of_match_table = imx_hsio_of_match, + } +}; +module_platform_driver(imx_hsio_driver); + +MODULE_DESCRIPTION("FSL IMX8QM HSIO SERDES PHY driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/phy-airoha-pcie-regs.h 
b/drivers/phy/phy-airoha-pcie-regs.h new file mode 100644 index 000000000000..bb1f679ca1df --- /dev/null +++ b/drivers/phy/phy-airoha-pcie-regs.h @@ -0,0 +1,494 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 AIROHA Inc + * Author: Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#ifndef _PHY_AIROHA_PCIE_H +#define _PHY_AIROHA_PCIE_H + +/* CSR_2L */ +#define REG_CSR_2L_CMN 0x0000 +#define CSR_2L_PXP_CMN_LANE_EN BIT(0) +#define CSR_2L_PXP_CMN_TRIM_MASK GENMASK(28, 24) + +#define REG_CSR_2L_JCPLL_IB_EXT 0x0004 +#define REG_CSR_2L_JCPLL_LPF_SHCK_EN BIT(8) +#define CSR_2L_PXP_JCPLL_CHP_IBIAS GENMASK(21, 16) +#define CSR_2L_PXP_JCPLL_CHP_IOFST GENMASK(29, 24) + +#define REG_CSR_2L_JCPLL_LPF_BR 0x0008 +#define CSR_2L_PXP_JCPLL_LPF_BR GENMASK(4, 0) +#define CSR_2L_PXP_JCPLL_LPF_BC GENMASK(12, 8) +#define CSR_2L_PXP_JCPLL_LPF_BP GENMASK(20, 16) +#define CSR_2L_PXP_JCPLL_LPF_BWR GENMASK(28, 24) + +#define REG_CSR_2L_JCPLL_LPF_BWC 0x000c +#define CSR_2L_PXP_JCPLL_LPF_BWC GENMASK(4, 0) +#define CSR_2L_PXP_JCPLL_KBAND_CODE GENMASK(23, 16) +#define CSR_2L_PXP_JCPLL_KBAND_DIV GENMASK(26, 24) + +#define REG_CSR_2L_JCPLL_KBAND_KFC 0x0010 +#define CSR_2L_PXP_JCPLL_KBAND_KFC GENMASK(1, 0) +#define CSR_2L_PXP_JCPLL_KBAND_KF GENMASK(9, 8) +#define CSR_2L_PXP_JCPLL_KBAND_KS GENMASK(17, 16) +#define CSR_2L_PXP_JCPLL_POSTDIV_EN BIT(24) + +#define REG_CSR_2L_JCPLL_MMD_PREDIV_MODE 0x0014 +#define CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE GENMASK(1, 0) +#define CSR_2L_PXP_JCPLL_POSTDIV_D2 BIT(16) +#define CSR_2L_PXP_JCPLL_POSTDIV_D5 BIT(24) + +#define CSR_2L_PXP_JCPLL_MONCK 0x0018 +#define CSR_2L_PXP_JCPLL_REFIN_DIV GENMASK(25, 24) + +#define REG_CSR_2L_JCPLL_RST_DLY 0x001c +#define CSR_2L_PXP_JCPLL_RST_DLY GENMASK(2, 0) +#define CSR_2L_PXP_JCPLL_RST BIT(8) +#define CSR_2L_PXP_JCPLL_SDM_DI_EN BIT(16) +#define CSR_2L_PXP_JCPLL_SDM_DI_LS GENMASK(25, 24) + +#define REG_CSR_2L_JCPLL_SDM_IFM 0x0020 +#define CSR_2L_PXP_JCPLL_SDM_IFM BIT(0) + +#define REG_CSR_2L_JCPLL_SDM_HREN 0x0024 +#define CSR_2L_PXP_JCPLL_SDM_HREN BIT(0) +#define CSR_2L_PXP_JCPLL_TCL_AMP_EN BIT(8) +#define CSR_2L_PXP_JCPLL_TCL_AMP_GAIN GENMASK(18, 16) +#define CSR_2L_PXP_JCPLL_TCL_AMP_VREF GENMASK(28, 24) + +#define REG_CSR_2L_JCPLL_TCL_CMP 0x0028 +#define CSR_2L_PXP_JCPLL_TCL_LPF_EN BIT(16) +#define CSR_2L_PXP_JCPLL_TCL_LPF_BW GENMASK(26, 24) + +#define REG_CSR_2L_JCPLL_VCODIV 0x002c +#define CSR_2L_PXP_JCPLL_VCO_CFIX GENMASK(9, 8) +#define CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN BIT(16) +#define CSR_2L_PXP_JCPLL_VCO_SCAPWR GENMASK(26, 24) + +#define REG_CSR_2L_JCPLL_VCO_TCLVAR 0x0030 +#define CSR_2L_PXP_JCPLL_VCO_TCLVAR GENMASK(2, 0) + +#define REG_CSR_2L_JCPLL_SSC 0x0038 +#define CSR_2L_PXP_JCPLL_SSC_EN BIT(0) +#define CSR_2L_PXP_JCPLL_SSC_PHASE_INI BIT(8) +#define CSR_2L_PXP_JCPLL_SSC_TRI_EN BIT(16) + +#define REG_CSR_2L_JCPLL_SSC_DELTA1 0x003c +#define CSR_2L_PXP_JCPLL_SSC_DELTA1 GENMASK(15, 0) +#define CSR_2L_PXP_JCPLL_SSC_DELTA GENMASK(31, 16) + +#define REG_CSR_2L_JCPLL_SSC_PERIOD 0x0040 +#define CSR_2L_PXP_JCPLL_SSC_PERIOD GENMASK(15, 0) + +#define REG_CSR_2L_JCPLL_TCL_VTP_EN 0x004c +#define CSR_2L_PXP_JCPLL_SPARE_LOW GENMASK(31, 24) + +#define REG_CSR_2L_JCPLL_TCL_KBAND_VREF 0x0050 +#define CSR_2L_PXP_JCPLL_TCL_KBAND_VREF GENMASK(4, 0) +#define CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN BIT(24) + +#define REG_CSR_2L_750M_SYS_CK 0x0054 +#define CSR_2L_PXP_TXPLL_LPF_SHCK_EN BIT(16) +#define CSR_2L_PXP_TXPLL_CHP_IBIAS GENMASK(29, 24) + +#define REG_CSR_2L_TXPLL_CHP_IOFST 0x0058 +#define CSR_2L_PXP_TXPLL_CHP_IOFST GENMASK(5, 0) +#define 
CSR_2L_PXP_TXPLL_LPF_BR GENMASK(12, 8) +#define CSR_2L_PXP_TXPLL_LPF_BC GENMASK(20, 16) +#define CSR_2L_PXP_TXPLL_LPF_BP GENMASK(28, 24) + +#define REG_CSR_2L_TXPLL_LPF_BWR 0x005c +#define CSR_2L_PXP_TXPLL_LPF_BWR GENMASK(4, 0) +#define CSR_2L_PXP_TXPLL_LPF_BWC GENMASK(12, 8) +#define CSR_2L_PXP_TXPLL_KBAND_CODE GENMASK(31, 24) + +#define REG_CSR_2L_TXPLL_KBAND_DIV 0x0060 +#define CSR_2L_PXP_TXPLL_KBAND_DIV GENMASK(2, 0) +#define CSR_2L_PXP_TXPLL_KBAND_KFC GENMASK(9, 8) +#define CSR_2L_PXP_TXPLL_KBAND_KF GENMASK(17, 16) +#define CSR_2L_PXP_txpll_KBAND_KS GENMASK(25, 24) + +#define REG_CSR_2L_TXPLL_POSTDIV 0x0064 +#define CSR_2L_PXP_TXPLL_POSTDIV_EN BIT(0) +#define CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE GENMASK(9, 8) +#define CSR_2L_PXP_TXPLL_PHY_CK1_EN BIT(24) + +#define REG_CSR_2L_TXPLL_PHY_CK2 0x0068 +#define CSR_2L_PXP_TXPLL_REFIN_INTERNAL BIT(24) + +#define REG_CSR_2L_TXPLL_REFIN_DIV 0x006c +#define CSR_2L_PXP_TXPLL_REFIN_DIV GENMASK(1, 0) +#define CSR_2L_PXP_TXPLL_RST_DLY GENMASK(10, 8) +#define CSR_2L_PXP_TXPLL_PLL_RSTB BIT(16) + +#define REG_CSR_2L_TXPLL_SDM_DI_LS 0x0070 +#define CSR_2L_PXP_TXPLL_SDM_DI_LS GENMASK(1, 0) +#define CSR_2L_PXP_TXPLL_SDM_IFM BIT(8) +#define CSR_2L_PXP_TXPLL_SDM_ORD GENMASK(25, 24) + +#define REG_CSR_2L_TXPLL_SDM_OUT 0x0074 +#define CSR_2L_PXP_TXPLL_TCL_AMP_EN BIT(16) +#define CSR_2L_PXP_TXPLL_TCL_AMP_GAIN GENMASK(26, 24) + +#define REG_CSR_2L_TXPLL_TCL_AMP_VREF 0x0078 +#define CSR_2L_PXP_TXPLL_TCL_AMP_VREF GENMASK(4, 0) +#define CSR_2L_PXP_TXPLL_TCL_LPF_EN BIT(24) + +#define REG_CSR_2L_TXPLL_TCL_LPF_BW 0x007c +#define CSR_2L_PXP_TXPLL_TCL_LPF_BW GENMASK(2, 0) +#define CSR_2L_PXP_TXPLL_VCO_CFIX GENMASK(17, 16) +#define CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN BIT(24) + +#define REG_CSR_2L_TXPLL_VCO_SCAPWR 0x0080 +#define CSR_2L_PXP_TXPLL_VCO_SCAPWR GENMASK(2, 0) + +#define REG_CSR_2L_TXPLL_SSC 0x0084 +#define CSR_2L_PXP_TXPLL_SSC_EN BIT(0) +#define CSR_2L_PXP_TXPLL_SSC_PHASE_INI BIT(8) + +#define REG_CSR_2L_TXPLL_SSC_DELTA1 0x0088 +#define CSR_2L_PXP_TXPLL_SSC_DELTA1 GENMASK(15, 0) +#define CSR_2L_PXP_TXPLL_SSC_DELTA GENMASK(31, 16) + +#define REG_CSR_2L_TXPLL_SSC_PERIOD 0x008c +#define CSR_2L_PXP_txpll_SSC_PERIOD GENMASK(15, 0) + +#define REG_CSR_2L_TXPLL_VTP 0x0090 +#define CSR_2L_PXP_TXPLL_VTP_EN BIT(0) + +#define REG_CSR_2L_TXPLL_TCL_VTP 0x0098 +#define CSR_2L_PXP_TXPLL_SPARE_L GENMASK(31, 24) + +#define REG_CSR_2L_TXPLL_TCL_KBAND_VREF 0x009c +#define CSR_2L_PXP_TXPLL_TCL_KBAND_VREF GENMASK(4, 0) +#define CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN BIT(24) + +#define REG_CSR_2L_TXPLL_POSTDIV_D256 0x00a0 +#define CSR_2L_PXP_CLKTX0_AMP GENMASK(10, 8) +#define CSR_2L_PXP_CLKTX0_OFFSET GENMASK(17, 16) +#define CSR_2L_PXP_CLKTX0_SR GENMASK(25, 24) + +#define REG_CSR_2L_CLKTX0_FORCE_OUT1 0x00a4 +#define CSR_2L_PXP_CLKTX0_HZ BIT(8) +#define CSR_2L_PXP_CLKTX0_IMP_SEL GENMASK(20, 16) +#define CSR_2L_PXP_CLKTX1_AMP GENMASK(26, 24) + +#define REG_CSR_2L_CLKTX1_OFFSET 0x00a8 +#define CSR_2L_PXP_CLKTX1_OFFSET GENMASK(1, 0) +#define CSR_2L_PXP_CLKTX1_SR GENMASK(9, 8) +#define CSR_2L_PXP_CLKTX1_HZ BIT(24) + +#define REG_CSR_2L_CLKTX1_IMP_SEL 0x00ac +#define CSR_2L_PXP_CLKTX1_IMP_SEL GENMASK(4, 0) + +#define REG_CSR_2L_PLL_CMN_RESERVE0 0x00b0 +#define CSR_2L_PXP_PLL_RESERVE_MASK GENMASK(15, 0) + +#define REG_CSR_2L_TX0_CKLDO 0x00cc +#define CSR_2L_PXP_TX0_CKLDO_EN BIT(0) +#define CSR_2L_PXP_TX0_DMEDGEGEN_EN BIT(24) + +#define REG_CSR_2L_TX1_CKLDO 0x00e8 +#define CSR_2L_PXP_TX1_CKLDO_EN BIT(0) +#define CSR_2L_PXP_TX1_DMEDGEGEN_EN BIT(24) + +#define REG_CSR_2L_TX1_MULTLANE 0x00ec +#define 
CSR_2L_PXP_TX1_MULTLANE_EN BIT(0) + +#define REG_CSR_2L_RX0_REV0 0x00fc +#define CSR_2L_PXP_VOS_PNINV GENMASK(3, 2) +#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE GENMASK(6, 4) +#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE GENMASK(10, 8) + +#define REG_CSR_2L_RX0_PHYCK_DIV 0x0100 +#define CSR_2L_PXP_RX0_PHYCK_SEL GENMASK(9, 8) +#define CSR_2L_PXP_RX0_PHYCK_RSTB BIT(16) +#define CSR_2L_PXP_RX0_TDC_CK_SEL BIT(24) + +#define REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV 0x0104 +#define CSR_2L_PXP_CDR0_PD_EDGE_DISABLE BIT(8) + +#define REG_CSR_2L_CDR0_LPF_RATIO 0x0110 +#define CSR_2L_PXP_CDR0_LPF_TOP_LIM GENMASK(26, 8) + +#define REG_CSR_2L_CDR0_PR_INJ_MODE 0x011c +#define CSR_2L_PXP_CDR0_INJ_FORCE_OFF BIT(24) + +#define REG_CSR_2L_CDR0_PR_BETA_DAC 0x0120 +#define CSR_2L_PXP_CDR0_PR_BETA_SEL GENMASK(19, 16) +#define CSR_2L_PXP_CDR0_PR_KBAND_DIV GENMASK(26, 24) + +#define REG_CSR_2L_CDR0_PR_VREG_IBAND 0x0124 +#define CSR_2L_PXP_CDR0_PR_VREG_IBAND GENMASK(2, 0) +#define CSR_2L_PXP_CDR0_PR_VREG_CKBUF GENMASK(10, 8) + +#define REG_CSR_2L_CDR0_PR_CKREF_DIV 0x0128 +#define CSR_2L_PXP_CDR0_PR_CKREF_DIV GENMASK(1, 0) + +#define REG_CSR_2L_CDR0_PR_MONCK 0x012c +#define CSR_2L_PXP_CDR0_PR_MONCK_ENABLE BIT(0) +#define CSR_2L_PXP_CDR0_PR_RESERVE0 GENMASK(19, 16) + +#define REG_CSR_2L_CDR0_PR_COR_HBW 0x0130 +#define CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON BIT(8) +#define CSR_2L_PXP_CDR0_PR_CKREF_DIV1 GENMASK(17, 16) + +#define REG_CSR_2L_CDR0_PR_MONPI 0x0134 +#define CSR_2L_PXP_CDR0_PR_XFICK_EN BIT(8) + +#define REG_CSR_2L_RX0_SIGDET_DCTEST 0x0140 +#define CSR_2L_PXP_RX0_SIGDET_LPF_CTRL GENMASK(9, 8) +#define CSR_2L_PXP_RX0_SIGDET_PEAK GENMASK(25, 24) + +#define REG_CSR_2L_RX0_SIGDET_VTH_SEL 0x0144 +#define CSR_2L_PXP_RX0_SIGDET_VTH_SEL GENMASK(4, 0) +#define CSR_2L_PXP_RX0_FE_VB_EQ1_EN BIT(24) + +#define REG_CSR_2L_PXP_RX0_FE_VB_EQ2 0x0148 +#define CSR_2L_PXP_RX0_FE_VB_EQ2_EN BIT(0) +#define CSR_2L_PXP_RX0_FE_VB_EQ3_EN BIT(8) +#define CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB BIT(16) + +#define REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS 0x0158 +#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS GENMASK(29, 24) + +#define REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS 0x015c +#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS GENMASK(5, 0) +#define CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS GENMASK(13, 8) + +#define REG_CSR_2L_RX1_REV0 0x01b4 + +#define REG_CSR_2L_RX1_PHYCK_DIV 0x01b8 +#define CSR_2L_PXP_RX1_PHYCK_SEL GENMASK(9, 8) +#define CSR_2L_PXP_RX1_PHYCK_RSTB BIT(16) +#define CSR_2L_PXP_RX1_TDC_CK_SEL BIT(24) + +#define REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV 0x01bc +#define CSR_2L_PXP_CDR1_PD_EDGE_DISABLE BIT(8) + +#define REG_CSR_2L_CDR1_PR_BETA_DAC 0x01d8 +#define CSR_2L_PXP_CDR1_PR_BETA_SEL GENMASK(19, 16) +#define CSR_2L_PXP_CDR1_PR_KBAND_DIV GENMASK(26, 24) + +#define REG_CSR_2L_CDR1_PR_MONCK 0x01e4 +#define CSR_2L_PXP_CDR1_PR_MONCK_ENABLE BIT(0) +#define CSR_2L_PXP_CDR1_PR_RESERVE0 GENMASK(19, 16) + +#define REG_CSR_2L_CDR1_LPF_RATIO 0x01c8 +#define CSR_2L_PXP_CDR1_LPF_TOP_LIM GENMASK(26, 8) + +#define REG_CSR_2L_CDR1_PR_INJ_MODE 0x01d4 +#define CSR_2L_PXP_CDR1_INJ_FORCE_OFF BIT(24) + +#define REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL 0x01dc +#define CSR_2L_PXP_CDR1_PR_VREG_IBAND GENMASK(2, 0) +#define CSR_2L_PXP_CDR1_PR_VREG_CKBUF GENMASK(10, 8) + +#define REG_CSR_2L_CDR1_PR_CKREF_DIV 0x01e0 +#define CSR_2L_PXP_CDR1_PR_CKREF_DIV GENMASK(1, 0) + +#define REG_CSR_2L_CDR1_PR_COR_HBW 0x01e8 +#define CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON BIT(8) +#define CSR_2L_PXP_CDR1_PR_CKREF_DIV1 GENMASK(17, 16) + +#define REG_CSR_2L_CDR1_PR_MONPI 0x01ec +#define CSR_2L_PXP_CDR1_PR_XFICK_EN BIT(8) + +#define 
REG_CSR_2L_RX1_DAC_RANGE_EYE 0x01f4 +#define CSR_2L_PXP_RX1_SIGDET_LPF_CTRL GENMASK(25, 24) + +#define REG_CSR_2L_RX1_SIGDET_NOVTH 0x01f8 +#define CSR_2L_PXP_RX1_SIGDET_PEAK GENMASK(9, 8) +#define CSR_2L_PXP_RX1_SIGDET_VTH_SEL GENMASK(20, 16) + +#define REG_CSR_2L_RX1_FE_VB_EQ1 0x0200 +#define CSR_2L_PXP_RX1_FE_VB_EQ1_EN BIT(0) +#define CSR_2L_PXP_RX1_FE_VB_EQ2_EN BIT(8) +#define CSR_2L_PXP_RX1_FE_VB_EQ3_EN BIT(16) +#define CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB BIT(24) + +#define REG_CSR_2L_RX1_OSCAL_VGA1IOS 0x0214 +#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS GENMASK(5, 0) +#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS GENMASK(13, 8) +#define CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS GENMASK(21, 16) + +/* PMA */ +#define REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1 0x0004 +#define PCIE_LCPLL_MAN_PWDB BIT(0) + +#define REG_PCIE_PMA_SEQUENCE_DISB_CTRL1 0x010c +#define PCIE_DISB_RX_SDCAL_EN BIT(0) + +#define REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1 0x0114 +#define PCIE_FORCE_RX_SDCAL_EN BIT(0) + +#define REG_PCIE_PMA_SS_RX_FREQ_DET1 0x014c +#define PCIE_PLL_FT_LOCK_CYCLECNT GENMASK(15, 0) +#define PCIE_PLL_FT_UNLOCK_CYCLECNT GENMASK(31, 16) + +#define REG_PCIE_PMA_SS_RX_FREQ_DET2 0x0150 +#define PCIE_LOCK_TARGET_BEG GENMASK(15, 0) +#define PCIE_LOCK_TARGET_END GENMASK(31, 16) + +#define REG_PCIE_PMA_SS_RX_FREQ_DET3 0x0154 +#define PCIE_UNLOCK_TARGET_BEG GENMASK(15, 0) +#define PCIE_UNLOCK_TARGET_END GENMASK(31, 16) + +#define REG_PCIE_PMA_SS_RX_FREQ_DET4 0x0158 +#define PCIE_FREQLOCK_DET_EN GENMASK(2, 0) +#define PCIE_LOCK_LOCKTH GENMASK(11, 8) +#define PCIE_UNLOCK_LOCKTH GENMASK(15, 12) + +#define REG_PCIE_PMA_SS_RX_CAL1 0x0160 +#define REG_PCIE_PMA_SS_RX_CAL2 0x0164 +#define PCIE_CAL_OUT_OS GENMASK(11, 8) + +#define REG_PCIE_PMA_SS_RX_SIGDET0 0x0168 +#define PCIE_SIGDET_WIN_NONVLD_TIMES GENMASK(28, 24) + +#define REG_PCIE_PMA_TX_RESET 0x0260 +#define PCIE_TX_TOP_RST BIT(0) +#define PCIE_TX_CAL_RST BIT(8) + +#define REG_PCIE_PMA_RX_FORCE_MODE0 0x0294 +#define PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL GENMASK(1, 0) + +#define REG_PCIE_PMA_SS_DA_XPON_PWDB0 0x034c +#define PCIE_DA_XPON_CDR_PR_PWDB BIT(8) + +#define REG_PCIE_PMA_SW_RESET 0x0460 +#define PCIE_SW_RX_FIFO_RST BIT(0) +#define PCIE_SW_RX_RST BIT(1) +#define PCIE_SW_TX_RST BIT(2) +#define PCIE_SW_PMA_RST BIT(3) +#define PCIE_SW_ALLPCS_RST BIT(4) +#define PCIE_SW_REF_RST BIT(5) +#define PCIE_SW_TX_FIFO_RST BIT(6) +#define PCIE_SW_XFI_TXPCS_RST BIT(7) +#define PCIE_SW_XFI_RXPCS_RST BIT(8) +#define PCIE_SW_XFI_RXPCS_BIST_RST BIT(9) +#define PCIE_SW_HSG_TXPCS_RST BIT(10) +#define PCIE_SW_HSG_RXPCS_RST BIT(11) +#define PCIE_PMA_SW_RST (PCIE_SW_RX_FIFO_RST | \ + PCIE_SW_RX_RST | \ + PCIE_SW_TX_RST | \ + PCIE_SW_PMA_RST | \ + PCIE_SW_ALLPCS_RST | \ + PCIE_SW_REF_RST | \ + PCIE_SW_TX_FIFO_RST | \ + PCIE_SW_XFI_TXPCS_RST | \ + PCIE_SW_XFI_RXPCS_RST | \ + PCIE_SW_XFI_RXPCS_BIST_RST | \ + PCIE_SW_HSG_TXPCS_RST | \ + PCIE_SW_HSG_RXPCS_RST) + +#define REG_PCIE_PMA_RO_RX_FREQDET 0x0530 +#define PCIE_RO_FBCK_LOCK BIT(0) +#define PCIE_RO_FL_OUT GENMASK(31, 16) + +#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC 0x0794 +#define PCIE_FORCE_DA_PXP_CDR_PR_IDAC GENMASK(10, 0) +#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC BIT(16) +#define PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW BIT(24) + +#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW 0x0798 +#define PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW GENMASK(30, 0) + +#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS 0x079c +#define PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW BIT(16) + +#define REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW 0x0800 +#define PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW 
GENMASK(30, 0) + +#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB 0x081c +#define PCIE_FORCE_DA_PXP_CDR_PD_PWDB BIT(0) +#define PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB BIT(8) + +#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C 0x0820 +#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN BIT(0) +#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN BIT(8) +#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN BIT(16) +#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN BIT(24) + +#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB 0x0824 +#define PCIE_FORCE_DA_PXP_CDR_PR_PWDB BIT(16) +#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB BIT(24) + +#define REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT 0x0828 +#define PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN BIT(0) +#define PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN BIT(8) +#define PCIE_FORCE_DA_PXP_JCPLL_EN BIT(16) +#define PCIE_FORCE_SEL_DA_PXP_JCPLL_EN BIT(24) + +#define REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST 0x0084c +#define PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB BIT(16) +#define PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB BIT(24) + +#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT 0x0854 +#define PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN BIT(0) +#define PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN BIT(8) +#define PCIE_FORCE_DA_PXP_TXPLL_EN BIT(16) +#define PCIE_FORCE_SEL_DA_PXP_TXPLL_EN BIT(24) + +#define REG_PCIE_PMA_SCAN_MODE 0x0884 +#define PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN BIT(0) +#define PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN BIT(8) + +#define REG_PCIE_PMA_DIG_RESERVE_13 0x08bc +#define PCIE_FLL_IDAC_PCIEG1 GENMASK(10, 0) +#define PCIE_FLL_IDAC_PCIEG2 GENMASK(26, 16) + +#define REG_PCIE_PMA_DIG_RESERVE_14 0x08c0 +#define PCIE_FLL_IDAC_PCIEG3 GENMASK(10, 0) +#define PCIE_FLL_LOAD_EN BIT(16) + +#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL 0x088c +#define PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL GENMASK(1, 0) +#define PCIE_FORCE_SEL_DA_PXP_RX_FE_GAIN_CTRL BIT(8) + +#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB 0x0894 +#define PCIE_FORCE_DA_PXP_RX_FE_PWDB BIT(0) +#define PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB BIT(8) + +#define REG_PCIE_PMA_DIG_RESERVE_12 0x08b8 +#define PCIE_FORCE_PMA_RX_SPEED GENMASK(7, 4) +#define PCIE_FORCE_SEL_PMA_RX_SPEED BIT(7) + +#define REG_PCIE_PMA_DIG_RESERVE_17 0x08e0 + +#define REG_PCIE_PMA_DIG_RESERVE_18 0x08e4 +#define PCIE_PXP_RX_VTH_SEL_PCIE_G1 GENMASK(4, 0) +#define PCIE_PXP_RX_VTH_SEL_PCIE_G2 GENMASK(12, 8) +#define PCIE_PXP_RX_VTH_SEL_PCIE_G3 GENMASK(20, 16) + +#define REG_PCIE_PMA_DIG_RESERVE_19 0x08e8 +#define PCIE_PCP_RX_REV0_PCIE_GEN1 GENMASK(31, 16) + +#define REG_PCIE_PMA_DIG_RESERVE_20 0x08ec +#define PCIE_PCP_RX_REV0_PCIE_GEN2 GENMASK(15, 0) +#define PCIE_PCP_RX_REV0_PCIE_GEN3 GENMASK(31, 16) + +#define REG_PCIE_PMA_DIG_RESERVE_21 0x08f0 +#define REG_PCIE_PMA_DIG_RESERVE_22 0x08f4 +#define REG_PCIE_PMA_DIG_RESERVE_27 0x0908 +#define REG_PCIE_PMA_DIG_RESERVE_30 0x0914 + +/* DTIME */ +#define REG_PCIE_PEXTP_DIG_GLB44 0x00 +#define PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL GENMASK(7, 0) +#define PCIE_XTP_RXDET_EN_STB_T_SEL GENMASK(15, 8) +#define PCIE_XTP_RXDET_FINISH_STB_T_SEL GENMASK(23, 16) +#define PCIE_XTP_TXPD_TX_DATA_EN_DLY GENMASK(27, 24) +#define PCIE_XTP_TXPD_RXDET_DONE_CDT BIT(28) +#define PCIE_XTP_RXDET_LATCH_STB_T_SEL GENMASK(31, 29) + +/* RX AEQ */ +#define REG_PCIE_PEXTP_DIG_LN_RX30_P0 0x0000 +#define PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT GENMASK(7, 0) +#define PCIE_XTP_LN_RX_PDOWN_T2RLB_DIG_EN BIT(8) +#define PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT GENMASK(31, 16) + +#define REG_PCIE_PEXTP_DIG_LN_RX30_P1 0x0100 + +#endif /* _PHY_AIROHA_PCIE_H */ diff --git a/drivers/phy/phy-airoha-pcie.c 
b/drivers/phy/phy-airoha-pcie.c new file mode 100644 index 000000000000..bd3edaa986c8 --- /dev/null +++ b/drivers/phy/phy-airoha-pcie.c @@ -0,0 +1,1286 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 AIROHA Inc + * Author: Lorenzo Bianconi <lorenzo@kernel.org> + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "phy-airoha-pcie-regs.h" + +#define LEQ_LEN_CTRL_MAX_VAL 7 +#define FREQ_LOCK_MAX_ATTEMPT 10 + +enum airoha_pcie_port_gen { + PCIE_PORT_GEN1 = 1, + PCIE_PORT_GEN2, + PCIE_PORT_GEN3, +}; + +/** + * struct airoha_pcie_phy - PCIe phy driver main structure + * @dev: pointer to device + * @phy: pointer to generic phy + * @csr_2l: Analogic lane IO mapped register base address + * @pma0: IO mapped register base address of PMA0-PCIe + * @pma1: IO mapped register base address of PMA1-PCIe + * @p0_xr_dtime: IO mapped register base address of port0 Tx-Rx detection time + * @p1_xr_dtime: IO mapped register base address of port1 Tx-Rx detection time + * @rx_aeq: IO mapped register base address of Rx AEQ training + */ +struct airoha_pcie_phy { + struct device *dev; + struct phy *phy; + void __iomem *csr_2l; + void __iomem *pma0; + void __iomem *pma1; + void __iomem *p0_xr_dtime; + void __iomem *p1_xr_dtime; + void __iomem *rx_aeq; +}; + +static void airoha_phy_clear_bits(void __iomem *reg, u32 mask) +{ + u32 val = readl(reg) & ~mask; + + writel(val, reg); +} + +static void airoha_phy_set_bits(void __iomem *reg, u32 mask) +{ + u32 val = readl(reg) | mask; + + writel(val, reg); +} + +static void airoha_phy_update_bits(void __iomem *reg, u32 mask, u32 val) +{ + u32 tmp = readl(reg); + + tmp &= ~mask; + tmp |= val & mask; + writel(tmp, reg); +} + +#define airoha_phy_update_field(reg, mask, val) \ + do { \ + BUILD_BUG_ON_MSG(!__builtin_constant_p((mask)), \ + "mask is not constant"); \ + airoha_phy_update_bits((reg), (mask), \ + FIELD_PREP((mask), (val))); \ + } while (0) + +#define airoha_phy_csr_2l_clear_bits(pcie_phy, reg, mask) \ + airoha_phy_clear_bits((pcie_phy)->csr_2l + (reg), (mask)) +#define airoha_phy_csr_2l_set_bits(pcie_phy, reg, mask) \ + airoha_phy_set_bits((pcie_phy)->csr_2l + (reg), (mask)) +#define airoha_phy_csr_2l_update_field(pcie_phy, reg, mask, val) \ + airoha_phy_update_field((pcie_phy)->csr_2l + (reg), (mask), (val)) +#define airoha_phy_pma0_clear_bits(pcie_phy, reg, mask) \ + airoha_phy_clear_bits((pcie_phy)->pma0 + (reg), (mask)) +#define airoha_phy_pma1_clear_bits(pcie_phy, reg, mask) \ + airoha_phy_clear_bits((pcie_phy)->pma1 + (reg), (mask)) +#define airoha_phy_pma0_set_bits(pcie_phy, reg, mask) \ + airoha_phy_set_bits((pcie_phy)->pma0 + (reg), (mask)) +#define airoha_phy_pma1_set_bits(pcie_phy, reg, mask) \ + airoha_phy_set_bits((pcie_phy)->pma1 + (reg), (mask)) +#define airoha_phy_pma0_update_field(pcie_phy, reg, mask, val) \ + airoha_phy_update_field((pcie_phy)->pma0 + (reg), (mask), (val)) +#define airoha_phy_pma1_update_field(pcie_phy, reg, mask, val) \ + airoha_phy_update_field((pcie_phy)->pma1 + (reg), (mask), (val)) + +static void +airoha_phy_init_lane0_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy, + enum airoha_pcie_port_gen gen) +{ + u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941; + u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 
26000 : 32767; + u32 pr_idac, val, cdr_pr_idac_tmp = 0; + int i; + + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1, + PCIE_LCPLL_MAN_PWDB); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2, + PCIE_LOCK_TARGET_BEG, + fl_out_target - 100); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2, + PCIE_LOCK_TARGET_END, + fl_out_target + 100); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1, + PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_LOCK_LOCKTH, 0x3); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3, + PCIE_UNLOCK_TARGET_BEG, + fl_out_target - 100); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3, + PCIE_UNLOCK_TARGET_END, + fl_out_target + 100); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1, + PCIE_PLL_FT_UNLOCK_CYCLECNT, + lock_cyclecnt); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_UNLOCK_LOCKTH, 0x3); + + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE, + CSR_2L_PXP_CDR0_INJ_FORCE_OFF); + + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN); + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN); + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN); + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC); + + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, + PCIE_FORCE_DA_PXP_CDR_PR_PWDB); + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, + PCIE_FORCE_DA_PXP_CDR_PR_PWDB); + + for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) { + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN); + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN, 0x3); + + usleep_range(10000, 15000); + + val = FIELD_GET(PCIE_RO_FL_OUT, + readl(pcie_phy->pma0 + + REG_PCIE_PMA_RO_RX_FREQDET)); + if (val > fl_out_target) + cdr_pr_idac_tmp = i << 8; + } + + for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) { + pr_idac = cdr_pr_idac_tmp | (0x1 << i); + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN); + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN, 0x3); + + usleep_range(10000, 15000); + + val = FIELD_GET(PCIE_RO_FL_OUT, + readl(pcie_phy->pma0 + + REG_PCIE_PMA_RO_RX_FREQDET)); + if (val < fl_out_target) + pr_idac &= ~(0x1 << i); + + cdr_pr_idac_tmp = pr_idac; + } + + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, + cdr_pr_idac_tmp); + + for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) { + u32 val; + + 
airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN); + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN, 0x3); + + usleep_range(10000, 15000); + + val = readl(pcie_phy->pma0 + REG_PCIE_PMA_RO_RX_FREQDET); + if (val & PCIE_RO_FBCK_LOCK) + break; + } + + /* turn off force mode and update band values */ + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE, + CSR_2L_PXP_CDR0_INJ_FORCE_OFF); + + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC); + if (gen == PCIE_PORT_GEN3) { + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_DIG_RESERVE_14, + PCIE_FLL_IDAC_PCIEG3, + cdr_pr_idac_tmp); + } else { + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_DIG_RESERVE_13, + PCIE_FLL_IDAC_PCIEG1, + cdr_pr_idac_tmp); + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_DIG_RESERVE_13, + PCIE_FLL_IDAC_PCIEG2, + cdr_pr_idac_tmp); + } +} + +static void +airoha_phy_init_lane1_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy, + enum airoha_pcie_port_gen gen) +{ + u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941; + u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 26000 : 32767; + u32 pr_idac, val, cdr_pr_idac_tmp = 0; + int i; + + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1, + PCIE_LCPLL_MAN_PWDB); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2, + PCIE_LOCK_TARGET_BEG, + fl_out_target - 100); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2, + PCIE_LOCK_TARGET_END, + fl_out_target + 100); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1, + PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_LOCK_LOCKTH, 0x3); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3, + PCIE_UNLOCK_TARGET_BEG, + fl_out_target - 100); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3, + PCIE_UNLOCK_TARGET_END, + fl_out_target + 100); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1, + PCIE_PLL_FT_UNLOCK_CYCLECNT, + lock_cyclecnt); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_UNLOCK_LOCKTH, 0x3); + + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE, + CSR_2L_PXP_CDR1_INJ_FORCE_OFF); + + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN); + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB); + airoha_phy_pma1_clear_bits(pcie_phy, + 
REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, + PCIE_FORCE_DA_PXP_CDR_PR_PWDB); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, + PCIE_FORCE_DA_PXP_CDR_PR_PWDB); + + for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) { + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8); + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN); + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN, 0x3); + + usleep_range(10000, 15000); + + val = FIELD_GET(PCIE_RO_FL_OUT, + readl(pcie_phy->pma1 + + REG_PCIE_PMA_RO_RX_FREQDET)); + if (val > fl_out_target) + cdr_pr_idac_tmp = i << 8; + } + + for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) { + pr_idac = cdr_pr_idac_tmp | (0x1 << i); + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac); + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN); + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN, 0x3); + + usleep_range(10000, 15000); + + val = FIELD_GET(PCIE_RO_FL_OUT, + readl(pcie_phy->pma1 + + REG_PCIE_PMA_RO_RX_FREQDET)); + if (val < fl_out_target) + pr_idac &= ~(0x1 << i); + + cdr_pr_idac_tmp = pr_idac; + } + + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, + cdr_pr_idac_tmp); + + for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) { + u32 val; + + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN); + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_SS_RX_FREQ_DET4, + PCIE_FREQLOCK_DET_EN, 0x3); + + usleep_range(10000, 15000); + + val = readl(pcie_phy->pma1 + REG_PCIE_PMA_RO_RX_FREQDET); + if (val & PCIE_RO_FBCK_LOCK) + break; + } + + /* turn off force mode and update band values */ + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE, + CSR_2L_PXP_CDR1_INJ_FORCE_OFF); + + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN); + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN); + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB); + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC); + if (gen == PCIE_PORT_GEN3) { + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_DIG_RESERVE_14, + PCIE_FLL_IDAC_PCIEG3, + cdr_pr_idac_tmp); + } else { + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_DIG_RESERVE_13, + PCIE_FLL_IDAC_PCIEG1, + cdr_pr_idac_tmp); + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_DIG_RESERVE_13, + PCIE_FLL_IDAC_PCIEG2, + cdr_pr_idac_tmp); + } +} + +static void airoha_pcie_phy_init_default(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CMN, + CSR_2L_PXP_CMN_TRIM_MASK, 0x10); + writel(0xcccbcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_21); + writel(0xcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_22); + writel(0xcccbcccb, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_21); + writel(0xcccb, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_22); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CMN, + CSR_2L_PXP_CMN_LANE_EN); +} + +static void airoha_pcie_phy_init_clk_out(struct airoha_pcie_phy 
*pcie_phy) +{ + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_TXPLL_POSTDIV_D256, + CSR_2L_PXP_CLKTX0_AMP, 0x5); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_CLKTX0_FORCE_OUT1, + CSR_2L_PXP_CLKTX1_AMP, 0x5); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_TXPLL_POSTDIV_D256, + CSR_2L_PXP_CLKTX0_OFFSET, 0x2); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET, + CSR_2L_PXP_CLKTX1_OFFSET, 0x2); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX0_FORCE_OUT1, + CSR_2L_PXP_CLKTX0_HZ); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET, + CSR_2L_PXP_CLKTX1_HZ); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_CLKTX0_FORCE_OUT1, + CSR_2L_PXP_CLKTX0_IMP_SEL, 0x12); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_IMP_SEL, + CSR_2L_PXP_CLKTX1_IMP_SEL, 0x12); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV_D256, + CSR_2L_PXP_CLKTX0_SR); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET, + CSR_2L_PXP_CLKTX1_SR); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0, + CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd); +} + +static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET, + PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST | + PCIE_SW_RX_RST); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET, + PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST | + PCIE_SW_RX_RST); + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET, + PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET, + PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET); +} + +static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy) +{ + writel(0x2a00090b, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_17); + writel(0x2a00090b, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_17); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONPI, + CSR_2L_PXP_CDR0_PR_XFICK_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONPI, + CSR_2L_PXP_CDR1_PR_XFICK_EN); + airoha_phy_csr_2l_clear_bits(pcie_phy, + REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV, + CSR_2L_PXP_CDR0_PD_EDGE_DISABLE); + airoha_phy_csr_2l_clear_bits(pcie_phy, + REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV, + CSR_2L_PXP_CDR1_PD_EDGE_DISABLE); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV, + CSR_2L_PXP_RX0_PHYCK_SEL, 0x1); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV, + CSR_2L_PXP_RX1_PHYCK_SEL, 0x1); +} + +static void airoha_pcie_phy_init_jcpll(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_JCPLL_EN); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_DA_PXP_JCPLL_EN); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_JCPLL_EN); + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_DA_PXP_JCPLL_EN); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_VTP_EN, + CSR_2L_PXP_JCPLL_SPARE_LOW, 0x20); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY, + CSR_2L_PXP_JCPLL_RST); + writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_JCPLL_SSC_DELTA1); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD, + CSR_2L_PXP_JCPLL_SSC_PERIOD); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC, + CSR_2L_PXP_JCPLL_SSC_PHASE_INI); + airoha_phy_csr_2l_clear_bits(pcie_phy, 
REG_CSR_2L_JCPLL_SSC, + CSR_2L_PXP_JCPLL_SSC_TRI_EN); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR, + CSR_2L_PXP_JCPLL_LPF_BR, 0xa); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR, + CSR_2L_PXP_JCPLL_LPF_BP, 0xc); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR, + CSR_2L_PXP_JCPLL_LPF_BC, 0x1f); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC, + CSR_2L_PXP_JCPLL_LPF_BWC, 0x1e); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR, + CSR_2L_PXP_JCPLL_LPF_BWR, 0xa); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_JCPLL_MMD_PREDIV_MODE, + CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE, + 0x1); + airoha_phy_csr_2l_clear_bits(pcie_phy, CSR_2L_PXP_JCPLL_MONCK, + CSR_2L_PXP_JCPLL_REFIN_DIV); + + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS, + PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS, + PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW); + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW, + PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW, + 0x50000000); + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW, + PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW, + 0x50000000); + + airoha_phy_csr_2l_set_bits(pcie_phy, + REG_CSR_2L_JCPLL_MMD_PREDIV_MODE, + CSR_2L_PXP_JCPLL_POSTDIV_D5); + airoha_phy_csr_2l_set_bits(pcie_phy, + REG_CSR_2L_JCPLL_MMD_PREDIV_MODE, + CSR_2L_PXP_JCPLL_POSTDIV_D2); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY, + CSR_2L_PXP_JCPLL_RST_DLY, 0x4); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY, + CSR_2L_PXP_JCPLL_SDM_DI_LS); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_KBAND_VREF, + CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT, + CSR_2L_PXP_JCPLL_CHP_IOFST); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT, + CSR_2L_PXP_JCPLL_CHP_IBIAS, 0xc); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_JCPLL_MMD_PREDIV_MODE, + CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE, + 0x1); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_VCODIV, + CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV, + CSR_2L_PXP_JCPLL_VCO_CFIX, 0x1); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV, + CSR_2L_PXP_JCPLL_VCO_SCAPWR, 0x4); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT, + REG_CSR_2L_JCPLL_LPF_SHCK_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC, + CSR_2L_PXP_JCPLL_POSTDIV_EN); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC, + CSR_2L_PXP_JCPLL_KBAND_KFC); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC, + CSR_2L_PXP_JCPLL_KBAND_KF, 0x3); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC, + CSR_2L_PXP_JCPLL_KBAND_KS); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC, + CSR_2L_PXP_JCPLL_KBAND_DIV, 0x1); + + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE, + PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN); + airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE, + PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN); + + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC, + CSR_2L_PXP_JCPLL_KBAND_CODE, 0xe4); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN, + CSR_2L_PXP_JCPLL_TCL_AMP_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP, + 
CSR_2L_PXP_JCPLL_TCL_LPF_EN); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_JCPLL_TCL_KBAND_VREF, + CSR_2L_PXP_JCPLL_TCL_KBAND_VREF, 0xf); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN, + CSR_2L_PXP_JCPLL_TCL_AMP_GAIN, 0x1); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN, + CSR_2L_PXP_JCPLL_TCL_AMP_VREF, 0x5); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP, + CSR_2L_PXP_JCPLL_TCL_LPF_BW, 0x1); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCO_TCLVAR, + CSR_2L_PXP_JCPLL_VCO_TCLVAR, 0x3); + + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN); + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN); + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_JCPLL_EN); + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_DA_PXP_JCPLL_EN); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_JCPLL_EN); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT, + PCIE_FORCE_DA_PXP_JCPLL_EN); +} + +static void airoha_pcie_phy_txpll(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_TXPLL_EN); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_DA_PXP_TXPLL_EN); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_TXPLL_EN); + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_DA_PXP_TXPLL_EN); + + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV, + CSR_2L_PXP_TXPLL_PLL_RSTB); + writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC_PERIOD, + CSR_2L_PXP_txpll_SSC_PERIOD); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST, + CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK, + CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV, + CSR_2L_PXP_TXPLL_REFIN_DIV); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW, + CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3); + + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW); + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW, + PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW, + 0xc800000); + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW, + PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW, + 0xc800000); + + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS, + CSR_2L_PXP_TXPLL_SDM_IFM); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC, + CSR_2L_PXP_TXPLL_SSC_PHASE_INI); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV, + CSR_2L_PXP_TXPLL_RST_DLY, 0x4); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS, + 
CSR_2L_PXP_TXPLL_SDM_DI_LS); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS, + CSR_2L_PXP_TXPLL_SDM_ORD, 0x3); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF, + CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN); + writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST, + CSR_2L_PXP_TXPLL_LPF_BP, 0x1); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST, + CSR_2L_PXP_TXPLL_LPF_BC, 0x18); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST, + CSR_2L_PXP_TXPLL_LPF_BR, 0x5); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST, + CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK, + CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_VTP, + CSR_2L_PXP_TXPLL_SPARE_L, 0x1); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR, + CSR_2L_PXP_TXPLL_LPF_BWC); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV, + CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV, + CSR_2L_PXP_TXPLL_REFIN_DIV); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW, + CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_VCO_SCAPWR, + CSR_2L_PXP_TXPLL_VCO_SCAPWR, 0x7); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW, + CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3); + + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, + PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW); + + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC, + CSR_2L_PXP_TXPLL_SSC_PHASE_INI); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR, + CSR_2L_PXP_TXPLL_LPF_BWR); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2, + CSR_2L_PXP_TXPLL_REFIN_INTERNAL); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF, + CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_VTP, + CSR_2L_PXP_TXPLL_VTP_EN); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV, + CSR_2L_PXP_TXPLL_PHY_CK1_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2, + CSR_2L_PXP_TXPLL_REFIN_INTERNAL); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC, + CSR_2L_PXP_TXPLL_SSC_EN); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_750M_SYS_CK, + CSR_2L_PXP_TXPLL_LPF_SHCK_EN); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV, + CSR_2L_PXP_TXPLL_POSTDIV_EN); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV, + CSR_2L_PXP_TXPLL_KBAND_KFC); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV, + CSR_2L_PXP_TXPLL_KBAND_KF, 0x3); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV, + CSR_2L_PXP_txpll_KBAND_KS, 0x1); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV, + CSR_2L_PXP_TXPLL_KBAND_DIV, 0x4); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR, + CSR_2L_PXP_TXPLL_KBAND_CODE, 0xe4); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT, + CSR_2L_PXP_TXPLL_TCL_AMP_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_AMP_VREF, + CSR_2L_PXP_TXPLL_TCL_LPF_EN); + airoha_phy_csr_2l_update_field(pcie_phy, + 
REG_CSR_2L_TXPLL_TCL_KBAND_VREF, + CSR_2L_PXP_TXPLL_TCL_KBAND_VREF, 0xf); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT, + CSR_2L_PXP_TXPLL_TCL_AMP_GAIN, 0x3); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_TXPLL_TCL_AMP_VREF, + CSR_2L_PXP_TXPLL_TCL_AMP_VREF, 0xb); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW, + CSR_2L_PXP_TXPLL_TCL_LPF_BW, 0x3); + + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN); + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN); + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_TXPLL_EN); + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_DA_PXP_TXPLL_EN); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_SEL_DA_PXP_TXPLL_EN); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT, + PCIE_FORCE_DA_PXP_TXPLL_EN); +} + +static void airoha_pcie_phy_init_ssc_jcpll(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1, + CSR_2L_PXP_JCPLL_SSC_DELTA1, 0x106); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1, + CSR_2L_PXP_JCPLL_SSC_DELTA, 0x106); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD, + CSR_2L_PXP_JCPLL_SSC_PERIOD, 0x31b); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC, + CSR_2L_PXP_JCPLL_SSC_PHASE_INI); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC, + CSR_2L_PXP_JCPLL_SSC_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM, + CSR_2L_PXP_JCPLL_SDM_IFM); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN, + REG_CSR_2L_JCPLL_SDM_HREN); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY, + CSR_2L_PXP_JCPLL_SDM_DI_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC, + CSR_2L_PXP_JCPLL_SSC_TRI_EN); +} + +static void +airoha_pcie_phy_set_rxlan0_signal_detect(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW, + CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON); + + usleep_range(100, 200); + + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19, + PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20, + PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20, + PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030); + + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST, + CSR_2L_PXP_RX0_SIGDET_PEAK, 0x2); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL, + CSR_2L_PXP_RX0_SIGDET_VTH_SEL, 0x5); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0, + CSR_2L_PXP_VOS_PNINV, 0x2); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST, + CSR_2L_PXP_RX0_SIGDET_LPF_CTRL, 0x1); + + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2, + PCIE_CAL_OUT_OS, 0x0); + + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2, + CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB); + + airoha_phy_pma0_set_bits(pcie_phy, + 
REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL, + PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB); + airoha_phy_pma0_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL, + PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0, + PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0, + PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3); + airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1, + PCIE_DISB_RX_SDCAL_EN); + + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1, + PCIE_FORCE_RX_SDCAL_EN); + usleep_range(150, 200); + airoha_phy_pma0_clear_bits(pcie_phy, + REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1, + PCIE_FORCE_RX_SDCAL_EN); +} + +static void +airoha_pcie_phy_set_rxlan1_signal_detect(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW, + CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON); + + usleep_range(100, 200); + + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19, + PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20, + PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20, + PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030); + + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH, + CSR_2L_PXP_RX1_SIGDET_PEAK, 0x2); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH, + CSR_2L_PXP_RX1_SIGDET_VTH_SEL, 0x5); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0, + CSR_2L_PXP_VOS_PNINV, 0x2); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_DAC_RANGE_EYE, + CSR_2L_PXP_RX1_SIGDET_LPF_CTRL, 0x1); + + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2, + PCIE_CAL_OUT_OS, 0x0); + + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1, + CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB); + + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL, + PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB); + airoha_phy_pma1_update_field(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL, + PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0, + PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0, + PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3); + airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1, + PCIE_DISB_RX_SDCAL_EN); + + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1, + PCIE_FORCE_RX_SDCAL_EN); + usleep_range(150, 200); + airoha_phy_pma1_clear_bits(pcie_phy, + REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1, + PCIE_FORCE_RX_SDCAL_EN); +} + +static void airoha_pcie_phy_set_rxflow(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST, + PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB | + PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST, + PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB | + PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB); + + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB, + PCIE_FORCE_DA_PXP_CDR_PD_PWDB | + PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB); + airoha_phy_pma0_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB, + PCIE_FORCE_DA_PXP_RX_FE_PWDB | + PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB, + 
PCIE_FORCE_DA_PXP_CDR_PD_PWDB | + PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB); + airoha_phy_pma1_set_bits(pcie_phy, + REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB, + PCIE_FORCE_DA_PXP_RX_FE_PWDB | + PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB); + + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV, + CSR_2L_PXP_RX0_PHYCK_RSTB | + CSR_2L_PXP_RX0_TDC_CK_SEL); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV, + CSR_2L_PXP_RX1_PHYCK_RSTB | + CSR_2L_PXP_RX1_TDC_CK_SEL); + + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET, + PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST | + PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST | + PCIE_SW_TX_FIFO_RST); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET, + PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST | + PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST | + PCIE_SW_TX_FIFO_RST); + + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2, + CSR_2L_PXP_RX0_FE_VB_EQ2_EN | + CSR_2L_PXP_RX0_FE_VB_EQ3_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL, + CSR_2L_PXP_RX0_FE_VB_EQ1_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1, + CSR_2L_PXP_RX1_FE_VB_EQ1_EN | + CSR_2L_PXP_RX1_FE_VB_EQ2_EN | + CSR_2L_PXP_RX1_FE_VB_EQ3_EN); + + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0, + CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0, + CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0, + CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0, + CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4); +} + +static void airoha_pcie_phy_set_pr(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND, + CSR_2L_PXP_CDR0_PR_VREG_IBAND, 0x5); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND, + CSR_2L_PXP_CDR0_PR_VREG_CKBUF, 0x5); + + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_CKREF_DIV, + CSR_2L_PXP_CDR0_PR_CKREF_DIV); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW, + CSR_2L_PXP_CDR0_PR_CKREF_DIV1); + + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL, + CSR_2L_PXP_CDR1_PR_VREG_IBAND, 0x5); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL, + CSR_2L_PXP_CDR1_PR_VREG_CKBUF, 0x5); + + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_CKREF_DIV, + CSR_2L_PXP_CDR1_PR_CKREF_DIV); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW, + CSR_2L_PXP_CDR1_PR_CKREF_DIV1); + + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_LPF_RATIO, + CSR_2L_PXP_CDR0_LPF_TOP_LIM, 0x20000); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_LPF_RATIO, + CSR_2L_PXP_CDR1_LPF_TOP_LIM, 0x20000); + + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC, + CSR_2L_PXP_CDR0_PR_BETA_SEL, 0x2); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC, + CSR_2L_PXP_CDR1_PR_BETA_SEL, 0x2); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC, + CSR_2L_PXP_CDR0_PR_KBAND_DIV, 0x4); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC, + CSR_2L_PXP_CDR1_PR_KBAND_DIV, 0x4); +} + +static void airoha_pcie_phy_set_txflow(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX0_CKLDO, + CSR_2L_PXP_TX0_CKLDO_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO, + CSR_2L_PXP_TX1_CKLDO_EN); + + airoha_phy_csr_2l_set_bits(pcie_phy, 
REG_CSR_2L_TX0_CKLDO, + CSR_2L_PXP_TX0_DMEDGEGEN_EN); + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO, + CSR_2L_PXP_TX1_DMEDGEGEN_EN); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TX1_MULTLANE, + CSR_2L_PXP_TX1_MULTLANE_EN); +} + +static void airoha_pcie_phy_set_rx_mode(struct airoha_pcie_phy *pcie_phy) +{ + writel(0x804000, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_27); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18, + PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18, + PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5); + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18, + PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5); + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30, + 0x77700); + + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK, + CSR_2L_PXP_CDR0_PR_MONCK_ENABLE); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK, + CSR_2L_PXP_CDR0_PR_RESERVE0, 0x2); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS, + CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS, 0x19); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS, + CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS, 0x19); + airoha_phy_csr_2l_update_field(pcie_phy, + REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS, + CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS, 0x14); + + writel(0x804000, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_27); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18, + PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18, + PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18, + PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5); + + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30, + 0x77700); + + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK, + CSR_2L_PXP_CDR1_PR_MONCK_ENABLE); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK, + CSR_2L_PXP_CDR1_PR_RESERVE0, 0x2); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS, + CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS, 0x19); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS, + CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS, 0x19); + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS, + CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS, 0x14); +} + +static void airoha_pcie_phy_load_kflow(struct airoha_pcie_phy *pcie_phy) +{ + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12, + PCIE_FORCE_PMA_RX_SPEED, 0xa); + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12, + PCIE_FORCE_PMA_RX_SPEED, 0xa); + airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3); + airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3); + + airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12, + PCIE_FORCE_PMA_RX_SPEED); + airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12, + PCIE_FORCE_PMA_RX_SPEED); + usleep_range(100, 200); + + airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2); + airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2); +} + +/** + * airoha_pcie_phy_init() - Initialize the phy + * @phy: the phy to be initialized + * + * Initialize the phy registers. + * The hardware settings will be reset during suspend, it should be + * reinitialized when the consumer calls phy_init() again on resume. 
+ */ +static int airoha_pcie_phy_init(struct phy *phy) +{ + struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy); + u32 val; + + /* Setup Tx-Rx detection time */ + val = FIELD_PREP(PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL, 0x33) | + FIELD_PREP(PCIE_XTP_RXDET_EN_STB_T_SEL, 0x1) | + FIELD_PREP(PCIE_XTP_RXDET_FINISH_STB_T_SEL, 0x2) | + FIELD_PREP(PCIE_XTP_TXPD_TX_DATA_EN_DLY, 0x3) | + FIELD_PREP(PCIE_XTP_RXDET_LATCH_STB_T_SEL, 0x1); + writel(val, pcie_phy->p0_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44); + writel(val, pcie_phy->p1_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44); + /* Setup Rx AEQ training time */ + val = FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT, 0x32) | + FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT, 0x5050); + writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P0); + writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P1); + + /* enable load FLL-K flow */ + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14, + PCIE_FLL_LOAD_EN); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14, + PCIE_FLL_LOAD_EN); + + airoha_pcie_phy_init_default(pcie_phy); + airoha_pcie_phy_init_clk_out(pcie_phy); + airoha_pcie_phy_init_csr_2l(pcie_phy); + + usleep_range(100, 200); + + airoha_pcie_phy_init_rx(pcie_phy); + /* phase 1, no ssc for K TXPLL */ + airoha_pcie_phy_init_jcpll(pcie_phy); + + usleep_range(500, 600); + + /* TX PLL settings */ + airoha_pcie_phy_txpll(pcie_phy); + + usleep_range(200, 300); + + /* SSC JCPLL setting */ + airoha_pcie_phy_init_ssc_jcpll(pcie_phy); + + usleep_range(100, 200); + + /* Rx lan0 signal detect */ + airoha_pcie_phy_set_rxlan0_signal_detect(pcie_phy); + /* Rx lan1 signal detect */ + airoha_pcie_phy_set_rxlan1_signal_detect(pcie_phy); + /* RX FLOW */ + airoha_pcie_phy_set_rxflow(pcie_phy); + + usleep_range(100, 200); + + airoha_pcie_phy_set_pr(pcie_phy); + /* TX FLOW */ + airoha_pcie_phy_set_txflow(pcie_phy); + + usleep_range(100, 200); + /* RX mode setting */ + airoha_pcie_phy_set_rx_mode(pcie_phy); + /* Load K-Flow */ + airoha_pcie_phy_load_kflow(pcie_phy); + airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0, + PCIE_DA_XPON_CDR_PR_PWDB); + airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0, + PCIE_DA_XPON_CDR_PR_PWDB); + + usleep_range(100, 200); + + airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0, + PCIE_DA_XPON_CDR_PR_PWDB); + airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0, + PCIE_DA_XPON_CDR_PR_PWDB); + + usleep_range(100, 200); + + return 0; +} + +static int airoha_pcie_phy_exit(struct phy *phy) +{ + struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy); + + airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET, + PCIE_PMA_SW_RST); + airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET, + PCIE_PMA_SW_RST); + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC, + CSR_2L_PXP_JCPLL_SSC_PHASE_INI | + CSR_2L_PXP_JCPLL_SSC_TRI_EN | + CSR_2L_PXP_JCPLL_SSC_EN); + + return 0; +} + +static const struct phy_ops airoha_pcie_phy_ops = { + .init = airoha_pcie_phy_init, + .exit = airoha_pcie_phy_exit, + .owner = THIS_MODULE, +}; + +static int airoha_pcie_phy_probe(struct platform_device *pdev) +{ + struct airoha_pcie_phy *pcie_phy; + struct device *dev = &pdev->dev; + struct phy_provider *provider; + + pcie_phy = devm_kzalloc(dev, sizeof(*pcie_phy), GFP_KERNEL); + if (!pcie_phy) + return -ENOMEM; + + pcie_phy->csr_2l = devm_platform_ioremap_resource_byname(pdev, "csr-2l"); + if (IS_ERR(pcie_phy->csr_2l)) + return dev_err_probe(dev, PTR_ERR(pcie_phy->csr_2l), + 
"Failed to map phy-csr-2l base\n"); + + pcie_phy->pma0 = devm_platform_ioremap_resource_byname(pdev, "pma0"); + if (IS_ERR(pcie_phy->pma0)) + return dev_err_probe(dev, PTR_ERR(pcie_phy->pma0), + "Failed to map phy-pma0 base\n"); + + pcie_phy->pma1 = devm_platform_ioremap_resource_byname(pdev, "pma1"); + if (IS_ERR(pcie_phy->pma1)) + return dev_err_probe(dev, PTR_ERR(pcie_phy->pma1), + "Failed to map phy-pma1 base\n"); + + pcie_phy->phy = devm_phy_create(dev, dev->of_node, &airoha_pcie_phy_ops); + if (IS_ERR(pcie_phy->phy)) + return dev_err_probe(dev, PTR_ERR(pcie_phy->phy), + "Failed to create PCIe phy\n"); + + pcie_phy->p0_xr_dtime = + devm_platform_ioremap_resource_byname(pdev, "p0-xr-dtime"); + if (IS_ERR(pcie_phy->p0_xr_dtime)) + return dev_err_probe(dev, PTR_ERR(pcie_phy->p0_xr_dtime), + "Failed to map P0 Tx-Rx dtime base\n"); + + pcie_phy->p1_xr_dtime = + devm_platform_ioremap_resource_byname(pdev, "p1-xr-dtime"); + if (IS_ERR(pcie_phy->p1_xr_dtime)) + return dev_err_probe(dev, PTR_ERR(pcie_phy->p1_xr_dtime), + "Failed to map P1 Tx-Rx dtime base\n"); + + pcie_phy->rx_aeq = devm_platform_ioremap_resource_byname(pdev, "rx-aeq"); + if (IS_ERR(pcie_phy->rx_aeq)) + return dev_err_probe(dev, PTR_ERR(pcie_phy->rx_aeq), + "Failed to map Rx AEQ base\n"); + + pcie_phy->dev = dev; + phy_set_drvdata(pcie_phy->phy, pcie_phy); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) + return dev_err_probe(dev, PTR_ERR(provider), + "PCIe phy probe failed\n"); + + return 0; +} + +static const struct of_device_id airoha_pcie_phy_of_match[] = { + { .compatible = "airoha,en7581-pcie-phy" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, airoha_pcie_phy_of_match); + +static struct platform_driver airoha_pcie_phy_driver = { + .probe = airoha_pcie_phy_probe, + .driver = { + .name = "airoha-pcie-phy", + .of_match_table = airoha_pcie_phy_of_match, + }, +}; +module_platform_driver(airoha_pcie_phy_driver); + +MODULE_DESCRIPTION("Airoha PCIe PHY driver"); +MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index bf6a07590321..f053b525ccff 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -664,7 +664,7 @@ out_unlock: * * Returns the phy driver, after getting a refcount to it; or * -ENODEV if there is no such phy. The caller is responsible for - * calling phy_put() to release that count. + * calling of_phy_put() to release that count. 
*/ struct phy *of_phy_get(struct device_node *np, const char *con_id) { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c index 6c796723c8f5..5b36cc7ac78b 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c @@ -489,6 +489,243 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_gen3_pcs_misc_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), }; +static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18), + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30), + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21), + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4), + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x20), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d), + 
QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10), +}; + +static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18), + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30), + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21), + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4), + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07), + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55), + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29), + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), + 
QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90), + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89), + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10), +}; + +static const struct qmp_phy_init_tbl ipq9574_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x61), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x73), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xf0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xd3), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), +}; + +static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10), +}; + +static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + 
QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x14), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x10), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0b), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x06), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), +}; + +static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), +}; + +static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x14), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x10), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0b), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_PRE, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_POST, 0x58), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4, 0x07), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4, 0x19), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x49), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x2a), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x02), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03), + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), +}; + static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14), QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), @@ -2535,6 +2772,16 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5 = { .rx2 = 0x1800, }; +static const struct qmp_pcie_offsets qmp_pcie_offsets_ipq9574 = { + .serdes = 0, + .pcs = 0x1000, + .pcs_misc = 0x1400, + .tx = 0x0200, + .rx = 0x0400, + .tx2 = 0x0600, + .rx2 = 0x0800, +}; + static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = { .serdes = 0x1000, .pcs = 0x1200, @@ -2647,6 +2894,62 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = { .phy_status = PHYSTATUS, }; +static const struct qmp_phy_cfg 
ipq9574_gen3x1_pciephy_cfg = { + .lanes = 1, + + .offsets = &qmp_pcie_offsets_v4x1, + + .tbls = { + .serdes = ipq9574_gen3x1_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_serdes_tbl), + .tx = ipq8074_pcie_gen3_tx_tbl, + .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl), + .rx = ipq9574_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(ipq9574_pcie_rx_tbl), + .pcs = ipq9574_gen3x1_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_tbl), + .pcs_misc = ipq9574_gen3x1_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_misc_tbl), + }, + .reset_list = ipq8074_pciephy_reset_l, + .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l), + .vreg_list = NULL, + .num_vregs = 0, + .regs = pciephy_v4_regs_layout, + + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS, + .pipe_clock_rate = 250000000, +}; + +static const struct qmp_phy_cfg ipq9574_gen3x2_pciephy_cfg = { + .lanes = 2, + + .offsets = &qmp_pcie_offsets_ipq9574, + + .tbls = { + .serdes = ipq9574_gen3x2_pcie_serdes_tbl, + .serdes_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_serdes_tbl), + .tx = ipq8074_pcie_gen3_tx_tbl, + .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl), + .rx = ipq9574_pcie_rx_tbl, + .rx_num = ARRAY_SIZE(ipq9574_pcie_rx_tbl), + .pcs = ipq9574_gen3x2_pcie_pcs_tbl, + .pcs_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_tbl), + .pcs_misc = ipq9574_gen3x2_pcie_pcs_misc_tbl, + .pcs_misc_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_misc_tbl), + }, + .reset_list = ipq8074_pciephy_reset_l, + .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l), + .vreg_list = NULL, + .num_vregs = 0, + .regs = pciephy_v5_regs_layout, + + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + .phy_status = PHYSTATUS, + .pipe_clock_rate = 250000000, +}; + static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = { .lanes = 1, @@ -3730,14 +4033,11 @@ static int phy_aux_clk_register(struct qmp_pcie *qmp, struct device_node *np) { struct clk_fixed_rate *fixed = &qmp->aux_clk_fixed; struct clk_init_data init = { }; - int ret; + char name[64]; - ret = of_property_read_string_index(np, "clock-output-names", 1, &init.name); - if (ret) { - dev_err(qmp->dev, "%pOFn: No clock-output-names index 1\n", np); - return ret; - } + snprintf(name, sizeof(name), "%s::phy_aux_clk", dev_name(qmp->dev)); + init.name = name; init.ops = &clk_fixed_rate_ops; fixed->fixed_rate = qmp->cfg->aux_clock_rate; @@ -4031,6 +4331,12 @@ static const struct of_device_id qmp_pcie_of_match_table[] = { .compatible = "qcom,ipq8074-qmp-pcie-phy", .data = &ipq8074_pciephy_cfg, }, { + .compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy", + .data = &ipq9574_gen3x1_pciephy_cfg, + }, { + .compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy", + .data = &ipq9574_gen3x2_pciephy_cfg, + }, { .compatible = "qcom,msm8998-qmp-pcie-phy", .data = &msm8998_pciephy_cfg, }, { diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h index a469ae2a10a1..fa15a03055de 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h @@ -11,8 +11,22 @@ #define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2 0x0c #define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4 0x14 #define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20 +#define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x44 +#define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H 0x48 +#define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x4c +#define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H 0x50 #define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54 +#define 
QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1 0x5c +#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2 0x60 +#define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4 0x68 +#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2 0x7c +#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4 0x84 +#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x88 +#define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6 0x8c #define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94 +#define QPHY_V5_PCS_PCIE_EQ_CONFIG1 0xa4 #define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8 +#define QPHY_V5_PCS_PCIE_PRESET_P10_PRE 0xc0 +#define QPHY_V5_PCS_PCIE_PRESET_P10_POST 0xe4 #endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h index ad326e301a3a..231e59364e31 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h @@ -8,6 +8,9 @@ /* QMP V2 PHY for PCIE gen3 ports - QSERDES PLL registers */ #define QSERDES_PLL_BG_TIMER 0x00c +#define QSERDES_PLL_SSC_EN_CENTER 0x010 +#define QSERDES_PLL_SSC_ADJ_PER1 0x014 +#define QSERDES_PLL_SSC_ADJ_PER2 0x018 #define QSERDES_PLL_SSC_PER1 0x01c #define QSERDES_PLL_SSC_PER2 0x020 #define QSERDES_PLL_SSC_STEP_SIZE1_MODE0 0x024 diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index c174463c58a3..9b0eb87b1680 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -2253,6 +2253,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = { .compatible = "qcom,sa8775p-qmp-usb3-uni-phy", .data = &sa8775p_usb3_uniphy_cfg, }, { + .compatible = "qcom,sc8180x-qmp-usb3-uni-phy", + .data = &sm8150_usb3_uniphy_cfg, + }, { .compatible = "qcom,sc8280xp-qmp-usb3-uni-phy", .data = &sc8280xp_usb3_uniphy_cfg, }, { diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig index 08b0f4345760..490263375057 100644 --- a/drivers/phy/rockchip/Kconfig +++ b/drivers/phy/rockchip/Kconfig @@ -86,7 +86,9 @@ config PHY_ROCKCHIP_PCIE config PHY_ROCKCHIP_SAMSUNG_HDPTX tristate "Rockchip Samsung HDMI/eDP Combo PHY driver" depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF + depends on HAS_IOMEM select GENERIC_PHY + select MFD_SYSCON select RATIONAL help Enable this to support the Rockchip HDMI/eDP Combo PHY diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index 04171eed5b16..df52b78a120b 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -8,6 +8,7 @@ * Author: Vivek Gautam <gautam.vivek@samsung.com> */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> @@ -30,18 +31,16 @@ #define EXYNOS5_FSEL_19MHZ2 0x3 #define EXYNOS5_FSEL_20MHZ 0x4 #define EXYNOS5_FSEL_24MHZ 0x5 -#define EXYNOS5_FSEL_26MHZ 0x82 +#define EXYNOS5_FSEL_26MHZ 0x6 #define EXYNOS5_FSEL_50MHZ 0x7 /* Exynos5: USB 3.0 DRD PHY registers */ #define EXYNOS5_DRD_LINKSYSTEM 0x04 - +#define LINKSYSTEM_XHCI_VERSION_CONTROL BIT(27) #define LINKSYSTEM_FLADJ_MASK (0x3f << 1) #define LINKSYSTEM_FLADJ(_x) ((_x) << 1) -#define LINKSYSTEM_XHCI_VERSION_CONTROL BIT(27) #define EXYNOS5_DRD_PHYUTMI 0x08 - #define PHYUTMI_OTGDISABLE BIT(6) #define PHYUTMI_FORCESUSPEND BIT(1) #define PHYUTMI_FORCESLEEP BIT(0) @@ -49,40 +48,31 @@ #define EXYNOS5_DRD_PHYPIPE 0x0c #define EXYNOS5_DRD_PHYCLKRST 0x10 - #define PHYCLKRST_EN_UTMISUSPEND BIT(31) - #define PHYCLKRST_SSC_REFCLKSEL_MASK (0xff << 23) #define PHYCLKRST_SSC_REFCLKSEL(_x) ((_x) << 23) - #define PHYCLKRST_SSC_RANGE_MASK (0x03 << 21) 
#define PHYCLKRST_SSC_RANGE(_x) ((_x) << 21) - #define PHYCLKRST_SSC_EN BIT(20) #define PHYCLKRST_REF_SSP_EN BIT(19) #define PHYCLKRST_REF_CLKDIV2 BIT(18) - #define PHYCLKRST_MPLL_MULTIPLIER_MASK (0x7f << 11) #define PHYCLKRST_MPLL_MULTIPLIER_100MHZ_REF (0x19 << 11) #define PHYCLKRST_MPLL_MULTIPLIER_50M_REF (0x32 << 11) #define PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF (0x68 << 11) #define PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF (0x7d << 11) #define PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF (0x02 << 11) - -#define PHYCLKRST_FSEL_UTMI_MASK (0x7 << 5) #define PHYCLKRST_FSEL_PIPE_MASK (0x7 << 8) +#define PHYCLKRST_FSEL_UTMI_MASK (0x7 << 5) #define PHYCLKRST_FSEL(_x) ((_x) << 5) #define PHYCLKRST_FSEL_PAD_100MHZ (0x27 << 5) #define PHYCLKRST_FSEL_PAD_24MHZ (0x2a << 5) #define PHYCLKRST_FSEL_PAD_20MHZ (0x31 << 5) #define PHYCLKRST_FSEL_PAD_19_2MHZ (0x38 << 5) - #define PHYCLKRST_RETENABLEN BIT(4) - #define PHYCLKRST_REFCLKSEL_MASK (0x03 << 2) #define PHYCLKRST_REFCLKSEL_PAD_REFCLK (0x2 << 2) #define PHYCLKRST_REFCLKSEL_EXT_REFCLK (0x3 << 2) - #define PHYCLKRST_PORTRESET BIT(1) #define PHYCLKRST_COMMONONN BIT(0) @@ -100,30 +90,27 @@ #define PHYREG1_CR_ACK BIT(0) #define EXYNOS5_DRD_PHYPARAM0 0x1c - #define PHYPARAM0_REF_USE_PAD BIT(31) #define PHYPARAM0_REF_LOSLEVEL_MASK (0x1f << 26) #define PHYPARAM0_REF_LOSLEVEL (0x9 << 26) #define EXYNOS5_DRD_PHYPARAM1 0x20 - #define PHYPARAM1_PCS_TXDEEMPH_MASK (0x1f << 0) #define PHYPARAM1_PCS_TXDEEMPH (0x1c) #define EXYNOS5_DRD_PHYTERM 0x24 #define EXYNOS5_DRD_PHYTEST 0x28 - #define PHYTEST_POWERDOWN_SSP BIT(3) #define PHYTEST_POWERDOWN_HSP BIT(2) #define EXYNOS5_DRD_PHYADP 0x2c #define EXYNOS5_DRD_PHYUTMICLKSEL 0x30 - #define PHYUTMICLKSEL_UTMI_CLKSEL BIT(2) #define EXYNOS5_DRD_PHYRESUME 0x34 + #define EXYNOS5_DRD_LINKPORT 0x44 /* USB 3.0 DRD PHY SS Function Control Reg; accessed by CR_PORT */ @@ -147,35 +134,215 @@ /* Exynos850: USB DRD PHY registers */ #define EXYNOS850_DRD_LINKCTRL 0x04 -#define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4) +#define LINKCTRL_FORCE_RXELECIDLE BIT(18) +#define LINKCTRL_FORCE_PHYSTATUS BIT(17) +#define LINKCTRL_FORCE_PIPE_EN BIT(16) #define LINKCTRL_FORCE_QACT BIT(8) +#define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4) + +#define EXYNOS850_DRD_LINKPORT 0x08 +#define LINKPORT_HOST_NUM_U3 GENMASK(19, 16) +#define LINKPORT_HOST_NUM_U2 GENMASK(15, 12) #define EXYNOS850_DRD_CLKRST 0x20 -#define CLKRST_LINK_SW_RST BIT(0) -#define CLKRST_PORT_RST BIT(1) +/* + * On versions without SS ports (like E850), bit 3 is for the 2.0 phy (HS), + * while on versions with (like gs101), bits 2 and 3 are for the 3.0 phy (SS) + * and bits 12 & 13 for the 2.0 phy. 
+ */ +#define CLKRST_PHY20_SW_POR BIT(13) +#define CLKRST_PHY20_SW_POR_SEL BIT(12) +#define CLKRST_LINK_PCLK_SEL BIT(7) #define CLKRST_PHY_SW_RST BIT(3) +#define CLKRST_PHY_RESET_SEL BIT(2) +#define CLKRST_PORT_RST BIT(1) +#define CLKRST_LINK_SW_RST BIT(0) + +#define EXYNOS850_DRD_SSPPLLCTL 0x30 +#define SSPPLLCTL_FSEL GENMASK(2, 0) #define EXYNOS850_DRD_UTMI 0x50 -#define UTMI_FORCE_SLEEP BIT(0) -#define UTMI_FORCE_SUSPEND BIT(1) -#define UTMI_DM_PULLDOWN BIT(2) -#define UTMI_DP_PULLDOWN BIT(3) -#define UTMI_FORCE_BVALID BIT(4) #define UTMI_FORCE_VBUSVALID BIT(5) +#define UTMI_FORCE_BVALID BIT(4) +#define UTMI_DP_PULLDOWN BIT(3) +#define UTMI_DM_PULLDOWN BIT(2) +#define UTMI_FORCE_SUSPEND BIT(1) +#define UTMI_FORCE_SLEEP BIT(0) #define EXYNOS850_DRD_HSP 0x54 -#define HSP_COMMONONN BIT(8) -#define HSP_EN_UTMISUSPEND BIT(9) -#define HSP_VBUSVLDEXT BIT(12) -#define HSP_VBUSVLDEXTSEL BIT(13) #define HSP_FSV_OUT_EN BIT(24) +#define HSP_VBUSVLDEXTSEL BIT(13) +#define HSP_VBUSVLDEXT BIT(12) +#define HSP_EN_UTMISUSPEND BIT(9) +#define HSP_COMMONONN BIT(8) + +#define EXYNOS850_DRD_HSPPARACON 0x58 +#define HSPPARACON_TXVREF GENMASK(31, 28) +#define HSPPARACON_TXRISE GENMASK(25, 24) +#define HSPPARACON_TXRES GENMASK(22, 21) +#define HSPPARACON_TXPREEMPPULSE BIT(20) +#define HSPPARACON_TXPREEMPAMP GENMASK(19, 18) +#define HSPPARACON_TXHSXV GENMASK(17, 16) +#define HSPPARACON_TXFSLS GENMASK(15, 12) +#define HSPPARACON_SQRX GENMASK(10, 8) +#define HSPPARACON_OTG GENMASK(6, 4) +#define HSPPARACON_COMPDIS GENMASK(2, 0) #define EXYNOS850_DRD_HSP_TEST 0x5c #define HSP_TEST_SIDDQ BIT(24) +/* Exynos9 - GS101 */ +#define EXYNOS850_DRD_SECPMACTL 0x48 +#define SECPMACTL_PMA_ROPLL_REF_CLK_SEL GENMASK(13, 12) +#define SECPMACTL_PMA_LCPLL_REF_CLK_SEL GENMASK(11, 10) +#define SECPMACTL_PMA_REF_FREQ_SEL GENMASK(9, 8) +#define SECPMACTL_PMA_LOW_PWR BIT(4) +#define SECPMACTL_PMA_TRSV_SW_RST BIT(3) +#define SECPMACTL_PMA_CMN_SW_RST BIT(2) +#define SECPMACTL_PMA_INIT_SW_RST BIT(1) +#define SECPMACTL_PMA_APB_SW_RST BIT(0) + +/* PMA registers */ +#define EXYNOS9_PMA_USBDP_CMN_REG0008 0x0020 +#define CMN_REG0008_OVRD_AUX_EN BIT(3) +#define CMN_REG0008_AUX_EN BIT(2) + +#define EXYNOS9_PMA_USBDP_CMN_REG00B8 0x02e0 +#define CMN_REG00B8_LANE_MUX_SEL_DP GENMASK(3, 0) + +#define EXYNOS9_PMA_USBDP_CMN_REG01C0 0x0700 +#define CMN_REG01C0_ANA_LCPLL_LOCK_DONE BIT(7) +#define CMN_REG01C0_ANA_LCPLL_AFC_DONE BIT(6) + +/* these have similar register layout, for lanes 0 and 2 */ +#define EXYNOS9_PMA_USBDP_TRSV_REG03C3 0x0f0c +#define EXYNOS9_PMA_USBDP_TRSV_REG07C3 0x1f0c +#define TRSV_REG03C3_LN0_MON_RX_CDR_AFC_DONE BIT(3) +#define TRSV_REG03C3_LN0_MON_RX_CDR_CAL_DONE BIT(2) +#define TRSV_REG03C3_LN0_MON_RX_CDR_FLD_PLL_MODE_DONE BIT(1) +#define TRSV_REG03C3_LN0_MON_RX_CDR_LOCK_DONE BIT(0) + +/* TRSV_REG0413 and TRSV_REG0813 have similar register layout */ +#define EXYNOS9_PMA_USBDP_TRSV_REG0413 0x104c +#define TRSV_REG0413_OVRD_LN1_TX_RXD_COMP_EN BIT(7) +#define TRSV_REG0413_OVRD_LN1_TX_RXD_EN BIT(5) + +#define EXYNOS9_PMA_USBDP_TRSV_REG0813 0x204c +#define TRSV_REG0813_OVRD_LN3_TX_RXD_COMP_EN BIT(7) +#define TRSV_REG0813_OVRD_LN3_TX_RXD_EN BIT(5) + +/* PCS registers */ +#define EXYNOS9_PCS_NS_VEC_PS1_N1 0x010c +#define EXYNOS9_PCS_NS_VEC_PS2_N0 0x0110 +#define EXYNOS9_PCS_NS_VEC_PS3_N0 0x0118 +#define NS_VEC_NS_REQ GENMASK(31, 24) +#define NS_VEC_ENABLE_TIMER BIT(22) +#define NS_VEC_SEL_TIMEOUT GENMASK(21, 20) +#define NS_VEC_INV_MASK GENMASK(19, 16) +#define NS_VEC_COND_MASK GENMASK(11, 8) +#define NS_VEC_EXP_COND GENMASK(3, 0) + 
+#define EXYNOS9_PCS_OUT_VEC_2 0x014c +#define EXYNOS9_PCS_OUT_VEC_3 0x0150 +#define PCS_OUT_VEC_B9_DYNAMIC BIT(19) +#define PCS_OUT_VEC_B9_SEL_OUT BIT(18) +#define PCS_OUT_VEC_B8_DYNAMIC BIT(17) +#define PCS_OUT_VEC_B8_SEL_OUT BIT(16) +#define PCS_OUT_VEC_B7_DYNAMIC BIT(15) +#define PCS_OUT_VEC_B7_SEL_OUT BIT(14) +#define PCS_OUT_VEC_B6_DYNAMIC BIT(13) +#define PCS_OUT_VEC_B6_SEL_OUT BIT(12) +#define PCS_OUT_VEC_B5_DYNAMIC BIT(11) +#define PCS_OUT_VEC_B5_SEL_OUT BIT(10) +#define PCS_OUT_VEC_B4_DYNAMIC BIT(9) +#define PCS_OUT_VEC_B4_SEL_OUT BIT(8) +#define PCS_OUT_VEC_B3_DYNAMIC BIT(7) +#define PCS_OUT_VEC_B3_SEL_OUT BIT(6) +#define PCS_OUT_VEC_B2_DYNAMIC BIT(5) +#define PCS_OUT_VEC_B2_SEL_OUT BIT(4) +#define PCS_OUT_VEC_B1_DYNAMIC BIT(3) +#define PCS_OUT_VEC_B1_SEL_OUT BIT(2) +#define PCS_OUT_VEC_B0_DYNAMIC BIT(1) +#define PCS_OUT_VEC_B0_SEL_OUT BIT(0) + +#define EXYNOS9_PCS_TIMEOUT_0 0x0170 + +#define EXYNOS9_PCS_TIMEOUT_3 0x017c + +#define EXYNOS9_PCS_EBUF_PARAM 0x0304 +#define EBUF_PARAM_SKP_REMOVE_TH_EMPTY_MODE GENMASK(29, 24) + +#define EXYNOS9_PCS_BACK_END_MODE_VEC 0x030c +#define BACK_END_MODE_VEC_FORCE_EBUF_EMPTY_MODE BIT(1) +#define BACK_END_MODE_VEC_DISABLE_DATA_MASK BIT(0) + +#define EXYNOS9_PCS_RX_CONTROL 0x03f0 +#define RX_CONTROL_EN_BLOCK_ALIGNER_TYPE_B BIT(22) + +#define EXYNOS9_PCS_RX_CONTROL_DEBUG 0x03f4 +#define RX_CONTROL_DEBUG_EN_TS_CHECK BIT(5) +#define RX_CONTROL_DEBUG_NUM_COM_FOUND GENMASK(3, 0) + +#define EXYNOS9_PCS_LOCAL_COEF 0x040c +#define LOCAL_COEF_PMA_CENTER_COEF GENMASK(21, 16) +#define LOCAL_COEF_LF GENMASK(13, 8) +#define LOCAL_COEF_FS GENMASK(5, 0) + +#define EXYNOS9_PCS_HS_TX_COEF_MAP_0 0x0410 +#define HS_TX_COEF_MAP_0_SSTX_DEEMP GENMASK(17, 12) +#define HS_TX_COEF_MAP_0_SSTX_LEVEL GENMASK(11, 6) +#define HS_TX_COEF_MAP_0_SSTX_PRE_SHOOT GENMASK(5, 0) + + #define KHZ 1000 #define MHZ (KHZ * KHZ) +#define PHY_TUNING_ENTRY_PHY(o, m, v) { \ + .off = (o), \ + .mask = (m), \ + .val = (v), \ + .region = PTR_PHY \ + } + +#define PHY_TUNING_ENTRY_PCS(o, m, v) { \ + .off = (o), \ + .mask = (m), \ + .val = (v), \ + .region = PTR_PCS \ + } + +#define PHY_TUNING_ENTRY_PMA(o, m, v) { \ + .off = (o), \ + .mask = (m), \ + .val = (v), \ + .region = PTR_PMA, \ + } + +#define PHY_TUNING_ENTRY_LAST { .region = PTR_INVALID } + +#define for_each_phy_tune(tune) \ + for (; (tune)->region != PTR_INVALID; ++(tune)) + +struct exynos5_usbdrd_phy_tuning { + u32 off; + u32 mask; + u32 val; + char region; +#define PTR_INVALID 0 +#define PTR_PHY 1 +#define PTR_PCS 2 +#define PTR_PMA 3 +}; + +enum exynos5_usbdrd_phy_tuning_state { + PTS_UTMI_POSTINIT, + PTS_PIPE3_PREINIT, + PTS_PIPE3_INIT, + PTS_PIPE3_POSTINIT, + PTS_PIPE3_POSTLOCK, + PTS_MAX, +}; + enum exynos5_usbdrd_phy_id { EXYNOS5_DRDPHY_UTMI, EXYNOS5_DRDPHY_PIPE3, @@ -187,44 +354,48 @@ struct exynos5_usbdrd_phy; struct exynos5_usbdrd_phy_config { u32 id; - void (*phy_isol)(struct phy_usb_instance *inst, u32 on); + void (*phy_isol)(struct phy_usb_instance *inst, bool isolate); void (*phy_init)(struct exynos5_usbdrd_phy *phy_drd); unsigned int (*set_refclk)(struct phy_usb_instance *inst); }; struct exynos5_usbdrd_phy_drvdata { const struct exynos5_usbdrd_phy_config *phy_cfg; + const struct exynos5_usbdrd_phy_tuning **phy_tunes; const struct phy_ops *phy_ops; + const char * const *clk_names; + int n_clks; + const char * const *core_clk_names; + int n_core_clks; + const char * const *regulator_names; + int n_regulators; u32 pmu_offset_usbdrd0_phy; + u32 pmu_offset_usbdrd0_phy_ss; u32 pmu_offset_usbdrd1_phy; - bool 
has_common_clk_gate; }; /** * struct exynos5_usbdrd_phy - driver data for USB 3.0 PHY * @dev: pointer to device instance of this platform device * @reg_phy: usb phy controller register memory base - * @clk: phy clock for register access - * @pipeclk: clock for pipe3 phy - * @utmiclk: clock for utmi+ phy - * @itpclk: clock for ITP generation + * @reg_pcs: usb phy physical coding sublayer register memory base + * @reg_pma: usb phy physical media attachment register memory base + * @clks: clocks for register access + * @core_clks: core clocks for phy (ref, pipe3, utmi+, ITP, etc. as required) * @drv_data: pointer to SoC level driver data structure * @phys: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY * instances each with its 'phy' and 'phy_cfg'. * @extrefclk: frequency select settings when using 'separate * reference clocks' for SS and HS operations - * @ref_clk: reference clock to PHY block from which PHY's - * operational clocks are derived - * @vbus: VBUS regulator for phy - * @vbus_boost: Boost regulator for VBUS present on few Exynos boards + * @regulators: regulators for phy */ struct exynos5_usbdrd_phy { struct device *dev; void __iomem *reg_phy; - struct clk *clk; - struct clk *pipeclk; - struct clk *utmiclk; - struct clk *itpclk; + void __iomem *reg_pcs; + void __iomem *reg_pma; + struct clk_bulk_data *clks; + struct clk_bulk_data *core_clks; const struct exynos5_usbdrd_phy_drvdata *drv_data; struct phy_usb_instance { struct phy *phy; @@ -234,9 +405,7 @@ struct exynos5_usbdrd_phy { const struct exynos5_usbdrd_phy_config *phy_cfg; } phys[EXYNOS5_DRDPHYS_NUM]; u32 extrefclk; - struct clk *ref_clk; - struct regulator *vbus; - struct regulator *vbus_boost; + struct regulator_bulk_data *regulators; }; static inline @@ -287,14 +456,14 @@ static unsigned int exynos5_rate_to_clk(unsigned long rate, u32 *reg) } static void exynos5_usbdrd_phy_isol(struct phy_usb_instance *inst, - unsigned int on) + bool isolate) { unsigned int val; if (!inst->reg_pmu) return; - val = on ? 0 : EXYNOS4_PHY_ENABLE; + val = isolate ? 
0 : EXYNOS4_PHY_ENABLE; regmap_update_bits(inst->reg_pmu, inst->pmu_offset, EXYNOS4_PHY_ENABLE, val); @@ -371,6 +540,45 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst) return reg; } +static void +exynos5_usbdrd_apply_phy_tunes(struct exynos5_usbdrd_phy *phy_drd, + enum exynos5_usbdrd_phy_tuning_state state) +{ + const struct exynos5_usbdrd_phy_tuning *tune; + + tune = phy_drd->drv_data->phy_tunes[state]; + if (!tune) + return; + + for_each_phy_tune(tune) { + void __iomem *reg_base; + u32 reg = 0; + + switch (tune->region) { + case PTR_PHY: + reg_base = phy_drd->reg_phy; + break; + case PTR_PCS: + reg_base = phy_drd->reg_pcs; + break; + case PTR_PMA: + reg_base = phy_drd->reg_pma; + break; + default: + dev_warn_once(phy_drd->dev, + "unknown phy region %d\n", tune->region); + continue; + } + + if (~tune->mask) { + reg = readl(reg_base + tune->off); + reg &= ~tune->mask; + } + reg |= tune->val; + writel(reg, reg_base + tune->off); + } +} + static void exynos5_usbdrd_pipe3_init(struct exynos5_usbdrd_phy *phy_drd) { u32 reg; @@ -386,6 +594,129 @@ static void exynos5_usbdrd_pipe3_init(struct exynos5_usbdrd_phy *phy_drd) writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); } +static void +exynos5_usbdrd_usbdp_g2_v4_ctrl_pma_ready(struct exynos5_usbdrd_phy *phy_drd) +{ + void __iomem *regs_base = phy_drd->reg_phy; + u32 reg; + + /* link pipe_clock selection to pclk of PMA */ + reg = readl(regs_base + EXYNOS850_DRD_CLKRST); + reg |= CLKRST_LINK_PCLK_SEL; + writel(reg, regs_base + EXYNOS850_DRD_CLKRST); + + reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); + reg &= ~SECPMACTL_PMA_REF_FREQ_SEL; + reg |= FIELD_PREP_CONST(SECPMACTL_PMA_REF_FREQ_SEL, 1); + /* SFR reset */ + reg |= (SECPMACTL_PMA_LOW_PWR | SECPMACTL_PMA_APB_SW_RST); + reg &= ~(SECPMACTL_PMA_ROPLL_REF_CLK_SEL | + SECPMACTL_PMA_LCPLL_REF_CLK_SEL); + /* PMA power off */ + reg |= (SECPMACTL_PMA_TRSV_SW_RST | SECPMACTL_PMA_CMN_SW_RST | + SECPMACTL_PMA_INIT_SW_RST); + writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL); + + udelay(1); + + reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); + reg &= ~SECPMACTL_PMA_LOW_PWR; + writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL); + + udelay(1); + + /* release override */ + reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL); + reg &= ~LINKCTRL_FORCE_PIPE_EN; + writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL); + + udelay(1); + + /* APB enable */ + reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); + reg &= ~SECPMACTL_PMA_APB_SW_RST; + writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL); +} + +static void +exynos5_usbdrd_usbdp_g2_v4_pma_lane_mux_sel(struct exynos5_usbdrd_phy *phy_drd) +{ + void __iomem *regs_base = phy_drd->reg_pma; + u32 reg; + + /* lane configuration: USB on all lanes */ + reg = readl(regs_base + EXYNOS9_PMA_USBDP_CMN_REG00B8); + reg &= ~CMN_REG00B8_LANE_MUX_SEL_DP; + writel(reg, regs_base + EXYNOS9_PMA_USBDP_CMN_REG00B8); + + /* + * FIXME: below code supports one connector orientation only. It needs + * updating once we can receive connector events. 
+ */ + /* override of TX receiver detector and comparator: lane 1 */ + reg = readl(regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0413); + reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_COMP_EN; + reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_EN; + writel(reg, regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0413); + + /* lane 3 */ + reg = readl(regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0813); + reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_COMP_EN; + reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_EN; + writel(reg, regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0813); +} + +static int +exynos5_usbdrd_usbdp_g2_v4_pma_check_pll_lock(struct exynos5_usbdrd_phy *phy_drd) +{ + static const unsigned int timeout_us = 40000; + static const unsigned int sleep_us = 40; + static const u32 locked = (CMN_REG01C0_ANA_LCPLL_LOCK_DONE | + CMN_REG01C0_ANA_LCPLL_AFC_DONE); + u32 reg; + int err; + + err = readl_poll_timeout( + phy_drd->reg_pma + EXYNOS9_PMA_USBDP_CMN_REG01C0, + reg, (reg & locked) == locked, sleep_us, timeout_us); + if (err) + dev_err(phy_drd->dev, + "timed out waiting for PLL lock: %#.8x\n", reg); + + return err; +} + +static void +exynos5_usbdrd_usbdp_g2_v4_pma_check_cdr_lock(struct exynos5_usbdrd_phy *phy_drd) +{ + static const unsigned int timeout_us = 40000; + static const unsigned int sleep_us = 40; + static const u32 locked = + (TRSV_REG03C3_LN0_MON_RX_CDR_AFC_DONE + | TRSV_REG03C3_LN0_MON_RX_CDR_CAL_DONE + | TRSV_REG03C3_LN0_MON_RX_CDR_FLD_PLL_MODE_DONE + | TRSV_REG03C3_LN0_MON_RX_CDR_LOCK_DONE); + u32 reg; + int err; + + err = readl_poll_timeout( + phy_drd->reg_pma + EXYNOS9_PMA_USBDP_TRSV_REG03C3, + reg, (reg & locked) == locked, sleep_us, timeout_us); + if (!err) + return; + + dev_err(phy_drd->dev, + "timed out waiting for CDR lock (l0): %#.8x, retrying\n", reg); + + /* based on cable orientation, this might be on the other phy port */ + err = readl_poll_timeout( + phy_drd->reg_pma + EXYNOS9_PMA_USBDP_TRSV_REG07C3, + reg, (reg & locked) == locked, sleep_us, timeout_us); + if (err) + dev_err(phy_drd->dev, + "timed out waiting for CDR lock (l2): %#.8x\n", reg); +} + static void exynos5_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd) { u32 reg; @@ -417,7 +748,7 @@ static int exynos5_usbdrd_phy_init(struct phy *phy) struct phy_usb_instance *inst = phy_get_drvdata(phy); struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); - ret = clk_prepare_enable(phy_drd->clk); + ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks); if (ret) return ret; @@ -462,12 +793,12 @@ static int exynos5_usbdrd_phy_init(struct phy *phy) writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); - udelay(10); + fsleep(10); reg &= ~PHYCLKRST_PORTRESET; writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); - clk_disable_unprepare(phy_drd->clk); + clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks); return 0; } @@ -479,7 +810,7 @@ static int exynos5_usbdrd_phy_exit(struct phy *phy) struct phy_usb_instance *inst = phy_get_drvdata(phy); struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); - ret = clk_prepare_enable(phy_drd->clk); + ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks); if (ret) return ret; @@ -501,7 +832,7 @@ static int exynos5_usbdrd_phy_exit(struct phy *phy) PHYTEST_POWERDOWN_HSP; writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); - clk_disable_unprepare(phy_drd->clk); + clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks); return 0; } @@ -514,47 +845,27 @@ static int exynos5_usbdrd_phy_power_on(struct phy *phy) dev_dbg(phy_drd->dev, "Request to power_on usbdrd_phy phy\n"); - 
clk_prepare_enable(phy_drd->ref_clk); - if (!phy_drd->drv_data->has_common_clk_gate) { - clk_prepare_enable(phy_drd->pipeclk); - clk_prepare_enable(phy_drd->utmiclk); - clk_prepare_enable(phy_drd->itpclk); - } + ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_core_clks, + phy_drd->core_clks); + if (ret) + return ret; /* Enable VBUS supply */ - if (phy_drd->vbus_boost) { - ret = regulator_enable(phy_drd->vbus_boost); - if (ret) { - dev_err(phy_drd->dev, - "Failed to enable VBUS boost supply\n"); - goto fail_vbus; - } - } - - if (phy_drd->vbus) { - ret = regulator_enable(phy_drd->vbus); - if (ret) { - dev_err(phy_drd->dev, "Failed to enable VBUS supply\n"); - goto fail_vbus_boost; - } + ret = regulator_bulk_enable(phy_drd->drv_data->n_regulators, + phy_drd->regulators); + if (ret) { + dev_err(phy_drd->dev, "Failed to enable PHY regulator(s)\n"); + goto fail_vbus; } - /* Power-on PHY*/ - inst->phy_cfg->phy_isol(inst, 0); + /* Power-on PHY */ + inst->phy_cfg->phy_isol(inst, false); return 0; -fail_vbus_boost: - if (phy_drd->vbus_boost) - regulator_disable(phy_drd->vbus_boost); - fail_vbus: - clk_disable_unprepare(phy_drd->ref_clk); - if (!phy_drd->drv_data->has_common_clk_gate) { - clk_disable_unprepare(phy_drd->itpclk); - clk_disable_unprepare(phy_drd->utmiclk); - clk_disable_unprepare(phy_drd->pipeclk); - } + clk_bulk_disable_unprepare(phy_drd->drv_data->n_core_clks, + phy_drd->core_clks); return ret; } @@ -567,20 +878,14 @@ static int exynos5_usbdrd_phy_power_off(struct phy *phy) dev_dbg(phy_drd->dev, "Request to power_off usbdrd_phy phy\n"); /* Power-off the PHY */ - inst->phy_cfg->phy_isol(inst, 1); + inst->phy_cfg->phy_isol(inst, true); /* Disable VBUS supply */ - if (phy_drd->vbus) - regulator_disable(phy_drd->vbus); - if (phy_drd->vbus_boost) - regulator_disable(phy_drd->vbus_boost); - - clk_disable_unprepare(phy_drd->ref_clk); - if (!phy_drd->drv_data->has_common_clk_gate) { - clk_disable_unprepare(phy_drd->itpclk); - clk_disable_unprepare(phy_drd->pipeclk); - clk_disable_unprepare(phy_drd->utmiclk); - } + regulator_bulk_disable(phy_drd->drv_data->n_regulators, + phy_drd->regulators); + + clk_bulk_disable_unprepare(phy_drd->drv_data->n_core_clks, + phy_drd->core_clks); return 0; } @@ -744,10 +1049,29 @@ static const struct phy_ops exynos5_usbdrd_phy_ops = { .owner = THIS_MODULE, }; +static void +exynos5_usbdrd_usb_v3p1_pipe_override(struct exynos5_usbdrd_phy *phy_drd) +{ + void __iomem *regs_base = phy_drd->reg_phy; + u32 reg; + + /* force pipe3 signal for link */ + reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL); + reg &= ~LINKCTRL_FORCE_PHYSTATUS; + reg |= LINKCTRL_FORCE_PIPE_EN | LINKCTRL_FORCE_RXELECIDLE; + writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL); + + /* PMA disable */ + reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); + reg |= SECPMACTL_PMA_LOW_PWR; + writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL); +} + static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd) { void __iomem *regs_base = phy_drd->reg_phy; u32 reg; + u32 ss_ports; /* * Disable HWACG (hardware auto clock gating control). 
This will force @@ -758,8 +1082,16 @@ static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd) reg |= LINKCTRL_FORCE_QACT; writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL); + reg = readl(regs_base + EXYNOS850_DRD_LINKPORT); + ss_ports = FIELD_GET(LINKPORT_HOST_NUM_U3, reg); + /* Start PHY Reset (POR=high) */ reg = readl(regs_base + EXYNOS850_DRD_CLKRST); + if (ss_ports) { + reg |= CLKRST_PHY20_SW_POR; + reg |= CLKRST_PHY20_SW_POR_SEL; + reg |= CLKRST_PHY_RESET_SEL; + } reg |= CLKRST_PHY_SW_RST; writel(reg, regs_base + EXYNOS850_DRD_CLKRST); @@ -787,22 +1119,58 @@ static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd) reg |= HSP_VBUSVLDEXT | HSP_VBUSVLDEXTSEL; writel(reg, regs_base + EXYNOS850_DRD_HSP); + reg = readl(regs_base + EXYNOS850_DRD_SSPPLLCTL); + reg &= ~SSPPLLCTL_FSEL; + switch (phy_drd->extrefclk) { + case EXYNOS5_FSEL_50MHZ: + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 7); + break; + case EXYNOS5_FSEL_26MHZ: + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 6); + break; + case EXYNOS5_FSEL_24MHZ: + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 2); + break; + case EXYNOS5_FSEL_20MHZ: + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 1); + break; + case EXYNOS5_FSEL_19MHZ2: + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 0); + break; + default: + dev_warn(phy_drd->dev, "unsupported ref clk: %#.2x\n", + phy_drd->extrefclk); + break; + } + writel(reg, regs_base + EXYNOS850_DRD_SSPPLLCTL); + + if (phy_drd->drv_data->phy_tunes) + exynos5_usbdrd_apply_phy_tunes(phy_drd, + PTS_UTMI_POSTINIT); + /* Power up PHY analog blocks */ reg = readl(regs_base + EXYNOS850_DRD_HSP_TEST); reg &= ~HSP_TEST_SIDDQ; writel(reg, regs_base + EXYNOS850_DRD_HSP_TEST); /* Finish PHY reset (POR=low) */ - udelay(10); /* required before doing POR=low */ + fsleep(10); /* required before doing POR=low */ reg = readl(regs_base + EXYNOS850_DRD_CLKRST); + if (ss_ports) { + reg |= CLKRST_PHY20_SW_POR_SEL; + reg &= ~CLKRST_PHY20_SW_POR; + } reg &= ~(CLKRST_PHY_SW_RST | CLKRST_PORT_RST); writel(reg, regs_base + EXYNOS850_DRD_CLKRST); - udelay(75); /* required after POR=low for guaranteed PHY clock */ + fsleep(75); /* required after POR=low for guaranteed PHY clock */ /* Disable single ended signal out */ reg = readl(regs_base + EXYNOS850_DRD_HSP); reg &= ~HSP_FSV_OUT_EN; writel(reg, regs_base + EXYNOS850_DRD_HSP); + + if (ss_ports) + exynos5_usbdrd_usb_v3p1_pipe_override(phy_drd); } static int exynos850_usbdrd_phy_init(struct phy *phy) @@ -811,14 +1179,14 @@ static int exynos850_usbdrd_phy_init(struct phy *phy) struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); int ret; - ret = clk_prepare_enable(phy_drd->clk); + ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks); if (ret) return ret; /* UTMI or PIPE3 specific init */ inst->phy_cfg->phy_init(phy_drd); - clk_disable_unprepare(phy_drd->clk); + clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks); return 0; } @@ -831,7 +1199,7 @@ static int exynos850_usbdrd_phy_exit(struct phy *phy) u32 reg; int ret; - ret = clk_prepare_enable(phy_drd->clk); + ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks); if (ret) return ret; @@ -850,11 +1218,11 @@ static int exynos850_usbdrd_phy_exit(struct phy *phy) reg = readl(regs_base + EXYNOS850_DRD_CLKRST); reg |= CLKRST_LINK_SW_RST; writel(reg, regs_base + EXYNOS850_DRD_CLKRST); - udelay(10); /* required before doing POR=low */ + fsleep(10); /* required before doing POR=low */ reg &= ~CLKRST_LINK_SW_RST; writel(reg, regs_base + EXYNOS850_DRD_CLKRST); - 
clk_disable_unprepare(phy_drd->clk); + clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks); return 0; } @@ -867,53 +1235,138 @@ static const struct phy_ops exynos850_usbdrd_phy_ops = { .owner = THIS_MODULE, }; -static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd) +static void exynos5_usbdrd_gs101_pipe3_init(struct exynos5_usbdrd_phy *phy_drd) { - unsigned long ref_rate; + void __iomem *regs_pma = phy_drd->reg_pma; + void __iomem *regs_phy = phy_drd->reg_phy; + u32 reg; + + exynos5_usbdrd_usbdp_g2_v4_ctrl_pma_ready(phy_drd); + + /* force aux off */ + reg = readl(regs_pma + EXYNOS9_PMA_USBDP_CMN_REG0008); + reg &= ~CMN_REG0008_AUX_EN; + reg |= CMN_REG0008_OVRD_AUX_EN; + writel(reg, regs_pma + EXYNOS9_PMA_USBDP_CMN_REG0008); + + exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_PREINIT); + exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_INIT); + exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_POSTINIT); + + exynos5_usbdrd_usbdp_g2_v4_pma_lane_mux_sel(phy_drd); + + /* reset release from port */ + reg = readl(regs_phy + EXYNOS850_DRD_SECPMACTL); + reg &= ~(SECPMACTL_PMA_TRSV_SW_RST | SECPMACTL_PMA_CMN_SW_RST | + SECPMACTL_PMA_INIT_SW_RST); + writel(reg, regs_phy + EXYNOS850_DRD_SECPMACTL); + + if (!exynos5_usbdrd_usbdp_g2_v4_pma_check_pll_lock(phy_drd)) + exynos5_usbdrd_usbdp_g2_v4_pma_check_cdr_lock(phy_drd); +} + +static int exynos5_usbdrd_gs101_phy_init(struct phy *phy) +{ + struct phy_usb_instance *inst = phy_get_drvdata(phy); + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); int ret; - phy_drd->clk = devm_clk_get(phy_drd->dev, "phy"); - if (IS_ERR(phy_drd->clk)) { - dev_err(phy_drd->dev, "Failed to get phy clock\n"); - return PTR_ERR(phy_drd->clk); + if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) { + /* Power-on PHY ... */ + ret = regulator_bulk_enable(phy_drd->drv_data->n_regulators, + phy_drd->regulators); + if (ret) { + dev_err(phy_drd->dev, + "Failed to enable PHY regulator(s)\n"); + return ret; + } } + /* + * ... and ungate power via PMU. 
Without this here, we get an SError + * trying to access PMA registers + */ + exynos5_usbdrd_phy_isol(inst, false); - phy_drd->ref_clk = devm_clk_get(phy_drd->dev, "ref"); - if (IS_ERR(phy_drd->ref_clk)) { - dev_err(phy_drd->dev, "Failed to get phy reference clock\n"); - return PTR_ERR(phy_drd->ref_clk); - } - ref_rate = clk_get_rate(phy_drd->ref_clk); + return exynos850_usbdrd_phy_init(phy); +} - ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk); - if (ret) { - dev_err(phy_drd->dev, "Clock rate (%ld) not supported\n", - ref_rate); +static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy) +{ + struct phy_usb_instance *inst = phy_get_drvdata(phy); + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); + int ret; + + if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI) + return 0; + + ret = exynos850_usbdrd_phy_exit(phy); + if (ret) return ret; - } - if (!phy_drd->drv_data->has_common_clk_gate) { - phy_drd->pipeclk = devm_clk_get(phy_drd->dev, "phy_pipe"); - if (IS_ERR(phy_drd->pipeclk)) { - dev_info(phy_drd->dev, - "PIPE3 phy operational clock not specified\n"); - phy_drd->pipeclk = NULL; - } + exynos5_usbdrd_phy_isol(inst, true); + return regulator_bulk_disable(phy_drd->drv_data->n_regulators, + phy_drd->regulators); +} - phy_drd->utmiclk = devm_clk_get(phy_drd->dev, "phy_utmi"); - if (IS_ERR(phy_drd->utmiclk)) { - dev_info(phy_drd->dev, - "UTMI phy operational clock not specified\n"); - phy_drd->utmiclk = NULL; - } +static const struct phy_ops gs101_usbdrd_phy_ops = { + .init = exynos5_usbdrd_gs101_phy_init, + .exit = exynos5_usbdrd_gs101_phy_exit, + .owner = THIS_MODULE, +}; + +static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd) +{ + int ret; + struct clk *ref_clk; + unsigned long ref_rate; + + phy_drd->clks = devm_kcalloc(phy_drd->dev, phy_drd->drv_data->n_clks, + sizeof(*phy_drd->clks), GFP_KERNEL); + if (!phy_drd->clks) + return -ENOMEM; + + for (int i = 0; i < phy_drd->drv_data->n_clks; ++i) + phy_drd->clks[i].id = phy_drd->drv_data->clk_names[i]; + + ret = devm_clk_bulk_get(phy_drd->dev, phy_drd->drv_data->n_clks, + phy_drd->clks); + if (ret) + return dev_err_probe(phy_drd->dev, ret, + "failed to get phy clock(s)\n"); + + phy_drd->core_clks = devm_kcalloc(phy_drd->dev, + phy_drd->drv_data->n_core_clks, + sizeof(*phy_drd->core_clks), + GFP_KERNEL); + if (!phy_drd->core_clks) + return -ENOMEM; - phy_drd->itpclk = devm_clk_get(phy_drd->dev, "itp"); - if (IS_ERR(phy_drd->itpclk)) { - dev_info(phy_drd->dev, - "ITP clock from main OSC not specified\n"); - phy_drd->itpclk = NULL; + for (int i = 0; i < phy_drd->drv_data->n_core_clks; ++i) + phy_drd->core_clks[i].id = phy_drd->drv_data->core_clk_names[i]; + + ret = devm_clk_bulk_get(phy_drd->dev, phy_drd->drv_data->n_core_clks, + phy_drd->core_clks); + if (ret) + return dev_err_probe(phy_drd->dev, ret, + "failed to get phy core clock(s)\n"); + + ref_clk = NULL; + for (int i = 0; i < phy_drd->drv_data->n_core_clks; ++i) { + if (!strcmp(phy_drd->core_clks[i].id, "ref")) { + ref_clk = phy_drd->core_clks[i].clk; + break; } } + if (!ref_clk) + return dev_err_probe(phy_drd->dev, -ENODEV, + "failed to find phy reference clock\n"); + + ref_rate = clk_get_rate(ref_clk); + ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk); + if (ret) + return dev_err_probe(phy_drd->dev, ret, + "clock rate (%ld) not supported\n", + ref_rate); return 0; } @@ -941,19 +1394,45 @@ static const struct exynos5_usbdrd_phy_config phy_cfg_exynos850[] = { }, }; +static const char * const exynos5_clk_names[] = { + "phy", +}; + +static const 
char * const exynos5_core_clk_names[] = { + "ref", +}; + +static const char * const exynos5433_core_clk_names[] = { + "ref", "phy_pipe", "phy_utmi", "itp", +}; + +static const char * const exynos5_regulator_names[] = { + "vbus", "vbus-boost", +}; + static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = { .phy_cfg = phy_cfg_exynos5, .phy_ops = &exynos5_usbdrd_phy_ops, .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, .pmu_offset_usbdrd1_phy = EXYNOS5420_USBDRD1_PHY_CONTROL, - .has_common_clk_gate = true, + .clk_names = exynos5_clk_names, + .n_clks = ARRAY_SIZE(exynos5_clk_names), + .core_clk_names = exynos5_core_clk_names, + .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names), + .regulator_names = exynos5_regulator_names, + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), }; static const struct exynos5_usbdrd_phy_drvdata exynos5250_usbdrd_phy = { .phy_cfg = phy_cfg_exynos5, .phy_ops = &exynos5_usbdrd_phy_ops, .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, - .has_common_clk_gate = true, + .clk_names = exynos5_clk_names, + .n_clks = ARRAY_SIZE(exynos5_clk_names), + .core_clk_names = exynos5_core_clk_names, + .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names), + .regulator_names = exynos5_regulator_names, + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), }; static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = { @@ -961,25 +1440,218 @@ static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = { .phy_ops = &exynos5_usbdrd_phy_ops, .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, .pmu_offset_usbdrd1_phy = EXYNOS5433_USBHOST30_PHY_CONTROL, - .has_common_clk_gate = false, + .clk_names = exynos5_clk_names, + .n_clks = ARRAY_SIZE(exynos5_clk_names), + .core_clk_names = exynos5433_core_clk_names, + .n_core_clks = ARRAY_SIZE(exynos5433_core_clk_names), + .regulator_names = exynos5_regulator_names, + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), }; static const struct exynos5_usbdrd_phy_drvdata exynos7_usbdrd_phy = { .phy_cfg = phy_cfg_exynos5, .phy_ops = &exynos5_usbdrd_phy_ops, .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, - .has_common_clk_gate = false, + .clk_names = exynos5_clk_names, + .n_clks = ARRAY_SIZE(exynos5_clk_names), + .core_clk_names = exynos5433_core_clk_names, + .n_core_clks = ARRAY_SIZE(exynos5433_core_clk_names), + .regulator_names = exynos5_regulator_names, + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), }; static const struct exynos5_usbdrd_phy_drvdata exynos850_usbdrd_phy = { .phy_cfg = phy_cfg_exynos850, .phy_ops = &exynos850_usbdrd_phy_ops, .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, - .has_common_clk_gate = true, + .clk_names = exynos5_clk_names, + .n_clks = ARRAY_SIZE(exynos5_clk_names), + .core_clk_names = exynos5_core_clk_names, + .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names), + .regulator_names = exynos5_regulator_names, + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), +}; + +static const struct exynos5_usbdrd_phy_config phy_cfg_gs101[] = { + { + .id = EXYNOS5_DRDPHY_UTMI, + .phy_isol = exynos5_usbdrd_phy_isol, + .phy_init = exynos850_usbdrd_utmi_init, + }, + { + .id = EXYNOS5_DRDPHY_PIPE3, + .phy_isol = exynos5_usbdrd_phy_isol, + .phy_init = exynos5_usbdrd_gs101_pipe3_init, + }, +}; + +static const struct exynos5_usbdrd_phy_tuning gs101_tunes_utmi_postinit[] = { + PHY_TUNING_ENTRY_PHY(EXYNOS850_DRD_HSPPARACON, + (HSPPARACON_TXVREF | HSPPARACON_TXRES | + HSPPARACON_TXPREEMPAMP | HSPPARACON_SQRX | + HSPPARACON_COMPDIS), + (FIELD_PREP_CONST(HSPPARACON_TXVREF, 6) | + 
FIELD_PREP_CONST(HSPPARACON_TXRES, 1) | + FIELD_PREP_CONST(HSPPARACON_TXPREEMPAMP, 3) | + FIELD_PREP_CONST(HSPPARACON_SQRX, 5) | + FIELD_PREP_CONST(HSPPARACON_COMPDIS, 7))), + PHY_TUNING_ENTRY_LAST +}; + +static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_preinit[] = { + /* preinit */ + /* CDR data mode exit GEN1 ON / GEN2 OFF */ + PHY_TUNING_ENTRY_PMA(0x0c8c, -1, 0xff), + PHY_TUNING_ENTRY_PMA(0x1c8c, -1, 0xff), + PHY_TUNING_ENTRY_PMA(0x0c9c, -1, 0x7d), + PHY_TUNING_ENTRY_PMA(0x1c9c, -1, 0x7d), + /* improve EDS distribution */ + PHY_TUNING_ENTRY_PMA(0x0e7c, -1, 0x06), + PHY_TUNING_ENTRY_PMA(0x09e0, -1, 0x00), + PHY_TUNING_ENTRY_PMA(0x09e4, -1, 0x36), + PHY_TUNING_ENTRY_PMA(0x1e7c, -1, 0x06), + PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x00), + PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x36), + /* improve LVCC */ + PHY_TUNING_ENTRY_PMA(0x08f0, -1, 0x30), + PHY_TUNING_ENTRY_PMA(0x18f0, -1, 0x30), + /* LFPS RX VIH shmoo hole */ + PHY_TUNING_ENTRY_PMA(0x0a08, -1, 0x0c), + PHY_TUNING_ENTRY_PMA(0x1a08, -1, 0x0c), + /* remove unrelated option for v4 phy */ + PHY_TUNING_ENTRY_PMA(0x0a0c, -1, 0x05), + PHY_TUNING_ENTRY_PMA(0x1a0c, -1, 0x05), + /* improve Gen2 LVCC */ + PHY_TUNING_ENTRY_PMA(0x00f8, -1, 0x1c), + PHY_TUNING_ENTRY_PMA(0x00fc, -1, 0x54), + /* Change Vth of RCV_DET because of TD 7.40 Polling Retry Test */ + PHY_TUNING_ENTRY_PMA(0x104c, -1, 0x07), + PHY_TUNING_ENTRY_PMA(0x204c, -1, 0x07), + /* reduce Ux Exit time, assuming 26MHz clock */ + /* Gen1 */ + PHY_TUNING_ENTRY_PMA(0x0ca8, -1, 0x00), + PHY_TUNING_ENTRY_PMA(0x0cac, -1, 0x04), + PHY_TUNING_ENTRY_PMA(0x1ca8, -1, 0x00), + PHY_TUNING_ENTRY_PMA(0x1cac, -1, 0x04), + /* Gen2 */ + PHY_TUNING_ENTRY_PMA(0x0cb8, -1, 0x00), + PHY_TUNING_ENTRY_PMA(0x0cbc, -1, 0x04), + PHY_TUNING_ENTRY_PMA(0x1cb8, -1, 0x00), + PHY_TUNING_ENTRY_PMA(0x1cbc, -1, 0x04), + /* RX impedance setting */ + PHY_TUNING_ENTRY_PMA(0x0bb0, 0x03, 0x01), + PHY_TUNING_ENTRY_PMA(0x0bb4, 0xf0, 0xa0), + PHY_TUNING_ENTRY_PMA(0x1bb0, 0x03, 0x01), + PHY_TUNING_ENTRY_PMA(0x1bb4, 0xf0, 0xa0), + + PHY_TUNING_ENTRY_LAST +}; + +static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_init[] = { + /* init */ + /* abnormal common pattern mask */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_BACK_END_MODE_VEC, + BACK_END_MODE_VEC_DISABLE_DATA_MASK, 0), + /* de-serializer enabled when U2 */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_OUT_VEC_2, PCS_OUT_VEC_B4_DYNAMIC, + PCS_OUT_VEC_B4_SEL_OUT), + /* TX Keeper Disable, Squelch on when U3 */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_OUT_VEC_3, PCS_OUT_VEC_B7_DYNAMIC, + PCS_OUT_VEC_B7_SEL_OUT | PCS_OUT_VEC_B2_SEL_OUT), + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_NS_VEC_PS1_N1, -1, + (FIELD_PREP_CONST(NS_VEC_NS_REQ, 5) | + NS_VEC_ENABLE_TIMER | + FIELD_PREP_CONST(NS_VEC_SEL_TIMEOUT, 3))), + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_NS_VEC_PS2_N0, -1, + (FIELD_PREP_CONST(NS_VEC_NS_REQ, 1) | + NS_VEC_ENABLE_TIMER | + FIELD_PREP_CONST(NS_VEC_SEL_TIMEOUT, 3) | + FIELD_PREP_CONST(NS_VEC_COND_MASK, 2) | + FIELD_PREP_CONST(NS_VEC_EXP_COND, 2))), + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_NS_VEC_PS3_N0, -1, + (FIELD_PREP_CONST(NS_VEC_NS_REQ, 1) | + NS_VEC_ENABLE_TIMER | + FIELD_PREP_CONST(NS_VEC_SEL_TIMEOUT, 3) | + FIELD_PREP_CONST(NS_VEC_COND_MASK, 7) | + FIELD_PREP_CONST(NS_VEC_EXP_COND, 7))), + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_TIMEOUT_0, -1, 112), + /* Block Aligner Type B */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_RX_CONTROL, 0, + RX_CONTROL_EN_BLOCK_ALIGNER_TYPE_B), + /* Block align at TS1/TS2 for Gen2 stability (Gen2 only) */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_RX_CONTROL_DEBUG, + 
RX_CONTROL_DEBUG_NUM_COM_FOUND, + (RX_CONTROL_DEBUG_EN_TS_CHECK | + /* + * increase pcs ts1 adding packet-cnt 1 --> 4 + * lnx_rx_valid_rstn_delay_rise_sp/ssp : + * 19.6us(0x200) -> 15.3us(0x4) + */ + FIELD_PREP_CONST(RX_CONTROL_DEBUG_NUM_COM_FOUND, 4))), + /* Gen1 Tx DRIVER pre-shoot, de-emphasis, level ctrl */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_HS_TX_COEF_MAP_0, + (HS_TX_COEF_MAP_0_SSTX_DEEMP | HS_TX_COEF_MAP_0_SSTX_LEVEL | + HS_TX_COEF_MAP_0_SSTX_PRE_SHOOT), + (FIELD_PREP_CONST(HS_TX_COEF_MAP_0_SSTX_DEEMP, 8) | + FIELD_PREP_CONST(HS_TX_COEF_MAP_0_SSTX_LEVEL, 0xb) | + FIELD_PREP_CONST(HS_TX_COEF_MAP_0_SSTX_PRE_SHOOT, 0))), + /* Gen2 Tx DRIVER level ctrl */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_LOCAL_COEF, + LOCAL_COEF_PMA_CENTER_COEF, + FIELD_PREP_CONST(LOCAL_COEF_PMA_CENTER_COEF, 0xb)), + /* Gen2 U1 exit LFPS duration : 900ns ~ 1.2us */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_TIMEOUT_3, -1, 4096), + /* set skp_remove_th 0x2 -> 0x7 for avoiding retry problem. */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_EBUF_PARAM, + EBUF_PARAM_SKP_REMOVE_TH_EMPTY_MODE, + FIELD_PREP_CONST(EBUF_PARAM_SKP_REMOVE_TH_EMPTY_MODE, 0x7)), + + PHY_TUNING_ENTRY_LAST +}; + +static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_postlock[] = { + /* Squelch off when U3 */ + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_OUT_VEC_3, PCS_OUT_VEC_B2_SEL_OUT, 0), + + PHY_TUNING_ENTRY_LAST +}; + +static const struct exynos5_usbdrd_phy_tuning *gs101_tunes[PTS_MAX] = { + [PTS_UTMI_POSTINIT] = gs101_tunes_utmi_postinit, + [PTS_PIPE3_PREINIT] = gs101_tunes_pipe3_preinit, + [PTS_PIPE3_INIT] = gs101_tunes_pipe3_init, + [PTS_PIPE3_POSTLOCK] = gs101_tunes_pipe3_postlock, +}; + +static const char * const gs101_clk_names[] = { + "phy", "ctrl_aclk", "ctrl_pclk", "scl_pclk", +}; + +static const char * const gs101_regulator_names[] = { + "pll", + "dvdd-usb20", "vddh-usb20", "vdd33-usb20", + "vdda-usbdp", "vddh-usbdp", +}; + +static const struct exynos5_usbdrd_phy_drvdata gs101_usbd31rd_phy = { + .phy_cfg = phy_cfg_gs101, + .phy_tunes = gs101_tunes, + .phy_ops = &gs101_usbdrd_phy_ops, + .pmu_offset_usbdrd0_phy = GS101_PHY_CTRL_USB20, + .pmu_offset_usbdrd0_phy_ss = GS101_PHY_CTRL_USBDP, + .clk_names = gs101_clk_names, + .n_clks = ARRAY_SIZE(gs101_clk_names), + .core_clk_names = exynos5_core_clk_names, + .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names), + .regulator_names = gs101_regulator_names, + .n_regulators = ARRAY_SIZE(gs101_regulator_names), }; static const struct of_device_id exynos5_usbdrd_phy_of_match[] = { { + .compatible = "google,gs101-usb31drd-phy", + .data = &gs101_usbd31rd_phy + }, { .compatible = "samsung,exynos5250-usbdrd-phy", .data = &exynos5250_usbdrd_phy }, { @@ -1018,21 +1690,38 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev) dev_set_drvdata(dev, phy_drd); phy_drd->dev = dev; - phy_drd->reg_phy = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(phy_drd->reg_phy)) - return PTR_ERR(phy_drd->reg_phy); - drv_data = of_device_get_match_data(dev); if (!drv_data) return -EINVAL; - phy_drd->drv_data = drv_data; + if (of_property_present(dev->of_node, "reg-names")) { + void __iomem *reg; + + reg = devm_platform_ioremap_resource_byname(pdev, "phy"); + if (IS_ERR(reg)) + return PTR_ERR(reg); + phy_drd->reg_phy = reg; + + reg = devm_platform_ioremap_resource_byname(pdev, "pcs"); + if (IS_ERR(reg)) + return PTR_ERR(reg); + phy_drd->reg_pcs = reg; + + reg = devm_platform_ioremap_resource_byname(pdev, "pma"); + if (IS_ERR(reg)) + return PTR_ERR(reg); + phy_drd->reg_pma = reg; + } else { + /* DTB with just a single region */ 
+ phy_drd->reg_phy = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(phy_drd->reg_phy)) + return PTR_ERR(phy_drd->reg_phy); + } + ret = exynos5_usbdrd_phy_clk_handle(phy_drd); - if (ret) { - dev_err(dev, "Failed to initialize clocks\n"); + if (ret) return ret; - } reg_pmu = syscon_regmap_lookup_by_phandle(dev->of_node, "samsung,pmu-syscon"); @@ -1050,36 +1739,20 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev) if (channel < 0) dev_dbg(dev, "Not a multi-controller usbdrd phy\n"); - switch (channel) { - case 1: - pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd1_phy; - break; - case 0: - default: - pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd0_phy; - break; - } - - /* Get Vbus regulators */ - phy_drd->vbus = devm_regulator_get(dev, "vbus"); - if (IS_ERR(phy_drd->vbus)) { - ret = PTR_ERR(phy_drd->vbus); - if (ret == -EPROBE_DEFER) - return ret; - - dev_warn(dev, "Failed to get VBUS supply regulator\n"); - phy_drd->vbus = NULL; - } - - phy_drd->vbus_boost = devm_regulator_get(dev, "vbus-boost"); - if (IS_ERR(phy_drd->vbus_boost)) { - ret = PTR_ERR(phy_drd->vbus_boost); - if (ret == -EPROBE_DEFER) - return ret; - - dev_warn(dev, "Failed to get VBUS boost supply regulator\n"); - phy_drd->vbus_boost = NULL; - } + /* Get regulators */ + phy_drd->regulators = devm_kcalloc(dev, + drv_data->n_regulators, + sizeof(*phy_drd->regulators), + GFP_KERNEL); + if (!phy_drd->regulators) + return -ENOMEM; + regulator_bulk_set_supply_names(phy_drd->regulators, + drv_data->regulator_names, + drv_data->n_regulators); + ret = devm_regulator_bulk_get(dev, drv_data->n_regulators, + phy_drd->regulators); + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); dev_vdbg(dev, "Creating usbdrd_phy phy\n"); @@ -1094,6 +1767,18 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev) phy_drd->phys[i].phy = phy; phy_drd->phys[i].index = i; phy_drd->phys[i].reg_pmu = reg_pmu; + switch (channel) { + case 1: + pmu_offset = drv_data->pmu_offset_usbdrd1_phy; + break; + case 0: + default: + pmu_offset = drv_data->pmu_offset_usbdrd0_phy; + if (i == EXYNOS5_DRDPHY_PIPE3 && drv_data + ->pmu_offset_usbdrd0_phy_ss) + pmu_offset = drv_data->pmu_offset_usbdrd0_phy_ss; + break; + } phy_drd->phys[i].pmu_offset = pmu_offset; phy_drd->phys[i].phy_cfg = &drv_data->phy_cfg[i]; phy_set_drvdata(phy, &phy_drd->phys[i]); diff --git a/drivers/phy/samsung/phy-exynos5250-usb2.c b/drivers/phy/samsung/phy-exynos5250-usb2.c index e198010e1bfd..21b06072f866 100644 --- a/drivers/phy/samsung/phy-exynos5250-usb2.c +++ b/drivers/phy/samsung/phy-exynos5250-usb2.c @@ -121,7 +121,7 @@ #define EXYNOS_5420_USB_ISOL_HOST_OFFSET 0x70C #define EXYNOS_5250_USB_ISOL_ENABLE BIT(0) -/* Mode swtich register */ +/* Mode switch register */ #define EXYNOS_5250_MODE_SWITCH_OFFSET 0x230 #define EXYNOS_5250_MODE_SWITCH_MASK 1 #define EXYNOS_5250_MODE_SWITCH_DEVICE 0 diff --git a/drivers/phy/st/phy-miphy28lp.c b/drivers/phy/st/phy-miphy28lp.c index 063fc38788ed..43cef89af55e 100644 --- a/drivers/phy/st/phy-miphy28lp.c +++ b/drivers/phy/st/phy-miphy28lp.c @@ -228,11 +228,6 @@ struct miphy28lp_dev { int nphys; }; -struct miphy_initval { - u16 reg; - u16 val; -}; - enum miphy_sata_gen { SATA_GEN1, SATA_GEN2, SATA_GEN3 }; static char *PHY_TYPE_name[] = { "sata-up", "pcie-up", "", "usb3-up" }; diff --git a/drivers/phy/starfive/Kconfig b/drivers/phy/starfive/Kconfig index 9508e2143011..d0cdd7cb4a13 100644 --- a/drivers/phy/starfive/Kconfig +++ b/drivers/phy/starfive/Kconfig @@ -15,6 +15,16 @@ config 
PHY_STARFIVE_JH7110_DPHY_RX system. If M is selected, the module will be called phy-jh7110-dphy-rx.ko. +config PHY_STARFIVE_JH7110_DPHY_TX + tristate "StarFive JH7110 D-PHY TX Support" + depends on HAS_IOMEM + select GENERIC_PHY + select GENERIC_PHY_MIPI_DPHY + help + Choose this option if you have a StarFive D-PHY TX in your + system. If M is selected, the module will be called + phy-jh7110-dphy-tx.ko. + config PHY_STARFIVE_JH7110_PCIE tristate "Starfive JH7110 PCIE 2.0/USB 3.0 PHY support" depends on HAS_IOMEM diff --git a/drivers/phy/starfive/Makefile b/drivers/phy/starfive/Makefile index b391018b7c47..eedc4a6fec15 100644 --- a/drivers/phy/starfive/Makefile +++ b/drivers/phy/starfive/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_PHY_STARFIVE_JH7110_DPHY_RX) += phy-jh7110-dphy-rx.o +obj-$(CONFIG_PHY_STARFIVE_JH7110_DPHY_TX) += phy-jh7110-dphy-tx.o obj-$(CONFIG_PHY_STARFIVE_JH7110_PCIE) += phy-jh7110-pcie.o obj-$(CONFIG_PHY_STARFIVE_JH7110_USB) += phy-jh7110-usb.o diff --git a/drivers/phy/starfive/phy-jh7110-dphy-rx.c b/drivers/phy/starfive/phy-jh7110-dphy-rx.c index 037a9e0263cd..0b039e1f71c5 100644 --- a/drivers/phy/starfive/phy-jh7110-dphy-rx.c +++ b/drivers/phy/starfive/phy-jh7110-dphy-rx.c @@ -46,11 +46,6 @@ #define STF_MAP_LANES_NUM 6 -struct regval { - u32 addr; - u32 val; -}; - struct stf_dphy_info { /** * @maps: diff --git a/drivers/phy/starfive/phy-jh7110-dphy-tx.c b/drivers/phy/starfive/phy-jh7110-dphy-tx.c new file mode 100644 index 000000000000..c64d1c91b130 --- /dev/null +++ b/drivers/phy/starfive/phy-jh7110-dphy-tx.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * DPHY TX driver for the StarFive JH7110 SoC + * + * Copyright (C) 2023 StarFive Technology Co., Ltd. + * Author: Keith Zhao <keith.zhao@starfivetech.com> + * Author: Shengyang Chen <shengyang.chen@starfivetech.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-mipi-dphy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> + +#define STF_DPHY_APBIFSAIF_SYSCFG(x) (x) + +#define STF_DPHY_AON_POWER_READY_N_ACTIVE 0 +#define STF_DPHY_AON_POWER_READY_N BIT(0) +#define STF_DPHY_CFG_L0_SWAP_SEL GENMASK(14, 12) +#define STF_DPHY_CFG_L1_SWAP_SEL GENMASK(17, 15) +#define STF_DPHY_CFG_L2_SWAP_SEL GENMASK(20, 18) +#define STF_DPHY_CFG_L3_SWAP_SEL GENMASK(23, 21) +#define STF_DPHY_CFG_L4_SWAP_SEL GENMASK(26, 24) +#define STF_DPHY_RGS_CDTX_PLL_UNLOCK BIT(18) +#define STF_DPHY_RG_CDTX_L0N_HSTX_RES GENMASK(23, 19) +#define STF_DPHY_RG_CDTX_L0P_HSTX_RES GENMASK(28, 24) + +#define STF_DPHY_RG_CDTX_L1P_HSTX_RES GENMASK(9, 5) +#define STF_DPHY_RG_CDTX_L2N_HSTX_RES GENMASK(14, 10) +#define STF_DPHY_RG_CDTX_L2P_HSTX_RES GENMASK(19, 15) +#define STF_DPHY_RG_CDTX_L3N_HSTX_RES GENMASK(24, 20) +#define STF_DPHY_RG_CDTX_L3P_HSTX_RES GENMASK(29, 25) + +#define STF_DPHY_RG_CDTX_L4N_HSTX_RES GENMASK(4, 0) +#define STF_DPHY_RG_CDTX_L4P_HSTX_RES GENMASK(9, 5) +#define STF_DPHY_RG_CDTX_PLL_FBK_FRA GENMASK(23, 0) + +#define STF_DPHY_RG_CDTX_PLL_FBK_INT GENMASK(8, 0) +#define STF_DPHY_RG_CDTX_PLL_FM_EN BIT(9) +#define STF_DPHY_RG_CDTX_PLL_LDO_STB_X2_EN BIT(10) +#define STF_DPHY_RG_CDTX_PLL_PRE_DIV GENMASK(12, 11) + +#define STF_DPHY_RG_CDTX_PLL_SSC_EN BIT(18) + +#define STF_DPHY_RG_CLANE_HS_CLK_POST_TIME GENMASK(7, 0) +#define 
STF_DPHY_RG_CLANE_HS_CLK_PRE_TIME GENMASK(15, 8) +#define STF_DPHY_RG_CLANE_HS_PRE_TIME GENMASK(23, 16) +#define STF_DPHY_RG_CLANE_HS_TRAIL_TIME GENMASK(31, 24) + +#define STF_DPHY_RG_CLANE_HS_ZERO_TIME GENMASK(7, 0) +#define STF_DPHY_RG_DLANE_HS_PRE_TIME GENMASK(15, 8) +#define STF_DPHY_RG_DLANE_HS_TRAIL_TIME GENMASK(23, 16) +#define STF_DPHY_RG_DLANE_HS_ZERO_TIME GENMASK(31, 24) + +#define STF_DPHY_RG_EXTD_CYCLE_SEL GENMASK(2, 0) +#define STF_DPHY_SCFG_C_HS_PRE_ZERO_TIME GENMASK(31, 0) + +#define STF_DPHY_SCFG_DSI_TXREADY_ESC_SEL GENMASK(2, 1) +#define STF_DPHY_SCFG_PPI_C_READY_SEL GENMASK(4, 3) + +#define STF_DPHY_REFCLK_IN_SEL GENMASK(28, 26) +#define STF_DPHY_RESETB BIT(29) + +#define STF_DPHY_REFCLK_12M 1 +#define STF_DPHY_BITRATE_ALIGN 10000000 + +#define STF_MAP_LANES_NUM 5 + +#define STF_DPHY_LSHIFT_16(x) (FIELD_PREP(GENMASK(23, 16), (x))) +#define STF_DPHY_LSHIFT_8(x) (FIELD_PREP(GENMASK(15, 8), (x))) + +#define STF_DPHY_HW_DELAY_US 200 +#define STF_DPHY_HW_TIMEOUT_US 5000 + +struct stf_dphy_config { + unsigned long bitrate; + u32 pll_fbk_int; + u32 pll_fbk_fra_val; + u32 extd_cycle_sel; + u32 dlane_hs_pre_time; + u32 dlane_hs_zero_time; + u32 dlane_hs_trail_time; + u32 clane_hs_pre_time; + u32 clane_hs_zero_time; + u32 clane_hs_trail_time; + u32 clane_hs_clk_pre_time; + u32 clane_hs_clk_post_time; +}; + +static const struct stf_dphy_config reg_configs[] = { + {160000000, 0x6a, 0xaa, 0x3, 0xa, 0x17, 0x11, 0x5, 0x2b, 0xd, 0x7, 0x3d}, + {170000000, 0x71, 0x55, 0x3, 0xb, 0x18, 0x11, 0x5, 0x2e, 0xd, 0x7, 0x3d}, + {180000000, 0x78, 0x0, 0x3, 0xb, 0x19, 0x12, 0x6, 0x30, 0xe, 0x7, 0x3e}, + {190000000, 0x7e, 0xaa, 0x3, 0xc, 0x1a, 0x12, 0x6, 0x33, 0xe, 0x7, 0x3e}, + {200000000, 0x85, 0x55, 0x3, 0xc, 0x1b, 0x13, 0x7, 0x35, 0xf, 0x7, 0x3f}, + {320000000, 0x6a, 0xaa, 0x2, 0x8, 0x14, 0xf, 0x5, 0x2b, 0xd, 0x3, 0x23}, + {330000000, 0x6e, 0x0, 0x2, 0x8, 0x15, 0xf, 0x5, 0x2d, 0xd, 0x3, 0x23}, + {340000000, 0x71, 0x55, 0x2, 0x9, 0x15, 0xf, 0x5, 0x2e, 0xd, 0x3, 0x23}, + {350000000, 0x74, 0xaa, 0x2, 0x9, 0x15, 0x10, 0x6, 0x2f, 0xe, 0x3, 0x24}, + {360000000, 0x78, 0x0, 0x2, 0x9, 0x16, 0x10, 0x6, 0x30, 0xe, 0x3, 0x24}, + {370000000, 0x7b, 0x55, 0x2, 0x9, 0x17, 0x10, 0x6, 0x32, 0xe, 0x3, 0x24}, + {380000000, 0x7e, 0xaa, 0x2, 0xa, 0x17, 0x10, 0x6, 0x33, 0xe, 0x3, 0x24}, + {390000000, 0x82, 0x0, 0x2, 0xa, 0x17, 0x11, 0x6, 0x35, 0xf, 0x3, 0x25}, + {400000000, 0x85, 0x55, 0x2, 0xa, 0x18, 0x11, 0x7, 0x35, 0xf, 0x3, 0x25}, + {410000000, 0x88, 0xaa, 0x2, 0xa, 0x19, 0x11, 0x7, 0x37, 0xf, 0x3, 0x25}, + {420000000, 0x8c, 0x0, 0x2, 0xa, 0x19, 0x12, 0x7, 0x38, 0x10, 0x3, 0x26}, + {430000000, 0x8f, 0x55, 0x2, 0xb, 0x19, 0x12, 0x7, 0x39, 0x10, 0x3, 0x26}, + {440000000, 0x92, 0xaa, 0x2, 0xb, 0x1a, 0x12, 0x7, 0x3b, 0x10, 0x3, 0x26}, + {450000000, 0x96, 0x0, 0x2, 0xb, 0x1b, 0x12, 0x8, 0x3c, 0x10, 0x3, 0x26}, + {460000000, 0x99, 0x55, 0x2, 0xb, 0x1b, 0x13, 0x8, 0x3d, 0x11, 0x3, 0x27}, + {470000000, 0x9c, 0xaa, 0x2, 0xc, 0x1b, 0x13, 0x8, 0x3e, 0x11, 0x3, 0x27}, + {480000000, 0xa0, 0x27, 0x2, 0xc, 0x1c, 0x13, 0x8, 0x40, 0x11, 0x3, 0x27}, + {490000000, 0xa3, 0x55, 0x2, 0xc, 0x1d, 0x14, 0x8, 0x42, 0x12, 0x3, 0x28}, + {500000000, 0xa6, 0xaa, 0x2, 0xc, 0x1d, 0x14, 0x9, 0x42, 0x12, 0x3, 0x28}, + {510000000, 0xaa, 0x0, 0x2, 0xc, 0x1e, 0x14, 0x9, 0x44, 0x12, 0x3, 0x28}, + {520000000, 0xad, 0x55, 0x2, 0xd, 0x1e, 0x15, 0x9, 0x45, 0x13, 0x3, 0x29}, + {530000000, 0xb0, 0xaa, 0x2, 0xd, 0x1e, 0x15, 0x9, 0x47, 0x13, 0x3, 0x29}, + {540000000, 0xb4, 0x0, 0x2, 0xd, 0x1f, 0x15, 0x9, 0x48, 0x13, 0x3, 0x29}, + {550000000, 0xb7, 0x55, 0x2, 
0xd, 0x20, 0x16, 0x9, 0x4a, 0x14, 0x3, 0x2a}, + {560000000, 0xba, 0xaa, 0x2, 0xe, 0x20, 0x16, 0xa, 0x4a, 0x14, 0x3, 0x2a}, + {570000000, 0xbe, 0x0, 0x2, 0xe, 0x20, 0x16, 0xa, 0x4c, 0x14, 0x3, 0x2a}, + {580000000, 0xc1, 0x55, 0x2, 0xe, 0x21, 0x16, 0xa, 0x4d, 0x14, 0x3, 0x2a}, + {590000000, 0xc4, 0xaa, 0x2, 0xe, 0x22, 0x17, 0xa, 0x4f, 0x15, 0x3, 0x2b}, + {600000000, 0xc8, 0x0, 0x2, 0xe, 0x23, 0x17, 0xa, 0x50, 0x15, 0x3, 0x2b}, + {610000000, 0xcb, 0x55, 0x2, 0xf, 0x22, 0x17, 0xb, 0x50, 0x15, 0x3, 0x2b}, + {620000000, 0xce, 0xaa, 0x2, 0xf, 0x23, 0x18, 0xb, 0x52, 0x16, 0x3, 0x2c}, + {630000000, 0x69, 0x0, 0x1, 0x7, 0x12, 0xd, 0x5, 0x2a, 0xc, 0x1, 0x15}, + {640000000, 0x6a, 0xaa, 0x1, 0x7, 0x13, 0xe, 0x5, 0x2b, 0xd, 0x1, 0x16}, + {650000000, 0x6c, 0x55, 0x1, 0x7, 0x13, 0xe, 0x5, 0x2c, 0xd, 0x1, 0x16}, + {660000000, 0x6e, 0x0, 0x1, 0x7, 0x13, 0xe, 0x5, 0x2d, 0xd, 0x1, 0x16}, + {670000000, 0x6f, 0xaa, 0x1, 0x8, 0x13, 0xe, 0x5, 0x2d, 0xd, 0x1, 0x16}, + {680000000, 0x71, 0x55, 0x1, 0x8, 0x13, 0xe, 0x5, 0x2e, 0xd, 0x1, 0x16}, + {690000000, 0x73, 0x0, 0x1, 0x8, 0x14, 0xe, 0x6, 0x2e, 0xd, 0x1, 0x16}, + {700000000, 0x74, 0xaa, 0x1, 0x8, 0x14, 0xf, 0x6, 0x2f, 0xe, 0x1, 0x16}, + {710000000, 0x76, 0x55, 0x1, 0x8, 0x14, 0xf, 0x6, 0x2f, 0xe, 0x1, 0x17}, + {720000000, 0x78, 0x0, 0x1, 0x8, 0x15, 0xf, 0x6, 0x30, 0xe, 0x1, 0x17}, + {730000000, 0x79, 0xaa, 0x1, 0x8, 0x15, 0xf, 0x6, 0x31, 0xe, 0x1, 0x17}, + {740000000, 0x7b, 0x55, 0x1, 0x8, 0x15, 0xf, 0x6, 0x32, 0xe, 0x1, 0x17}, + {750000000, 0x7d, 0x0, 0x1, 0x8, 0x16, 0xf, 0x6, 0x32, 0xe, 0x1, 0x17}, + {760000000, 0x7e, 0xaa, 0x1, 0x9, 0x15, 0xf, 0x6, 0x33, 0xe, 0x1, 0x17}, + {770000000, 0x80, 0x55, 0x1, 0x9, 0x15, 0x10, 0x6, 0x34, 0xf, 0x1, 0x18}, + {780000000, 0x82, 0x0, 0x1, 0x9, 0x16, 0x10, 0x6, 0x35, 0xf, 0x1, 0x18,}, + {790000000, 0x83, 0xaa, 0x1, 0x9, 0x16, 0x10, 0x7, 0x34, 0xf, 0x1, 0x18}, + {800000000, 0x85, 0x55, 0x1, 0x9, 0x17, 0x10, 0x7, 0x35, 0xf, 0x1, 0x18}, + {810000000, 0x87, 0x0, 0x1, 0x9, 0x17, 0x10, 0x7, 0x36, 0xf, 0x1, 0x18}, + {820000000, 0x88, 0xaa, 0x1, 0x9, 0x17, 0x10, 0x7, 0x37, 0xf, 0x1, 0x18}, + {830000000, 0x8a, 0x55, 0x1, 0x9, 0x18, 0x10, 0x7, 0x37, 0xf, 0x1, 0x18}, + {840000000, 0x8c, 0x0, 0x1, 0x9, 0x18, 0x11, 0x7, 0x38, 0x10, 0x1, 0x19}, + {850000000, 0x8d, 0xaa, 0x1, 0xa, 0x17, 0x11, 0x7, 0x39, 0x10, 0x1, 0x19}, + {860000000, 0x8f, 0x55, 0x1, 0xa, 0x18, 0x11, 0x7, 0x39, 0x10, 0x1, 0x19}, + {870000000, 0x91, 0x0, 0x1, 0xa, 0x18, 0x11, 0x7, 0x3a, 0x10, 0x1, 0x19}, + {880000000, 0x92, 0xaa, 0x1, 0xa, 0x18, 0x11, 0x7, 0x3b, 0x10, 0x1, 0x19}, + {890000000, 0x94, 0x55, 0x1, 0xa, 0x19, 0x11, 0x7, 0x3c, 0x10, 0x1, 0x19}, + {900000000, 0x96, 0x0, 0x1, 0xa, 0x19, 0x12, 0x8, 0x3c, 0x10, 0x1, 0x19}, + {910000000, 0x97, 0xaa, 0x1, 0xa, 0x19, 0x12, 0x8, 0x3c, 0x11, 0x1, 0x1a}, + {920000000, 0x99, 0x55, 0x1, 0xa, 0x1a, 0x12, 0x8, 0x3d, 0x11, 0x1, 0x1a}, + {930000000, 0x9b, 0x0, 0x1, 0xa, 0x1a, 0x12, 0x8, 0x3e, 0x11, 0x1, 0x1a}, + {940000000, 0x9c, 0xaa, 0x1, 0xb, 0x1a, 0x12, 0x8, 0x3e, 0x11, 0x1, 0x1a}, + {950000000, 0x9e, 0x55, 0x1, 0xb, 0x1a, 0x12, 0x8, 0x3f, 0x11, 0x1, 0x1a}, + {960000000, 0xa0, 0x0, 0x1, 0xb, 0x1a, 0x12, 0x8, 0x40, 0x11, 0x1, 0x1a}, + {970000000, 0xa1, 0xaa, 0x1, 0xb, 0x1b, 0x13, 0x8, 0x41, 0x12, 0x1, 0x1b}, + {980000000, 0xa3, 0x55, 0x1, 0xb, 0x1b, 0x13, 0x8, 0x42, 0x12, 0x1, 0x1b}, + {990000000, 0xa5, 0x0, 0x1, 0xb, 0x1b, 0x13, 0x8, 0x42, 0x12, 0x1, 0x1b}, + {1000000000, 0xa6, 0xaa, 0x1, 0xb, 0x1c, 0x13, 0x9, 0x42, 0x12, 0x1, 0x1b}, +}; + +struct stf_dphy_info { + /** + * @maps: + * + * Physical lanes and logic lanes 
mapping table. + * + * The default order is: + * [data lane 0, data lane 1, data lane 2, date lane 3, clk lane] + */ + u8 maps[STF_MAP_LANES_NUM]; +}; + +struct stf_dphy { + struct device *dev; + void __iomem *topsys; + struct clk *txesc_clk; + struct reset_control *sys_rst; + + struct phy_configure_opts_mipi_dphy config; + + struct phy *phy; + const struct stf_dphy_info *info; +}; + +static u32 stf_dphy_get_config_index(u32 bitrate) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(reg_configs); i++) { + if (reg_configs[i].bitrate == bitrate) + return i; + } + + return 0; +} + +static void stf_dphy_hw_reset(struct stf_dphy *dphy, int assert) +{ + int rc; + u32 status = 0; + + writel(FIELD_PREP(STF_DPHY_RESETB, assert), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(100)); + + if (assert) { + rc = readl_poll_timeout_atomic(dphy->topsys + + STF_DPHY_APBIFSAIF_SYSCFG(8), + status, + !(FIELD_GET(STF_DPHY_RGS_CDTX_PLL_UNLOCK, status)), + STF_DPHY_HW_DELAY_US, STF_DPHY_HW_TIMEOUT_US); + if (rc) + dev_err(dphy->dev, "MIPI dphy-tx # PLL Locked\n"); + } +} + +static int stf_dphy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + struct stf_dphy *dphy = phy_get_drvdata(phy); + const struct stf_dphy_info *info = dphy->info; + const struct stf_dphy_config *p = reg_configs; + unsigned long alignment = STF_DPHY_BITRATE_ALIGN; + u32 bitrate = opts->mipi_dphy.hs_clk_rate; + u32 tmp; + u32 i; + + if (bitrate % alignment) + bitrate += alignment - (bitrate % alignment); + + i = stf_dphy_get_config_index(bitrate); + + tmp = readl(dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(100)); + tmp &= ~STF_DPHY_REFCLK_IN_SEL; + tmp |= FIELD_PREP(STF_DPHY_REFCLK_IN_SEL, STF_DPHY_REFCLK_12M); + writel(tmp, dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(100)); + + writel(FIELD_PREP(STF_DPHY_RG_CDTX_L0N_HSTX_RES, 0x10) | + FIELD_PREP(STF_DPHY_RG_CDTX_L0P_HSTX_RES, 0x10), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(8)); + + writel(FIELD_PREP(STF_DPHY_RG_CDTX_L0N_HSTX_RES, 0x10) | + FIELD_PREP(STF_DPHY_RG_CDTX_L2N_HSTX_RES, 0x10) | + FIELD_PREP(STF_DPHY_RG_CDTX_L3N_HSTX_RES, 0x10) | + FIELD_PREP(STF_DPHY_RG_CDTX_L1P_HSTX_RES, 0x10) | + FIELD_PREP(STF_DPHY_RG_CDTX_L2P_HSTX_RES, 0x10) | + FIELD_PREP(STF_DPHY_RG_CDTX_L3P_HSTX_RES, 0x10), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(12)); + + writel(FIELD_PREP(STF_DPHY_RG_CDTX_L4N_HSTX_RES, 0x10) | + FIELD_PREP(STF_DPHY_RG_CDTX_L4P_HSTX_RES, 0x10), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(16)); + + /* Lane setting */ + writel(FIELD_PREP(STF_DPHY_AON_POWER_READY_N, + STF_DPHY_AON_POWER_READY_N_ACTIVE) | + FIELD_PREP(STF_DPHY_CFG_L0_SWAP_SEL, info->maps[0]) | + FIELD_PREP(STF_DPHY_CFG_L1_SWAP_SEL, info->maps[1]) | + FIELD_PREP(STF_DPHY_CFG_L2_SWAP_SEL, info->maps[2]) | + FIELD_PREP(STF_DPHY_CFG_L3_SWAP_SEL, info->maps[3]) | + FIELD_PREP(STF_DPHY_CFG_L4_SWAP_SEL, info->maps[4]), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(0)); + + /* PLL setting */ + writel(FIELD_PREP(STF_DPHY_RG_CDTX_PLL_SSC_EN, 0x0), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(28)); + + writel(FIELD_PREP(STF_DPHY_RG_CDTX_PLL_LDO_STB_X2_EN, 0x1) | + FIELD_PREP(STF_DPHY_RG_CDTX_PLL_FM_EN, 0x1) | + FIELD_PREP(STF_DPHY_RG_CDTX_PLL_PRE_DIV, 0x0) | + FIELD_PREP(STF_DPHY_RG_CDTX_PLL_FBK_INT, p[i].pll_fbk_int), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(24)); + + writel(FIELD_PREP(STF_DPHY_RG_CDTX_PLL_FBK_FRA, + STF_DPHY_LSHIFT_16(p[i].pll_fbk_fra_val) | + STF_DPHY_LSHIFT_8(p[i].pll_fbk_fra_val) | + p[i].pll_fbk_fra_val), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(20)); + + writel(FIELD_PREP(STF_DPHY_RG_EXTD_CYCLE_SEL, 
p[i].extd_cycle_sel), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(40)); + + writel(FIELD_PREP(STF_DPHY_RG_DLANE_HS_PRE_TIME, p[i].dlane_hs_pre_time) | + FIELD_PREP(STF_DPHY_RG_DLANE_HS_ZERO_TIME, p[i].dlane_hs_zero_time) | + FIELD_PREP(STF_DPHY_RG_DLANE_HS_TRAIL_TIME, p[i].dlane_hs_trail_time) | + FIELD_PREP(STF_DPHY_RG_CLANE_HS_ZERO_TIME, p[i].clane_hs_zero_time), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(36)); + + writel(FIELD_PREP(STF_DPHY_RG_CLANE_HS_PRE_TIME, p[i].clane_hs_pre_time) | + FIELD_PREP(STF_DPHY_RG_CLANE_HS_TRAIL_TIME, p[i].clane_hs_trail_time) | + FIELD_PREP(STF_DPHY_RG_CLANE_HS_CLK_PRE_TIME, p[i].clane_hs_clk_pre_time) | + FIELD_PREP(STF_DPHY_RG_CLANE_HS_CLK_POST_TIME, p[i].clane_hs_clk_post_time), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(32)); + + return 0; +} + +static int stf_dphy_init(struct phy *phy) +{ + struct stf_dphy *dphy = phy_get_drvdata(phy); + int ret; + + stf_dphy_hw_reset(dphy, 1); + + writel(FIELD_PREP(STF_DPHY_SCFG_PPI_C_READY_SEL, 0) | + FIELD_PREP(STF_DPHY_SCFG_DSI_TXREADY_ESC_SEL, 0), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(48)); + + writel(FIELD_PREP(STF_DPHY_SCFG_C_HS_PRE_ZERO_TIME, 0x30), + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(44)); + + ret = clk_prepare_enable(dphy->txesc_clk); + if (ret) { + dev_err(dphy->dev, "Failed to prepare/enable txesc_clk\n"); + return ret; + } + + ret = reset_control_deassert(dphy->sys_rst); + if (ret) { + dev_err(dphy->dev, "Failed to deassert sys_rst\n"); + return ret; + } + + return 0; +} + +static int stf_dphy_exit(struct phy *phy) +{ + struct stf_dphy *dphy = phy_get_drvdata(phy); + int ret; + + ret = reset_control_assert(dphy->sys_rst); + if (ret) { + dev_err(dphy->dev, "Failed to assert sys_rst\n"); + return ret; + } + + clk_disable_unprepare(dphy->txesc_clk); + + stf_dphy_hw_reset(dphy, 0); + + return 0; +} + +static int stf_dphy_power_on(struct phy *phy) +{ + struct stf_dphy *dphy = phy_get_drvdata(phy); + + return pm_runtime_resume_and_get(dphy->dev); +} + +static int stf_dphy_validate(struct phy *phy, enum phy_mode mode, int submode, + union phy_configure_opts *opts) +{ + if (mode != PHY_MODE_MIPI_DPHY) + return -EINVAL; + + return 0; +} + +static int stf_dphy_power_off(struct phy *phy) +{ + struct stf_dphy *dphy = phy_get_drvdata(phy); + + return pm_runtime_put_sync(dphy->dev); +} + +static const struct phy_ops stf_dphy_ops = { + .power_on = stf_dphy_power_on, + .power_off = stf_dphy_power_off, + .init = stf_dphy_init, + .exit = stf_dphy_exit, + .configure = stf_dphy_configure, + .validate = stf_dphy_validate, + .owner = THIS_MODULE, +}; + +static int stf_dphy_probe(struct platform_device *pdev) +{ + struct phy_provider *phy_provider; + struct stf_dphy *dphy; + + dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL); + if (!dphy) + return -ENOMEM; + + dphy->info = of_device_get_match_data(&pdev->dev); + + dphy->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, dphy); + + dphy->topsys = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dphy->topsys)) + return PTR_ERR(dphy->topsys); + + pm_runtime_enable(&pdev->dev); + + dphy->txesc_clk = devm_clk_get(&pdev->dev, "txesc"); + if (IS_ERR(dphy->txesc_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(dphy->txesc_clk), + "Failed to get txesc clock\n"); + + dphy->sys_rst = devm_reset_control_get_exclusive(&pdev->dev, "sys"); + if (IS_ERR(dphy->sys_rst)) + return dev_err_probe(&pdev->dev, PTR_ERR(dphy->sys_rst), + "Failed to get sys reset\n"); + + dphy->phy = devm_phy_create(&pdev->dev, NULL, &stf_dphy_ops); + if (IS_ERR(dphy->phy)) + return 
dev_err_probe(&pdev->dev, PTR_ERR(dphy->phy), + "Failed to create phy\n"); + + phy_set_drvdata(dphy->phy, dphy); + + phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return dev_err_probe(&pdev->dev, PTR_ERR(phy_provider), + "Failed to register phy\n"); + + return 0; +} + +static const struct stf_dphy_info starfive_dphy_info = { + .maps = {0, 1, 2, 3, 4}, +}; + +static const struct of_device_id stf_dphy_dt_ids[] = { + { + .compatible = "starfive,jh7110-dphy-tx", + .data = &starfive_dphy_info, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, stf_dphy_dt_ids); + +static struct platform_driver stf_dphy_driver = { + .driver = { + .name = "starfive-dphy-tx", + .of_match_table = stf_dphy_dt_ids, + }, + .probe = stf_dphy_probe, +}; +module_platform_driver(stf_dphy_driver); + +MODULE_AUTHOR("Keith Zhao <keith.zhao@starfivetech.com>"); +MODULE_AUTHOR("Shengyang Chen <shengyang.chen@starfivetech.com>"); +MODULE_DESCRIPTION("StarFive JH7110 DPHY TX driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index 8b3b937de624..673449607c02 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -30,7 +30,6 @@ #define LANE_R058 0x258 #define LANE_R06c 0x26c #define LANE_R070 0x270 -#define LANE_R070 0x270 #define LANE_R19C 0x39c #define COMLANE_R004 0xa04 diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index 00d7e6a6de03..7f626c597025 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -1076,27 +1076,12 @@ static int wiz_clock_register(struct wiz *wiz) return ret; } -static int wiz_clock_init(struct wiz *wiz, struct device_node *node) +static void wiz_clock_init(struct wiz *wiz) { - const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel; - struct device *dev = wiz->dev; - struct device_node *clk_node; - const char *node_name; unsigned long rate; - struct clk *clk; - int ret; - int i; - clk = devm_clk_get(dev, "core_ref_clk"); - if (IS_ERR(clk)) { - dev_err(dev, "core_ref_clk clock not found\n"); - ret = PTR_ERR(clk); - return ret; - } - wiz->input_clks[WIZ_CORE_REFCLK] = clk; - - rate = clk_get_rate(clk); - if (rate >= 100000000) + rate = clk_get_rate(wiz->input_clks[WIZ_CORE_REFCLK]); + if (rate >= REF_CLK_100MHZ) regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x1); else regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3); @@ -1120,35 +1105,55 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node) break; } - if (wiz->data->pma_cmn_refclk1_int_mode) { - clk = devm_clk_get(dev, "core_ref1_clk"); - if (IS_ERR(clk)) { - dev_err(dev, "core_ref1_clk clock not found\n"); - ret = PTR_ERR(clk); - return ret; - } - wiz->input_clks[WIZ_CORE_REFCLK1] = clk; - - rate = clk_get_rate(clk); - if (rate >= 100000000) + if (wiz->input_clks[WIZ_CORE_REFCLK1]) { + rate = clk_get_rate(wiz->input_clks[WIZ_CORE_REFCLK1]); + if (rate >= REF_CLK_100MHZ) regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x1); else regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x3); } - clk = devm_clk_get(dev, "ext_ref_clk"); - if (IS_ERR(clk)) { - dev_err(dev, "ext_ref_clk clock not found\n"); - ret = PTR_ERR(clk); - return ret; - } - wiz->input_clks[WIZ_EXT_REFCLK] = clk; - - rate = clk_get_rate(clk); - if (rate >= 100000000) + rate = clk_get_rate(wiz->input_clks[WIZ_EXT_REFCLK]); + if (rate >= REF_CLK_100MHZ) regmap_field_write(wiz->pma_cmn_refclk_mode, 0x0); else 
regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2); +} + +static int wiz_clock_probe(struct wiz *wiz, struct device_node *node) +{ + const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel; + struct device *dev = wiz->dev; + struct device_node *clk_node; + const char *node_name; + struct clk *clk; + int ret; + int i; + + clk = devm_clk_get(dev, "core_ref_clk"); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), + "core_ref_clk clock not found\n"); + + wiz->input_clks[WIZ_CORE_REFCLK] = clk; + + if (wiz->data->pma_cmn_refclk1_int_mode) { + clk = devm_clk_get(dev, "core_ref1_clk"); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), + "core_ref1_clk clock not found\n"); + + wiz->input_clks[WIZ_CORE_REFCLK1] = clk; + } + + clk = devm_clk_get(dev, "ext_ref_clk"); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), + "ext_ref_clk clock not found\n"); + + wiz->input_clks[WIZ_EXT_REFCLK] = clk; + + wiz_clock_init(wiz); switch (wiz->type) { case AM64_WIZ_10G: @@ -1157,8 +1162,9 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node) case J721S2_WIZ_10G: ret = wiz_clock_register(wiz); if (ret) - dev_err(dev, "Failed to register wiz clocks\n"); - return ret; + return dev_err_probe(dev, ret, "Failed to register wiz clocks\n"); + + return 0; default: break; } @@ -1167,16 +1173,15 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node) node_name = clk_mux_sel[i].node_name; clk_node = of_get_child_by_name(node, node_name); if (!clk_node) { - dev_err(dev, "Unable to get %s node\n", node_name); - ret = -EINVAL; + ret = dev_err_probe(dev, -EINVAL, "Unable to get %s node\n", node_name); goto err; } ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i], clk_mux_sel[i].table); if (ret) { - dev_err(dev, "Failed to register %s clock\n", - node_name); + dev_err_probe(dev, ret, "Failed to register %s clock\n", + node_name); of_node_put(clk_node); goto err; } @@ -1188,16 +1193,15 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node) node_name = clk_div_sel[i].node_name; clk_node = of_get_child_by_name(node, node_name); if (!clk_node) { - dev_err(dev, "Unable to get %s node\n", node_name); - ret = -EINVAL; + ret = dev_err_probe(dev, -EINVAL, "Unable to get %s node\n", node_name); goto err; } ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i], clk_div_sel[i].table); if (ret) { - dev_err(dev, "Failed to register %s clock\n", - node_name); + dev_err_probe(dev, ret, "Failed to register %s clock\n", + node_name); of_node_put(clk_node); goto err; } @@ -1593,7 +1597,7 @@ static int wiz_probe(struct platform_device *pdev) goto err_get_sync; } - ret = wiz_clock_init(wiz, node); + ret = wiz_clock_probe(wiz, node); if (ret < 0) { dev_warn(dev, "Failed to initialize clocks\n"); goto err_get_sync; @@ -1655,12 +1659,41 @@ static void wiz_remove(struct platform_device *pdev) pm_runtime_disable(dev); } +static int wiz_resume_noirq(struct device *dev) +{ + struct device_node *node = dev->of_node; + struct wiz *wiz = dev_get_drvdata(dev); + int ret; + + /* Enable supplemental Control override if available */ + if (wiz->sup_legacy_clk_override) + regmap_field_write(wiz->sup_legacy_clk_override, 1); + + wiz_clock_init(wiz); + + ret = wiz_init(wiz); + if (ret) { + dev_err(dev, "WIZ initialization failed\n"); + goto err_wiz_init; + } + + return 0; + +err_wiz_init: + wiz_clock_cleanup(wiz, node); + + return ret; +} + +static DEFINE_NOIRQ_DEV_PM_OPS(wiz_pm_ops, NULL, wiz_resume_noirq); + static struct platform_driver wiz_driver = { 
.probe = wiz_probe, .remove_new = wiz_remove, .driver = { .name = "wiz", .of_match_table = wiz_id_table, + .pm = pm_sleep_ptr(&wiz_pm_ops), }, }; module_platform_driver(wiz_driver); diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index dc8319bda43d..cb15041371c9 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ b/drivers/phy/xilinx/phy-zynqmp.c @@ -13,6 +13,7 @@ */ #include <linux/clk.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/kernel.h> @@ -80,7 +81,8 @@ /* Reference clock selection parameters */ #define L0_Ln_REF_CLK_SEL(n) (0x2860 + (n) * 4) -#define L0_REF_CLK_SEL_MASK 0x8f +#define L0_REF_CLK_LCL_SEL BIT(7) +#define L0_REF_CLK_SEL_MASK 0x9f /* Calibration digital logic parameters */ #define L3_TM_CALIB_DIG19 0xec4c @@ -122,6 +124,15 @@ #define ICM_PROTOCOL_DP 0x4 #define ICM_PROTOCOL_SGMII 0x5 +static const char *const xpsgtr_icm_str[] = { + [ICM_PROTOCOL_PD] = "none", + [ICM_PROTOCOL_PCIE] = "PCIe", + [ICM_PROTOCOL_SATA] = "SATA", + [ICM_PROTOCOL_USB] = "USB", + [ICM_PROTOCOL_DP] = "DisplayPort", + [ICM_PROTOCOL_SGMII] = "SGMII", +}; + /* Test Mode common reset control parameters */ #define TM_CMN_RST 0x10018 #define TM_CMN_RST_EN 0x1 @@ -146,22 +157,6 @@ /* Total number of controllers */ #define CONTROLLERS_PER_LANE 5 -/* Protocol Type parameters */ -#define XPSGTR_TYPE_USB0 0 /* USB controller 0 */ -#define XPSGTR_TYPE_USB1 1 /* USB controller 1 */ -#define XPSGTR_TYPE_SATA_0 2 /* SATA controller lane 0 */ -#define XPSGTR_TYPE_SATA_1 3 /* SATA controller lane 1 */ -#define XPSGTR_TYPE_PCIE_0 4 /* PCIe controller lane 0 */ -#define XPSGTR_TYPE_PCIE_1 5 /* PCIe controller lane 1 */ -#define XPSGTR_TYPE_PCIE_2 6 /* PCIe controller lane 2 */ -#define XPSGTR_TYPE_PCIE_3 7 /* PCIe controller lane 3 */ -#define XPSGTR_TYPE_DP_0 8 /* Display Port controller lane 0 */ -#define XPSGTR_TYPE_DP_1 9 /* Display Port controller lane 1 */ -#define XPSGTR_TYPE_SGMII0 10 /* Ethernet SGMII controller 0 */ -#define XPSGTR_TYPE_SGMII1 11 /* Ethernet SGMII controller 1 */ -#define XPSGTR_TYPE_SGMII2 12 /* Ethernet SGMII controller 2 */ -#define XPSGTR_TYPE_SGMII3 13 /* Ethernet SGMII controller 3 */ - /* Timeout values */ #define TIMEOUT_US 1000 @@ -184,7 +179,8 @@ struct xpsgtr_ssc { /** * struct xpsgtr_phy - representation of a lane * @phy: pointer to the kernel PHY device - * @type: controller which uses this lane + * @instance: instance of the protocol type (such as the lane within a + * protocol, or the USB/Ethernet controller) * @lane: lane number * @protocol: protocol in which the lane operates * @skip_phy_init: skip phy_init() if true @@ -193,7 +189,7 @@ struct xpsgtr_ssc { */ struct xpsgtr_phy { struct phy *phy; - u8 type; + u8 instance; u8 lane; u8 protocol; bool skip_phy_init; @@ -308,10 +304,30 @@ static int xpsgtr_wait_pll_lock(struct phy *phy) struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy); struct xpsgtr_dev *gtr_dev = gtr_phy->dev; unsigned int timeout = TIMEOUT_US; + u8 protocol = gtr_phy->protocol; int ret; dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n"); + /* + * For DP and PCIe, only the instance 0 PLL is used. Switch to that phy + * so we wait on the right PLL. 
+ */ + if ((protocol == ICM_PROTOCOL_DP || protocol == ICM_PROTOCOL_PCIE) && + gtr_phy->instance) { + int i; + + for (i = 0; i < NUM_LANES; i++) { + gtr_phy = &gtr_dev->phys[i]; + + if (gtr_phy->protocol == protocol && !gtr_phy->instance) + goto got_phy; + } + + return -EBUSY; + } + +got_phy: while (1) { u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1); @@ -330,8 +346,8 @@ static int xpsgtr_wait_pll_lock(struct phy *phy) if (ret == -ETIMEDOUT) dev_err(gtr_dev->dev, - "lane %u (type %u, protocol %u): PLL lock timeout\n", - gtr_phy->lane, gtr_phy->type, gtr_phy->protocol); + "lane %u (protocol %u, instance %u): PLL lock timeout\n", + gtr_phy->lane, gtr_phy->protocol, gtr_phy->instance); return ret; } @@ -349,11 +365,12 @@ static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy) PLL_FREQ_MASK, ssc->pll_ref_clk); /* Enable lane clock sharing, if required */ - if (gtr_phy->refclk != gtr_phy->lane) { - /* Lane3 Ref Clock Selection Register */ + if (gtr_phy->refclk == gtr_phy->lane) + xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane), + L0_REF_CLK_SEL_MASK, L0_REF_CLK_LCL_SEL); + else xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane), L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk); - } /* SSC step size [7:0] */ xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB, @@ -573,7 +590,7 @@ static int xpsgtr_phy_init(struct phy *phy) mutex_lock(&gtr_dev->gtr_mutex); /* Configure and enable the clock when peripheral phy_init call */ - if (clk_prepare_enable(gtr_dev->clk[gtr_phy->lane])) + if (clk_prepare_enable(gtr_dev->clk[gtr_phy->refclk])) goto out; /* Skip initialization if not required. */ @@ -625,7 +642,7 @@ static int xpsgtr_phy_exit(struct phy *phy) gtr_phy->skip_phy_init = false; /* Ensure that disable clock only, which configure for lane */ - clk_disable_unprepare(gtr_dev->clk[gtr_phy->lane]); + clk_disable_unprepare(gtr_dev->clk[gtr_phy->refclk]); return 0; } @@ -638,16 +655,7 @@ static int xpsgtr_phy_power_on(struct phy *phy) /* Skip initialization if not required. */ if (!xpsgtr_phy_init_required(gtr_phy)) return ret; - /* - * Wait for the PLL to lock. For DP, only wait on DP0 to avoid - * cumulating waits for both lanes. The user is expected to initialize - * lane 0 last. - */ - if (gtr_phy->protocol != ICM_PROTOCOL_DP || - gtr_phy->type == XPSGTR_TYPE_DP_0) - ret = xpsgtr_wait_pll_lock(phy); - - return ret; + return xpsgtr_wait_pll_lock(phy); } static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts) @@ -674,73 +682,33 @@ static const struct phy_ops xpsgtr_phyops = { * OF Xlate Support */ -/* Set the lane type and protocol based on the PHY type and instance number. */ +/* Set the lane protocol and instance based on the PHY type and instance number.
*/ static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type, unsigned int phy_instance) { unsigned int num_phy_types; - const int *phy_types; switch (phy_type) { - case PHY_TYPE_SATA: { - static const int types[] = { - XPSGTR_TYPE_SATA_0, - XPSGTR_TYPE_SATA_1, - }; - - phy_types = types; - num_phy_types = ARRAY_SIZE(types); + case PHY_TYPE_SATA: + num_phy_types = 2; gtr_phy->protocol = ICM_PROTOCOL_SATA; break; - } - case PHY_TYPE_USB3: { - static const int types[] = { - XPSGTR_TYPE_USB0, - XPSGTR_TYPE_USB1, - }; - - phy_types = types; - num_phy_types = ARRAY_SIZE(types); + case PHY_TYPE_USB3: + num_phy_types = 2; gtr_phy->protocol = ICM_PROTOCOL_USB; break; - } - case PHY_TYPE_DP: { - static const int types[] = { - XPSGTR_TYPE_DP_0, - XPSGTR_TYPE_DP_1, - }; - - phy_types = types; - num_phy_types = ARRAY_SIZE(types); + case PHY_TYPE_DP: + num_phy_types = 2; gtr_phy->protocol = ICM_PROTOCOL_DP; break; - } - case PHY_TYPE_PCIE: { - static const int types[] = { - XPSGTR_TYPE_PCIE_0, - XPSGTR_TYPE_PCIE_1, - XPSGTR_TYPE_PCIE_2, - XPSGTR_TYPE_PCIE_3, - }; - - phy_types = types; - num_phy_types = ARRAY_SIZE(types); + case PHY_TYPE_PCIE: + num_phy_types = 4; gtr_phy->protocol = ICM_PROTOCOL_PCIE; break; - } - case PHY_TYPE_SGMII: { - static const int types[] = { - XPSGTR_TYPE_SGMII0, - XPSGTR_TYPE_SGMII1, - XPSGTR_TYPE_SGMII2, - XPSGTR_TYPE_SGMII3, - }; - - phy_types = types; - num_phy_types = ARRAY_SIZE(types); + case PHY_TYPE_SGMII: + num_phy_types = 4; gtr_phy->protocol = ICM_PROTOCOL_SGMII; break; - } default: return -EINVAL; } @@ -748,22 +716,25 @@ static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type, if (phy_instance >= num_phy_types) return -EINVAL; - gtr_phy->type = phy_types[phy_instance]; + gtr_phy->instance = phy_instance; return 0; } /* - * Valid combinations of controllers and lanes (Interconnect Matrix). + * Valid combinations of controllers and lanes (Interconnect Matrix). Each + * "instance" represents one controller for a lane. For PCIe and DP, the + * "instance" is the logical lane in the link. For SATA, USB, and SGMII, + * the instance is the index of the controller. + * + * This information is only used to validate the devicetree reference, and is + * not used when programming the hardware. */ static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = { - { XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0, - XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 }, - { XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0, - XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 }, - { XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0, - XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 }, - { XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1, - XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 } + /* PCIe, SATA, USB, DP, SGMII */ + { 0, 0, 0, 1, 0 }, /* Lane 0 */ + { 1, 1, 0, 0, 1 }, /* Lane 1 */ + { 2, 0, 0, 1, 2 }, /* Lane 2 */ + { 3, 1, 1, 0, 3 }, /* Lane 3 */ }; /* Translate OF phandle and args to PHY instance. */ @@ -798,6 +769,7 @@ static struct phy *xpsgtr_xlate(struct device *dev, phy_type = args->args[1]; phy_instance = args->args[2]; + guard(mutex)(&gtr_phy->phy->mutex); ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance); if (ret < 0) { dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n"); @@ -818,7 +790,7 @@ static struct phy *xpsgtr_xlate(struct device *dev, * is allowed to operate on the lane.
*/ for (i = 0; i < CONTROLLERS_PER_LANE; i++) { - if (icm_matrix[phy_lane][i] == gtr_phy->type) + if (icm_matrix[phy_lane][i] == gtr_phy->instance) return gtr_phy->phy; } @@ -826,6 +798,34 @@ static struct phy *xpsgtr_xlate(struct device *dev, } /* + * DebugFS + */ + +static int xpsgtr_status_read(struct seq_file *seq, void *data) +{ + struct device *dev = seq->private; + struct xpsgtr_phy *gtr_phy = dev_get_drvdata(dev); + struct clk *clk; + u32 pll_status; + + mutex_lock(&gtr_phy->phy->mutex); + pll_status = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1); + clk = gtr_phy->dev->clk[gtr_phy->refclk]; + + seq_printf(seq, "Lane: %u\n", gtr_phy->lane); + seq_printf(seq, "Protocol: %s\n", + xpsgtr_icm_str[gtr_phy->protocol]); + seq_printf(seq, "Instance: %u\n", gtr_phy->instance); + seq_printf(seq, "Reference clock: %u (%pC)\n", gtr_phy->refclk, clk); + seq_printf(seq, "Reference rate: %lu\n", clk_get_rate(clk)); + seq_printf(seq, "PLL locked: %s\n", + pll_status & PLL_STATUS_LOCKED ? "yes" : "no"); + + mutex_unlock(&gtr_phy->phy->mutex); + return 0; +} + +/* * Power Management */ @@ -974,6 +974,8 @@ static int xpsgtr_probe(struct platform_device *pdev) gtr_phy->phy = phy; phy_set_drvdata(phy, gtr_phy); + debugfs_create_devm_seqfile(&phy->dev, "status", phy->debugfs, + xpsgtr_status_read); } /* Register the PHY provider. */ |
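As context for the new StarFive D-PHY TX ops in this pull (stf_dphy_validate, stf_dphy_configure, stf_dphy_init, stf_dphy_power_on), the sketch below shows how a display-controller driver would typically exercise such a PHY through the generic PHY consumer API. It is illustrative only and not taken from this series; the 148.5 MHz pixel clock, 24 bpp and 4-lane figures are assumed values for the example.

#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>

/*
 * Illustrative only, not part of the commit above: drives a MIPI D-PHY TX
 * (such as phy-jh7110-dphy-tx.c) via the generic PHY consumer API. The
 * pixel clock, bpp and lane count are assumptions for the example.
 */
static int example_dsi_enable_dphy(struct phy *dphy)
{
	union phy_configure_opts opts = { };
	int ret;

	/* Derive default HS timings from pixel clock (Hz), bpp and lanes */
	ret = phy_mipi_dphy_get_default_config(148500000, 24, 4,
					       &opts.mipi_dphy);
	if (ret)
		return ret;

	/* Ask the PHY driver whether it supports this configuration */
	ret = phy_validate(dphy, PHY_MODE_MIPI_DPHY, 0, &opts);
	if (ret)
		return ret;

	ret = phy_init(dphy);			/* resets and escape clock */
	if (ret)
		return ret;

	ret = phy_configure(dphy, &opts);	/* PLL and lane timing */
	if (ret)
		goto err_exit;

	ret = phy_power_on(dphy);		/* runtime PM resume here */
	if (ret)
		goto err_exit;

	return 0;

err_exit:
	phy_exit(dphy);
	return ret;
}

The PHY itself would normally be obtained earlier in probe with devm_phy_get(); the lookup name used there depends on the consumer's devicetree binding rather than on anything in this pull.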