author    Linus Torvalds <torvalds@linux-foundation.org>    2020-08-07 01:50:07 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>    2020-08-07 01:50:07 +0200
commit    dfdf16ecfd6abafc22b7f02364d9bb879ca8a5ee
tree      634f3ddf9bacc60daddc01366a78420c9a86e66e /drivers/scsi/ufs
parent    Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
parent    scsi: scsi_transport_srp: Sanitize scsi_target_block/unblock sequences
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This consists of the usual driver updates (ufs, qla2xxx, tcmu, lpfc,
hpsa, zfcp, scsi_debug) and minor bug fixes.
We also have a huge docbook fix update, like most other subsystems, and
no major update to the core (the few non-trivial updates are either
minor fixes or the removal of an unused feature [scsi_sdb_cache])"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (307 commits)
scsi: scsi_transport_srp: Sanitize scsi_target_block/unblock sequences
scsi: ufs-mediatek: Apply DELAY_AFTER_LPM quirk to Micron devices
scsi: ufs: Introduce device quirk "DELAY_AFTER_LPM"
scsi: virtio-scsi: Correctly handle the case where all LUNs are unplugged
scsi: scsi_debug: Implement tur_ms_to_ready parameter
scsi: scsi_debug: Fix request sense
scsi: lpfc: Fix typo in comment for ULP
scsi: ufs-mediatek: Prevent LPM operation on undeclared VCC
scsi: iscsi: Do not put host in iscsi_set_flashnode_param()
scsi: hpsa: Correct ctrl queue depth
scsi: target: tcmu: Make TMR notification optional
scsi: target: tcmu: Implement tmr_notify callback
scsi: target: tcmu: Fix and simplify timeout handling
scsi: target: tcmu: Factor out new helper ring_insert_padding
scsi: target: tcmu: Do not queue aborted commands
scsi: target: tcmu: Use priv pointer in se_cmd
scsi: target: Add tmr_notify backend function
scsi: target: Modify core_tmr_abort_task()
scsi: target: iscsi: Fix inconsistent debug message
scsi: target: iscsi: Fix login error when receiving
...
Diffstat (limited to 'drivers/scsi/ufs')
 drivers/scsi/ufs/Kconfig         |   22
 drivers/scsi/ufs/Makefile        |    6
 drivers/scsi/ufs/ufs-exynos.c    | 1297
 drivers/scsi/ufs/ufs-exynos.h    |  287
 drivers/scsi/ufs/ufs-mediatek.c  |  140
 drivers/scsi/ufs/ufs-mediatek.h  |    4
 drivers/scsi/ufs/ufs-qcom-ice.c  |  245
 drivers/scsi/ufs/ufs-qcom.c      |   21
 drivers/scsi/ufs/ufs-qcom.h      |   27
 drivers/scsi/ufs/ufs.h           |   38
 drivers/scsi/ufs/ufs_bsg.c       |    5
 drivers/scsi/ufs/ufs_quirks.h    |   10
 drivers/scsi/ufs/ufshcd-crypto.c |  245
 drivers/scsi/ufs/ufshcd-crypto.h |   77
 drivers/scsi/ufs/ufshcd-pci.c    |   25
 drivers/scsi/ufs/ufshcd-pltfrm.c |   27
 drivers/scsi/ufs/ufshcd.c        |  541
 drivers/scsi/ufs/ufshcd.h        |  105
 drivers/scsi/ufs/ufshci.h        |   94
 drivers/scsi/ufs/unipro.h        |   33
20 files changed, 2801 insertions(+), 448 deletions(-)
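
A note before the diff body: the new Exynos driver repeatedly converts nanosecond timing attributes into PCLK cycle counts via exynos_ufs_calc_time_cntr(), which carries one extra decimal digit of the clock period in fixed point to limit truncation error. Below is a minimal standalone C sketch of that arithmetic; the 133 MHz rate and 20000 ns period are illustrative values borrowed from PCLK_AVAIL_MAX and IATOVAL_NSEC in the new headers, not output from the driver itself.

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000L

    /*
     * Same fixed-point scheme as exynos_ufs_calc_time_cntr() in the new
     * ufs-exynos.c: carry the PCLK period in tenths of a nanosecond so a
     * period such as 7.5 ns does not truncate to 7 ns before dividing.
     */
    static long calc_time_cntr(long pclk_rate, long period_ns)
    {
        const int precise = 10;                      /* one extra decimal digit */
        long clk_period = NSEC_PER_SEC / pclk_rate;  /* whole ns per cycle */
        long fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;

        return (period_ns * precise) / ((clk_period * precise) + fraction);
    }

    int main(void)
    {
        /* 133 MHz = PCLK_AVAIL_MAX, 20000 ns = IATOVAL_NSEC (ufs-exynos.h) */
        printf("20000 ns @ 133 MHz -> %ld cycles\n",
               calc_time_cntr(133000000L, 20000L));  /* prints 2666 */
        return 0;
    }

With the fraction term the result is 2666 cycles, within 0.3% of the exact 2660; truncating the period to a whole 7 ns would instead give 2857, about 7% off.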
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index d35378be89e8..f6394999b98c 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -99,6 +99,7 @@ config SCSI_UFS_DWC_TC_PLATFORM config SCSI_UFS_QCOM tristate "QCOM specific hooks to UFS controller platform driver" depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM + select QCOM_SCM select RESET_CONTROLLER help This selects the QCOM specific additions to UFSHCD platform driver. @@ -160,3 +161,24 @@ config SCSI_UFS_BSG Select this if you need a bsg device node for your UFS controller. If unsure, say N. + +config SCSI_UFS_EXYNOS + tristate "EXYNOS specific hooks to UFS controller platform driver" + depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST) + select PHY_SAMSUNG_UFS + help + This selects the EXYNOS specific additions to UFSHCD platform driver. + UFS host on EXYNOS includes HCI and UNIPRO layer, and associates with + UFS-PHY driver. + + Select this if you have UFS host controller on EXYNOS chipset. + If unsure, say N. + +config SCSI_UFS_CRYPTO + bool "UFS Crypto Engine Support" + depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION + help + Enable Crypto Engine Support in UFS. + Enabling this makes it possible for the kernel to use the crypto + capabilities of the UFS device (if present) to perform crypto + operations on data being transferred to/from the device. diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 94c6c5d7334b..4679af1b564e 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -3,10 +3,14 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o -obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o +obj-$(CONFIG_SCSI_UFS_QCOM) += ufs_qcom.o +ufs_qcom-y += ufs-qcom.o +ufs_qcom-$(CONFIG_SCSI_UFS_CRYPTO) += ufs-qcom-ice.o +obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o ufshcd-core-y += ufshcd.o ufs-sysfs.o ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o +ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c new file mode 100644 index 000000000000..8f1b6f61a776 --- /dev/null +++ b/drivers/scsi/ufs/ufs-exynos.c @@ -0,0 +1,1297 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * UFS Host Controller driver for Exynos specific extensions + * + * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd. 
+ * Author: Seungwon Jeon <essuuj@gmail.com> + * Author: Alim Akhtar <alim.akhtar@samsung.com> + * + */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> + +#include "ufshcd.h" +#include "ufshcd-pltfrm.h" +#include "ufshci.h" +#include "unipro.h" + +#include "ufs-exynos.h" + +/* + * Exynos's Vendor specific registers for UFSHCI + */ +#define HCI_TXPRDT_ENTRY_SIZE 0x00 +#define PRDT_PREFECT_EN BIT(31) +#define PRDT_SET_SIZE(x) ((x) & 0x1F) +#define HCI_RXPRDT_ENTRY_SIZE 0x04 +#define HCI_1US_TO_CNT_VAL 0x0C +#define CNT_VAL_1US_MASK 0x3FF +#define HCI_UTRL_NEXUS_TYPE 0x40 +#define HCI_UTMRL_NEXUS_TYPE 0x44 +#define HCI_SW_RST 0x50 +#define UFS_LINK_SW_RST BIT(0) +#define UFS_UNIPRO_SW_RST BIT(1) +#define UFS_SW_RST_MASK (UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST) +#define HCI_DATA_REORDER 0x60 +#define HCI_UNIPRO_APB_CLK_CTRL 0x68 +#define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF)) +#define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C +#define HCI_GPIO_OUT 0x70 +#define HCI_ERR_EN_PA_LAYER 0x78 +#define HCI_ERR_EN_DL_LAYER 0x7C +#define HCI_ERR_EN_N_LAYER 0x80 +#define HCI_ERR_EN_T_LAYER 0x84 +#define HCI_ERR_EN_DME_LAYER 0x88 +#define HCI_CLKSTOP_CTRL 0xB0 +#define REFCLK_STOP BIT(2) +#define UNIPRO_MCLK_STOP BIT(1) +#define UNIPRO_PCLK_STOP BIT(0) +#define CLK_STOP_MASK (REFCLK_STOP |\ + UNIPRO_MCLK_STOP |\ + UNIPRO_PCLK_STOP) +#define HCI_MISC 0xB4 +#define REFCLK_CTRL_EN BIT(7) +#define UNIPRO_PCLK_CTRL_EN BIT(6) +#define UNIPRO_MCLK_CTRL_EN BIT(5) +#define HCI_CORECLK_CTRL_EN BIT(4) +#define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\ + UNIPRO_PCLK_CTRL_EN |\ + UNIPRO_MCLK_CTRL_EN) +/* Device fatal error */ +#define DFES_ERR_EN BIT(31) +#define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\ + UIC_DATA_LINK_LAYER_ERROR_PA_INIT) +#define DFES_DEF_L3_ERRS (UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\ + UIC_NETWORK_BAD_DEVICEID_ENC |\ + UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING) +#define DFES_DEF_L4_ERRS (UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\ + UIC_TRANSPORT_UNKNOWN_CPORTID |\ + UIC_TRANSPORT_NO_CONNECTION_RX |\ + UIC_TRANSPORT_BAD_TC) + +enum { + UNIPRO_L1_5 = 0,/* PHY Adapter */ + UNIPRO_L2, /* Data Link */ + UNIPRO_L3, /* Network */ + UNIPRO_L4, /* Transport */ + UNIPRO_DME, /* DME */ +}; + +/* + * UNIPRO registers + */ +#define UNIPRO_COMP_VERSION 0x000 +#define UNIPRO_DME_PWR_REQ 0x090 +#define UNIPRO_DME_PWR_REQ_POWERMODE 0x094 +#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0 0x098 +#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1 0x09C +#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2 0x0A0 +#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0 0x0A4 +#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1 0x0A8 +#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2 0x0AC + +/* + * UFS Protector registers + */ +#define UFSPRSECURITY 0x010 +#define NSSMU BIT(14) +#define UFSPSBEGIN0 0x200 +#define UFSPSEND0 0x204 +#define UFSPSLUN0 0x208 +#define UFSPSCTRL0 0x20C + +#define CNTR_DIV_VAL 40 + +static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en); +static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en); + +static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs) +{ + exynos_ufs_auto_ctrl_hcc(ufs, true); +} + +static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs) +{ + exynos_ufs_auto_ctrl_hcc(ufs, false); +} + +static inline void exynos_ufs_disable_auto_ctrl_hcc_save( + struct exynos_ufs *ufs, u32 *val) +{ + *val = hci_readl(ufs, HCI_MISC); + exynos_ufs_auto_ctrl_hcc(ufs, 
false); +} + +static inline void exynos_ufs_auto_ctrl_hcc_restore( + struct exynos_ufs *ufs, u32 *val) +{ + hci_writel(ufs, *val, HCI_MISC); +} + +static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs) +{ + exynos_ufs_ctrl_clkstop(ufs, true); +} + +static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs) +{ + exynos_ufs_ctrl_clkstop(ufs, false); +} + +static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs) +{ + return 0; +} + +static int exynos7_ufs_pre_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite; + int i; + + exynos_ufs_enable_ov_tm(hba); + for_each_ufs_tx_lane(ufs, i) + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17); + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00); + } + exynos_ufs_disable_ov_tm(hba); + + for_each_ufs_tx_lane(ufs, i) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1); + udelay(1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12)); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1); + udelay(1600); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val); + + return 0; +} + +static int exynos7_ufs_post_link(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + int i; + + exynos_ufs_enable_ov_tm(hba); + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i), + TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000))); + } + exynos_ufs_disable_ov_tm(hba); + + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8); + exynos_ufs_disable_dbg_mode(hba); + + return 0; +} + +static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE); + + return 0; +} + +static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx); + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1); + + if (lanes == 1) { + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1); + exynos_ufs_disable_dbg_mode(hba); + } + + return 0; +} + +/* + * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w + * Control should be disabled in the below cases + * - Before host controller S/W reset + * - Access to UFS protector's register + */ +static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en) +{ + u32 misc = hci_readl(ufs, HCI_MISC); + + if (en) + hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC); + else + hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC); +} + +static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en) +{ + u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL); + u32 misc = hci_readl(ufs, HCI_MISC); + + if (en) { + hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC); + hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL); + } else { + hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL); + hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC); + } +} + +static 
int exynos_ufs_get_clk_info(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct list_head *head = &hba->clk_list_head; + struct ufs_clk_info *clki; + u32 pclk_rate; + u32 f_min, f_max; + u8 div = 0; + int ret = 0; + + if (list_empty(head)) + goto out; + + list_for_each_entry(clki, head, list) { + if (!IS_ERR(clki->clk)) { + if (!strcmp(clki->name, "core_clk")) + ufs->clk_hci_core = clki->clk; + else if (!strcmp(clki->name, "sclk_unipro_main")) + ufs->clk_unipro_main = clki->clk; + } + } + + if (!ufs->clk_hci_core || !ufs->clk_unipro_main) { + dev_err(hba->dev, "failed to get clk info\n"); + ret = -EINVAL; + goto out; + } + + ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main); + pclk_rate = clk_get_rate(ufs->clk_hci_core); + f_min = ufs->pclk_avail_min; + f_max = ufs->pclk_avail_max; + + if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) { + do { + pclk_rate /= (div + 1); + + if (pclk_rate <= f_max) + break; + div++; + } while (pclk_rate >= f_min); + } + + if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) { + dev_err(hba->dev, "not available pclk range %d\n", pclk_rate); + ret = -EINVAL; + goto out; + } + + ufs->pclk_rate = pclk_rate; + ufs->pclk_div = div; + +out: + return ret; +} + +static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs) +{ + if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) { + u32 val; + + val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL); + hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div), + HCI_UNIPRO_APB_CLK_CTRL); + } +} + +static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + ufshcd_dme_set(hba, + UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl); +} + +static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + const unsigned int div = 30, mult = 20; + const unsigned long pwm_min = 3 * 1000 * 1000; + const unsigned long pwm_max = 9 * 1000 * 1000; + const int divs[] = {32, 16, 8, 4}; + unsigned long clk = 0, _clk, clk_period; + int i = 0, clk_idx = -1; + + clk_period = UNIPRO_PCLK_PERIOD(ufs); + for (i = 0; i < ARRAY_SIZE(divs); i++) { + _clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div); + if (_clk >= pwm_min && _clk <= pwm_max) { + if (_clk > clk) { + clk_idx = i; + clk = _clk; + } + } + } + + if (clk_idx == -1) { + ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx); + dev_err(hba->dev, + "failed to decide pwm clock divider, will not change\n"); + } + + attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK; +} + +long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period) +{ + const int precise = 10; + long pclk_rate = ufs->pclk_rate; + long clk_period, fraction; + + clk_period = UNIPRO_PCLK_PERIOD(ufs); + fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate; + + return (period * precise) / ((clk_period * precise) + fraction); +} + +static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs) +{ + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + + t_cfg->tx_linereset_p = + exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec); + t_cfg->tx_linereset_n = + exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec); + t_cfg->tx_high_z_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec); + t_cfg->tx_base_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec); + t_cfg->tx_gran_n_val = + 
exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec); + t_cfg->tx_sleep_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt); + + t_cfg->rx_linereset = + exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec); + t_cfg->rx_hibern8_wait = + exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec); + t_cfg->rx_base_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec); + t_cfg->rx_gran_n_val = + exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec); + t_cfg->rx_sleep_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt); + t_cfg->rx_stall_cnt = + exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt); +} + +static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg; + int i; + + exynos_ufs_set_pwm_clk_div(ufs); + + exynos_ufs_enable_ov_tm(hba); + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i), + ufs->drv_data->uic_attr->rx_filler_enable); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i), + RX_LINERESET(t_cfg->rx_linereset)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i), + RX_BASE_NVAL_L(t_cfg->rx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i), + RX_BASE_NVAL_H(t_cfg->rx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i), + RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i), + RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i), + RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i), + RX_OV_STALL_CNT(t_cfg->rx_stall_cnt)); + } + + for_each_ufs_tx_lane(ufs, i) { + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i), + TX_LINERESET_P(t_cfg->tx_linereset_p)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i), + TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i), + TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i), + TX_BASE_NVAL_L(t_cfg->tx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i), + TX_BASE_NVAL_H(t_cfg->tx_base_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i), + TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i), + TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i), + TX_OV_H8_ENTER_EN | + TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt)); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i), + ufs->drv_data->uic_attr->tx_min_activatetime); + } + + exynos_ufs_disable_ov_tm(hba); +} + +static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + int i; + + exynos_ufs_enable_ov_tm(hba); + + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i), + attr->rx_hs_g1_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i), + attr->rx_hs_g2_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i), + attr->rx_hs_g3_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i), + attr->rx_hs_g1_prep_sync_len_cap); + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i), + attr->rx_hs_g2_prep_sync_len_cap); + 
ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i), + attr->rx_hs_g3_prep_sync_len_cap); + } + + if (attr->rx_adv_fine_gran_sup_en == 0) { + for_each_ufs_rx_lane(ufs, i) { + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0); + + if (attr->rx_min_actv_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP, + i), attr->rx_min_actv_time_cap); + + if (attr->rx_hibern8_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i), + attr->rx_hibern8_time_cap); + } + } else if (attr->rx_adv_fine_gran_sup_en == 1) { + for_each_ufs_rx_lane(ufs, i) { + if (attr->rx_adv_fine_gran_step) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, + i), RX_ADV_FINE_GRAN_STEP( + attr->rx_adv_fine_gran_step)); + + if (attr->rx_adv_min_actv_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL( + RX_ADV_MIN_ACTIVATETIME_CAP, i), + attr->rx_adv_min_actv_time_cap); + + if (attr->rx_adv_hibern8_time_cap) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP, + i), + attr->rx_adv_hibern8_time_cap); + } + } + + exynos_ufs_disable_ov_tm(hba); +} + +static void exynos_ufs_establish_connt(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + enum { + DEV_ID = 0x00, + PEER_DEV_ID = 0x01, + PEER_CPORT_ID = 0x00, + TRAFFIC_CLASS = 0x00, + }; + + /* allow cport attributes to be set */ + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE); + + /* local unipro attributes */ + ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID); + ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), TRUE); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED); +} + +static void exynos_ufs_config_smu(struct exynos_ufs *ufs) +{ + u32 reg, val; + + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); + + /* make encryption disabled by default */ + reg = ufsp_readl(ufs, UFSPRSECURITY); + ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY); + ufsp_writel(ufs, 0x0, UFSPSBEGIN0); + ufsp_writel(ufs, 0xffffffff, UFSPSEND0); + ufsp_writel(ufs, 0xff, UFSPSLUN0); + ufsp_writel(ufs, 0xf1, UFSPSCTRL0); + + exynos_ufs_auto_ctrl_hcc_restore(ufs, &val); +} + +static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr) +{ + struct ufs_hba *hba = ufs->hba; + u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx); + u32 mask, sync_len; + enum { + SYNC_LEN_G1 = 80 * 1000, /* 80us */ + SYNC_LEN_G2 = 40 * 1000, /* 44us */ + SYNC_LEN_G3 = 20 * 1000, /* 20us */ + }; + int i; + + if (g == 1) + sync_len = SYNC_LEN_G1; + else if (g == 2) + sync_len = SYNC_LEN_G2; + else if (g == 3) + sync_len = SYNC_LEN_G3; + else + return; + + mask = exynos_ufs_calc_time_cntr(ufs, sync_len); + mask = (mask >> 8) & 0xff; + + exynos_ufs_enable_ov_tm(hba); + + for_each_ufs_rx_lane(ufs, i) + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask); + + exynos_ufs_disable_ov_tm(hba); +} + +static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct phy *generic_phy = ufs->phy; + struct ufs_dev_params ufs_exynos_cap; + int ret; + + if (!dev_req_params) { + pr_err("%s: incoming dev_req_params is NULL\n", __func__); + ret = -EINVAL; + 
goto out; + } + + + ufs_exynos_cap.tx_lanes = UFS_EXYNOS_LIMIT_NUM_LANES_TX; + ufs_exynos_cap.rx_lanes = UFS_EXYNOS_LIMIT_NUM_LANES_RX; + ufs_exynos_cap.hs_rx_gear = UFS_EXYNOS_LIMIT_HSGEAR_RX; + ufs_exynos_cap.hs_tx_gear = UFS_EXYNOS_LIMIT_HSGEAR_TX; + ufs_exynos_cap.pwm_rx_gear = UFS_EXYNOS_LIMIT_PWMGEAR_RX; + ufs_exynos_cap.pwm_tx_gear = UFS_EXYNOS_LIMIT_PWMGEAR_TX; + ufs_exynos_cap.rx_pwr_pwm = UFS_EXYNOS_LIMIT_RX_PWR_PWM; + ufs_exynos_cap.tx_pwr_pwm = UFS_EXYNOS_LIMIT_TX_PWR_PWM; + ufs_exynos_cap.rx_pwr_hs = UFS_EXYNOS_LIMIT_RX_PWR_HS; + ufs_exynos_cap.tx_pwr_hs = UFS_EXYNOS_LIMIT_TX_PWR_HS; + ufs_exynos_cap.hs_rate = UFS_EXYNOS_LIMIT_HS_RATE; + ufs_exynos_cap.desired_working_mode = + UFS_EXYNOS_LIMIT_DESIRED_MODE; + + ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap, + dev_max_params, dev_req_params); + if (ret) { + pr_err("%s: failed to determine capabilities\n", __func__); + goto out; + } + + if (ufs->drv_data->pre_pwr_change) + ufs->drv_data->pre_pwr_change(ufs, dev_req_params); + + if (ufshcd_is_hs_mode(dev_req_params)) { + exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params); + + switch (dev_req_params->hs_rate) { + case PA_HS_MODE_A: + case PA_HS_MODE_B: + phy_calibrate(generic_phy); + break; + } + } + + return 0; +out: + return ret; +} + +#define PWR_MODE_STR_LEN 64 +static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba, + struct ufs_pa_layer_attr *pwr_max, + struct ufs_pa_layer_attr *pwr_req) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct phy *generic_phy = ufs->phy; + int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx); + int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx); + char pwr_str[PWR_MODE_STR_LEN] = ""; + + /* let default be PWM Gear 1, Lane 1 */ + if (!gear) + gear = 1; + + if (!lanes) + lanes = 1; + + if (ufs->drv_data->post_pwr_change) + ufs->drv_data->post_pwr_change(ufs, pwr_req); + + if ((ufshcd_is_hs_mode(pwr_req))) { + switch (pwr_req->hs_rate) { + case PA_HS_MODE_A: + case PA_HS_MODE_B: + phy_calibrate(generic_phy); + break; + } + + snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d", + "FAST", pwr_req->hs_rate == PA_HS_MODE_A ? 
"A" : "B", + gear, lanes); + } else { + snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d", + "SLOW", gear, lanes); + } + + dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str); + + return 0; +} + +static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba, + int tag, bool op) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + u32 type; + + type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE); + + if (op) + hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE); + else + hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE); +} + +static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba, + int tag, u8 func) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + u32 type; + + type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE); + + switch (func) { + case UFS_ABORT_TASK: + case UFS_QUERY_TASK: + hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE); + break; + case UFS_ABORT_TASK_SET: + case UFS_CLEAR_TASK_SET: + case UFS_LOGICAL_RESET: + case UFS_QUERY_TASK_SET: + hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE); + break; + } +} + +static int exynos_ufs_phy_init(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + struct phy *generic_phy = ufs->phy; + int ret = 0; + + if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) { + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES), + &ufs->avail_ln_rx); + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES), + &ufs->avail_ln_tx); + WARN(ufs->avail_ln_rx != ufs->avail_ln_tx, + "available data lane is not equal(rx:%d, tx:%d)\n", + ufs->avail_ln_rx, ufs->avail_ln_tx); + } + + phy_set_bus_width(generic_phy, ufs->avail_ln_rx); + ret = phy_init(generic_phy); + if (ret) { + dev_err(hba->dev, "%s: phy init failed, ret = %d\n", + __func__, ret); + goto out_exit_phy; + } + + return 0; + +out_exit_phy: + phy_exit(generic_phy); + + return ret; +} + +static void exynos_ufs_config_unipro(struct exynos_ufs *ufs) +{ + struct ufs_hba *hba = ufs->hba; + + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD), + DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate)); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS), + ufs->drv_data->uic_attr->tx_trailingclks); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), + ufs->drv_data->uic_attr->pa_dbg_option_suite); +} + +static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index) +{ + switch (index) { + case UNIPRO_L1_5: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER); + break; + case UNIPRO_L2: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER); + break; + case UNIPRO_L3: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER); + break; + case UNIPRO_L4: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER); + break; + case UNIPRO_DME: + hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER); + break; + } +} + +static int exynos_ufs_pre_link(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + /* hci */ + exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2); + exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3); + exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4); + exynos_ufs_set_unipro_pclk_div(ufs); + + /* unipro */ + exynos_ufs_config_unipro(ufs); + + /* m-phy */ + exynos_ufs_phy_init(ufs); + exynos_ufs_config_phy_time_attr(ufs); + exynos_ufs_config_phy_cap_attr(ufs); + + if (ufs->drv_data->pre_link) + ufs->drv_data->pre_link(ufs); + + return 0; +} + +static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs) +{ + u32 val; + + val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / 
CNTR_DIV_VAL); + hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL); +} + +static int exynos_ufs_post_link(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct phy *generic_phy = ufs->phy; + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + exynos_ufs_establish_connt(ufs); + exynos_ufs_fit_aggr_timeout(ufs); + + hci_writel(ufs, 0xa, HCI_DATA_REORDER); + hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE); + hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE); + hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE); + hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE); + hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN); + + if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB) + ufshcd_dme_set(hba, + UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), TRUE); + + if (attr->pa_granularity) { + exynos_ufs_enable_dbg_mode(hba); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY), + attr->pa_granularity); + exynos_ufs_disable_dbg_mode(hba); + + if (attr->pa_tactivate) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), + attr->pa_tactivate); + if (attr->pa_hibern8time && + !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER)) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + attr->pa_hibern8time); + } + + if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) { + if (!attr->pa_granularity) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), + &attr->pa_granularity); + if (!attr->pa_hibern8time) + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), + &attr->pa_hibern8time); + /* + * not wait for HIBERN8 time to exit hibernation + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0); + + if (attr->pa_granularity < 1 || attr->pa_granularity > 6) { + /* Valid range for granularity: 1 ~ 6 */ + dev_warn(hba->dev, + "%s: pa_granularity %d is invalid, assuming backwards compatibility\n", + __func__, + attr->pa_granularity); + attr->pa_granularity = 6; + } + } + + phy_calibrate(generic_phy); + + if (ufs->drv_data->post_link) + ufs->drv_data->post_link(ufs); + + return 0; +} + +static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs) +{ + struct device_node *np = dev->of_node; + struct exynos_ufs_drv_data *drv_data = &exynos_ufs_drvs; + struct exynos_ufs_uic_attr *attr; + int ret = 0; + + while (drv_data->compatible) { + if (of_device_is_compatible(np, drv_data->compatible)) { + ufs->drv_data = drv_data; + break; + } + drv_data++; + } + + if (ufs->drv_data && ufs->drv_data->uic_attr) { + attr = ufs->drv_data->uic_attr; + } else { + dev_err(dev, "failed to get uic attributes\n"); + ret = -EINVAL; + goto out; + } + + ufs->pclk_avail_min = PCLK_AVAIL_MIN; + ufs->pclk_avail_max = PCLK_AVAIL_MAX; + + attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN; + attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL; + attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP; + attr->pa_granularity = PA_GRANULARITY_VAL; + attr->pa_tactivate = PA_TACTIVATE_VAL; + attr->pa_hibern8time = PA_HIBERN8TIME_VAL; + +out: + return ret; +} + +static int exynos_ufs_init(struct ufs_hba *hba) +{ + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct exynos_ufs *ufs; + struct resource *res; + int ret; + + ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL); + if (!ufs) + return -ENOMEM; + + /* exynos-specific hci */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vs_hci"); + ufs->reg_hci = devm_ioremap_resource(dev, res); + if (IS_ERR(ufs->reg_hci)) { + dev_err(dev, "cannot ioremap for 
hci vendor register\n"); + return PTR_ERR(ufs->reg_hci); + } + + /* unipro */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "unipro"); + ufs->reg_unipro = devm_ioremap_resource(dev, res); + if (IS_ERR(ufs->reg_unipro)) { + dev_err(dev, "cannot ioremap for unipro register\n"); + return PTR_ERR(ufs->reg_unipro); + } + + /* ufs protector */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ufsp"); + ufs->reg_ufsp = devm_ioremap_resource(dev, res); + if (IS_ERR(ufs->reg_ufsp)) { + dev_err(dev, "cannot ioremap for ufs protector register\n"); + return PTR_ERR(ufs->reg_ufsp); + } + + ret = exynos_ufs_parse_dt(dev, ufs); + if (ret) { + dev_err(dev, "failed to get dt info.\n"); + goto out; + } + + ufs->phy = devm_phy_get(dev, "ufs-phy"); + if (IS_ERR(ufs->phy)) { + ret = PTR_ERR(ufs->phy); + dev_err(dev, "failed to get ufs-phy\n"); + goto out; + } + + ret = phy_power_on(ufs->phy); + if (ret) + goto phy_off; + + ufs->hba = hba; + ufs->opts = ufs->drv_data->opts; + ufs->rx_sel_idx = PA_MAXDATALANES; + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX) + ufs->rx_sel_idx = 0; + hba->priv = (void *)ufs; + hba->quirks = ufs->drv_data->quirks; + if (ufs->drv_data->drv_init) { + ret = ufs->drv_data->drv_init(dev, ufs); + if (ret) { + dev_err(dev, "failed to init drv-data\n"); + goto out; + } + } + + ret = exynos_ufs_get_clk_info(ufs); + if (ret) + goto out; + exynos_ufs_specify_phy_time_attr(ufs); + exynos_ufs_config_smu(ufs); + return 0; + +phy_off: + phy_power_off(ufs->phy); +out: + hba->priv = NULL; + return ret; +} + +static int exynos_ufs_host_reset(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + unsigned long timeout = jiffies + msecs_to_jiffies(1); + u32 val; + int ret = 0; + + exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val); + + hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST); + + do { + if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK)) + goto out; + } while (time_before(jiffies, timeout)); + + dev_err(hba->dev, "timeout host sw-reset\n"); + ret = -ETIMEDOUT; + +out: + exynos_ufs_auto_ctrl_hcc_restore(ufs, &val); + return ret; +} + +static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + hci_writel(ufs, 0 << 0, HCI_GPIO_OUT); + udelay(5); + hci_writel(ufs, 1 << 0, HCI_GPIO_OUT); +} + +static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; + + if (!enter) { + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_disable_auto_ctrl_hcc(ufs); + exynos_ufs_ungate_clks(ufs); + + if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) { + const unsigned int granularity_tbl[] = { + 1, 4, 8, 16, 32, 100 + }; + int h8_time = attr->pa_hibern8time * + granularity_tbl[attr->pa_granularity - 1]; + unsigned long us; + s64 delta; + + do { + delta = h8_time - ktime_us_delta(ktime_get(), + ufs->entry_hibern8_t); + if (delta <= 0) + break; + + us = min_t(s64, delta, USEC_PER_MSEC); + if (us >= 10) + usleep_range(us, us + 10); + } while (1); + } + } +} + +static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!enter) { + u32 cur_mode = 0; + u32 pwrmode; + + if (ufshcd_is_hs_mode(&ufs->dev_req_params)) + pwrmode = FAST_MODE; + else + pwrmode = SLOW_MODE; + + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode); + if (cur_mode != (pwrmode << 4 | pwrmode)) { + dev_warn(hba->dev, "%s: power mode 
change\n", __func__); + hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf; + hba->pwr_info.pwr_tx = cur_mode & 0xf; + ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); + } + + if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)) + exynos_ufs_establish_connt(ufs); + } else { + ufs->entry_hibern8_t = ktime_get(); + exynos_ufs_gate_clks(ufs); + if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + } +} + +static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + int ret = 0; + + switch (status) { + case PRE_CHANGE: + ret = exynos_ufs_host_reset(hba); + if (ret) + return ret; + exynos_ufs_dev_hw_reset(hba); + break; + case POST_CHANGE: + exynos_ufs_calc_pwm_clk_div(ufs); + if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)) + exynos_ufs_enable_auto_ctrl_hcc(ufs); + break; + } + + return ret; +} + +static int exynos_ufs_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + int ret = 0; + + switch (status) { + case PRE_CHANGE: + ret = exynos_ufs_pre_link(hba); + break; + case POST_CHANGE: + ret = exynos_ufs_post_link(hba); + break; + } + + return ret; +} + +static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *dev_max_params, + struct ufs_pa_layer_attr *dev_req_params) +{ + int ret = 0; + + switch (status) { + case PRE_CHANGE: + ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params, + dev_req_params); + break; + case POST_CHANGE: + ret = exynos_ufs_post_pwr_mode(hba, NULL, dev_req_params); + break; + } + + return ret; +} + +static void exynos_ufs_hibern8_notify(struct ufs_hba *hba, + enum uic_cmd_dme enter, + enum ufs_notify_change_status notify) +{ + switch ((u8)notify) { + case PRE_CHANGE: + exynos_ufs_pre_hibern8(hba, enter); + break; + case POST_CHANGE: + exynos_ufs_post_hibern8(hba, enter); + break; + } +} + +static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!ufshcd_is_link_active(hba)) + phy_power_off(ufs->phy); + + return 0; +} + +static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) +{ + struct exynos_ufs *ufs = ufshcd_get_variant(hba); + + if (!ufshcd_is_link_active(hba)) + phy_power_on(ufs->phy); + + exynos_ufs_config_smu(ufs); + + return 0; +} + +static struct ufs_hba_variant_ops ufs_hba_exynos_ops = { + .name = "exynos_ufs", + .init = exynos_ufs_init, + .hce_enable_notify = exynos_ufs_hce_enable_notify, + .link_startup_notify = exynos_ufs_link_startup_notify, + .pwr_change_notify = exynos_ufs_pwr_change_notify, + .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req, + .setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req, + .hibern8_notify = exynos_ufs_hibern8_notify, + .suspend = exynos_ufs_suspend, + .resume = exynos_ufs_resume, +}; + +static int exynos_ufs_probe(struct platform_device *pdev) +{ + int err; + struct device *dev = &pdev->dev; + + err = ufshcd_pltfrm_init(pdev, &ufs_hba_exynos_ops); + if (err) + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); + + return err; +} + +static int exynos_ufs_remove(struct platform_device *pdev) +{ + struct ufs_hba *hba = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&(pdev)->dev); + ufshcd_remove(hba); + return 0; +} + +struct exynos_ufs_drv_data exynos_ufs_drvs = { + + .compatible = "samsung,exynos7-ufs", + .uic_attr = &exynos7_uic_attr, + .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN | 
+ UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR | + UFSHCI_QUIRK_BROKEN_HCE | + UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR | + UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR, + .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL | + EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL | + EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX | + EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB | + EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER, + .drv_init = exynos7_ufs_drv_init, + .pre_link = exynos7_ufs_pre_link, + .post_link = exynos7_ufs_post_link, + .pre_pwr_change = exynos7_ufs_pre_pwr_change, + .post_pwr_change = exynos7_ufs_post_pwr_change, +}; + +static const struct of_device_id exynos_ufs_of_match[] = { + { .compatible = "samsung,exynos7-ufs", + .data = &exynos_ufs_drvs }, + {}, +}; + +static const struct dev_pm_ops exynos_ufs_pm_ops = { + .suspend = ufshcd_pltfrm_suspend, + .resume = ufshcd_pltfrm_resume, + .runtime_suspend = ufshcd_pltfrm_runtime_suspend, + .runtime_resume = ufshcd_pltfrm_runtime_resume, + .runtime_idle = ufshcd_pltfrm_runtime_idle, +}; + +static struct platform_driver exynos_ufs_pltform = { + .probe = exynos_ufs_probe, + .remove = exynos_ufs_remove, + .shutdown = ufshcd_pltfrm_shutdown, + .driver = { + .name = "exynos-ufshc", + .pm = &exynos_ufs_pm_ops, + .of_match_table = of_match_ptr(exynos_ufs_of_match), + }, +}; +module_platform_driver(exynos_ufs_pltform); + +MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>"); +MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>"); +MODULE_DESCRIPTION("Exynos UFS HCI Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h new file mode 100644 index 000000000000..76d6e39efb2f --- /dev/null +++ b/drivers/scsi/ufs/ufs-exynos.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * UFS Host Controller driver for Exynos specific extensions + * + * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd. 
+ * + */ + +#ifndef _UFS_EXYNOS_H_ +#define _UFS_EXYNOS_H_ + +/* + * UNIPRO registers + */ +#define UNIPRO_DBG_FORCE_DME_CTRL_STATE 0x150 + +/* + * MIBs for PA debug registers + */ +#define PA_DBG_CLK_PERIOD 0x9514 +#define PA_DBG_TXPHY_CFGUPDT 0x9518 +#define PA_DBG_RXPHY_CFGUPDT 0x9519 +#define PA_DBG_MODE 0x9529 +#define PA_DBG_SKIP_RESET_PHY 0x9539 +#define PA_DBG_OV_TM 0x9540 +#define PA_DBG_SKIP_LINE_RESET 0x9541 +#define PA_DBG_LINE_RESET_REQ 0x9543 +#define PA_DBG_OPTION_SUITE 0x9564 +#define PA_DBG_OPTION_SUITE_DYN 0x9565 + +/* + * MIBs for Transport Layer debug registers + */ +#define T_DBG_SKIP_INIT_HIBERN8_EXIT 0xc001 + +/* + * Exynos MPHY attributes + */ +#define TX_LINERESET_N_VAL 0x0277 +#define TX_LINERESET_N(v) (((v) >> 10) & 0xFF) +#define TX_LINERESET_P_VAL 0x027D +#define TX_LINERESET_P(v) (((v) >> 12) & 0xFF) +#define TX_OV_SLEEP_CNT_TIMER 0x028E +#define TX_OV_H8_ENTER_EN (1 << 7) +#define TX_OV_SLEEP_CNT(v) (((v) >> 5) & 0x7F) +#define TX_HIGH_Z_CNT_11_08 0x028C +#define TX_HIGH_Z_CNT_H(v) (((v) >> 8) & 0xF) +#define TX_HIGH_Z_CNT_07_00 0x028D +#define TX_HIGH_Z_CNT_L(v) ((v) & 0xFF) +#define TX_BASE_NVAL_07_00 0x0293 +#define TX_BASE_NVAL_L(v) ((v) & 0xFF) +#define TX_BASE_NVAL_15_08 0x0294 +#define TX_BASE_NVAL_H(v) (((v) >> 8) & 0xFF) +#define TX_GRAN_NVAL_07_00 0x0295 +#define TX_GRAN_NVAL_L(v) ((v) & 0xFF) +#define TX_GRAN_NVAL_10_08 0x0296 +#define TX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3) + +#define RX_FILLER_ENABLE 0x0316 +#define RX_FILLER_EN (1 << 1) +#define RX_LINERESET_VAL 0x0317 +#define RX_LINERESET(v) (((v) >> 12) & 0xFF) +#define RX_LCC_IGNORE 0x0318 +#define RX_SYNC_MASK_LENGTH 0x0321 +#define RX_HIBERN8_WAIT_VAL_BIT_20_16 0x0331 +#define RX_HIBERN8_WAIT_VAL_BIT_15_08 0x0332 +#define RX_HIBERN8_WAIT_VAL_BIT_07_00 0x0333 +#define RX_OV_SLEEP_CNT_TIMER 0x0340 +#define RX_OV_SLEEP_CNT(v) (((v) >> 6) & 0x1F) +#define RX_OV_STALL_CNT_TIMER 0x0341 +#define RX_OV_STALL_CNT(v) (((v) >> 4) & 0xFF) +#define RX_BASE_NVAL_07_00 0x0355 +#define RX_BASE_NVAL_L(v) ((v) & 0xFF) +#define RX_BASE_NVAL_15_08 0x0354 +#define RX_BASE_NVAL_H(v) (((v) >> 8) & 0xFF) +#define RX_GRAN_NVAL_07_00 0x0353 +#define RX_GRAN_NVAL_L(v) ((v) & 0xFF) +#define RX_GRAN_NVAL_10_08 0x0352 +#define RX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3) + +#define CMN_PWM_CLK_CTRL 0x0402 +#define PWM_CLK_CTRL_MASK 0x3 + +#define IATOVAL_NSEC 20000 /* unit: ns */ +#define UNIPRO_PCLK_PERIOD(ufs) (NSEC_PER_SEC / ufs->pclk_rate) + +struct exynos_ufs; + +/* vendor specific pre-defined parameters */ +#define SLOW 1 +#define FAST 2 + +#define UFS_EXYNOS_LIMIT_NUM_LANES_RX 2 +#define UFS_EXYNOS_LIMIT_NUM_LANES_TX 2 +#define UFS_EXYNOS_LIMIT_HSGEAR_RX UFS_HS_G3 +#define UFS_EXYNOS_LIMIT_HSGEAR_TX UFS_HS_G3 +#define UFS_EXYNOS_LIMIT_PWMGEAR_RX UFS_PWM_G4 +#define UFS_EXYNOS_LIMIT_PWMGEAR_TX UFS_PWM_G4 +#define UFS_EXYNOS_LIMIT_RX_PWR_PWM SLOW_MODE +#define UFS_EXYNOS_LIMIT_TX_PWR_PWM SLOW_MODE +#define UFS_EXYNOS_LIMIT_RX_PWR_HS FAST_MODE +#define UFS_EXYNOS_LIMIT_TX_PWR_HS FAST_MODE +#define UFS_EXYNOS_LIMIT_HS_RATE PA_HS_MODE_B +#define UFS_EXYNOS_LIMIT_DESIRED_MODE FAST + +#define RX_ADV_FINE_GRAN_SUP_EN 0x1 +#define RX_ADV_FINE_GRAN_STEP_VAL 0x3 +#define RX_ADV_MIN_ACTV_TIME_CAP 0x9 + +#define PA_GRANULARITY_VAL 0x6 +#define PA_TACTIVATE_VAL 0x3 +#define PA_HIBERN8TIME_VAL 0x20 + +#define PCLK_AVAIL_MIN 70000000 +#define PCLK_AVAIL_MAX 133000000 + +struct exynos_ufs_uic_attr { + /* TX Attributes */ + unsigned int tx_trailingclks; + unsigned int tx_dif_p_nsec; + unsigned int tx_dif_n_nsec; + unsigned int 
tx_high_z_cnt_nsec; + unsigned int tx_base_unit_nsec; + unsigned int tx_gran_unit_nsec; + unsigned int tx_sleep_cnt; + unsigned int tx_min_activatetime; + /* RX Attributes */ + unsigned int rx_filler_enable; + unsigned int rx_dif_p_nsec; + unsigned int rx_hibern8_wait_nsec; + unsigned int rx_base_unit_nsec; + unsigned int rx_gran_unit_nsec; + unsigned int rx_sleep_cnt; + unsigned int rx_stall_cnt; + unsigned int rx_hs_g1_sync_len_cap; + unsigned int rx_hs_g2_sync_len_cap; + unsigned int rx_hs_g3_sync_len_cap; + unsigned int rx_hs_g1_prep_sync_len_cap; + unsigned int rx_hs_g2_prep_sync_len_cap; + unsigned int rx_hs_g3_prep_sync_len_cap; + /* Common Attributes */ + unsigned int cmn_pwm_clk_ctrl; + /* Internal Attributes */ + unsigned int pa_dbg_option_suite; + /* Changeable Attributes */ + unsigned int rx_adv_fine_gran_sup_en; + unsigned int rx_adv_fine_gran_step; + unsigned int rx_min_actv_time_cap; + unsigned int rx_hibern8_time_cap; + unsigned int rx_adv_min_actv_time_cap; + unsigned int rx_adv_hibern8_time_cap; + unsigned int pa_granularity; + unsigned int pa_tactivate; + unsigned int pa_hibern8time; +}; + +struct exynos_ufs_drv_data { + char *compatible; + struct exynos_ufs_uic_attr *uic_attr; + unsigned int quirks; + unsigned int opts; + /* SoC's specific operations */ + int (*drv_init)(struct device *dev, struct exynos_ufs *ufs); + int (*pre_link)(struct exynos_ufs *ufs); + int (*post_link)(struct exynos_ufs *ufs); + int (*pre_pwr_change)(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr); + int (*post_pwr_change)(struct exynos_ufs *ufs, + struct ufs_pa_layer_attr *pwr); +}; + +struct ufs_phy_time_cfg { + u32 tx_linereset_p; + u32 tx_linereset_n; + u32 tx_high_z_cnt; + u32 tx_base_n_val; + u32 tx_gran_n_val; + u32 tx_sleep_cnt; + u32 rx_linereset; + u32 rx_hibern8_wait; + u32 rx_base_n_val; + u32 rx_gran_n_val; + u32 rx_sleep_cnt; + u32 rx_stall_cnt; +}; + +struct exynos_ufs { + struct ufs_hba *hba; + struct phy *phy; + void __iomem *reg_hci; + void __iomem *reg_unipro; + void __iomem *reg_ufsp; + struct clk *clk_hci_core; + struct clk *clk_unipro_main; + struct clk *clk_apb; + u32 pclk_rate; + u32 pclk_div; + u32 pclk_avail_min; + u32 pclk_avail_max; + u32 mclk_rate; + int avail_ln_rx; + int avail_ln_tx; + int rx_sel_idx; + struct ufs_pa_layer_attr dev_req_params; + struct ufs_phy_time_cfg t_cfg; + ktime_t entry_hibern8_t; + struct exynos_ufs_drv_data *drv_data; + + u32 opts; +#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0) +#define EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB BIT(1) +#define EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL BIT(2) +#define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX BIT(3) +#define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER BIT(4) +}; + +#define for_each_ufs_rx_lane(ufs, i) \ + for (i = (ufs)->rx_sel_idx; \ + i < (ufs)->rx_sel_idx + (ufs)->avail_ln_rx; i++) +#define for_each_ufs_tx_lane(ufs, i) \ + for (i = 0; i < (ufs)->avail_ln_tx; i++) + +#define EXYNOS_UFS_MMIO_FUNC(name) \ +static inline void name##_writel(struct exynos_ufs *ufs, u32 val, u32 reg)\ +{ \ + writel(val, ufs->reg_##name + reg); \ +} \ + \ +static inline u32 name##_readl(struct exynos_ufs *ufs, u32 reg) \ +{ \ + return readl(ufs->reg_##name + reg); \ +} + +EXYNOS_UFS_MMIO_FUNC(hci); +EXYNOS_UFS_MMIO_FUNC(unipro); +EXYNOS_UFS_MMIO_FUNC(ufsp); +#undef EXYNOS_UFS_MMIO_FUNC + +long exynos_ufs_calc_time_cntr(struct exynos_ufs *, long); + +static inline void exynos_ufs_enable_ov_tm(struct ufs_hba *hba) +{ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), TRUE); +} + +static inline void exynos_ufs_disable_ov_tm(struct 
ufs_hba *hba) +{ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), FALSE); +} + +static inline void exynos_ufs_enable_dbg_mode(struct ufs_hba *hba) +{ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), TRUE); +} + +static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba) +{ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), FALSE); +} + +struct exynos_ufs_drv_data exynos_ufs_drvs; + +struct exynos_ufs_uic_attr exynos7_uic_attr = { + .tx_trailingclks = 0x10, + .tx_dif_p_nsec = 3000000, /* unit: ns */ + .tx_dif_n_nsec = 1000000, /* unit: ns */ + .tx_high_z_cnt_nsec = 20000, /* unit: ns */ + .tx_base_unit_nsec = 100000, /* unit: ns */ + .tx_gran_unit_nsec = 4000, /* unit: ns */ + .tx_sleep_cnt = 1000, /* unit: ns */ + .tx_min_activatetime = 0xa, + .rx_filler_enable = 0x2, + .rx_dif_p_nsec = 1000000, /* unit: ns */ + .rx_hibern8_wait_nsec = 4000000, /* unit: ns */ + .rx_base_unit_nsec = 100000, /* unit: ns */ + .rx_gran_unit_nsec = 4000, /* unit: ns */ + .rx_sleep_cnt = 1280, /* unit: ns */ + .rx_stall_cnt = 320, /* unit: ns */ + .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf), + .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf), + .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf), + .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf), + .pa_dbg_option_suite = 0x30103, +}; +#endif /* _UFS_EXYNOS_H_ */ diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index d56ce8d97d4e..29cd017c1aa0 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -12,9 +12,11 @@ #include <linux/of_address.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/regulator/consumer.h> #include <linux/soc/mediatek/mtk_sip_svc.h> #include "ufshcd.h" +#include "ufshcd-crypto.h" #include "ufshcd-pltfrm.h" #include "ufs_quirks.h" #include "unipro.h" @@ -24,6 +26,9 @@ arm_smccc_smc(MTK_SIP_UFS_CONTROL, \ cmd, val, 0, 0, 0, 0, 0, &(res)) +#define ufs_mtk_crypto_ctrl(res, enable) \ + ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res) + #define ufs_mtk_ref_clk_notify(on, res) \ ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res) @@ -31,6 +36,8 @@ ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res) static struct ufs_dev_fix ufs_mtk_dev_fixups[] = { + UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_AFTER_LPM), UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR", UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES), END_FIX @@ -72,6 +79,18 @@ static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) } } +static void ufs_mtk_crypto_enable(struct ufs_hba *hba) +{ + struct arm_smccc_res res; + + ufs_mtk_crypto_ctrl(res, 1); + if (res.a0) { + dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n", + __func__, res.a0); + hba->caps &= ~UFSHCD_CAP_CRYPTO; + } +} + static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, enum ufs_notify_change_status status) { @@ -82,6 +101,9 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, hba->vps->hba_enable_delay_us = 0; else hba->vps->hba_enable_delay_us = 600; + + if (hba->caps & UFSHCD_CAP_CRYPTO) + ufs_mtk_crypto_enable(hba); } return 0; @@ -112,6 +134,12 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba) if (err) host->mphy = NULL; + /* + * Allow unbound mphy because not every platform needs specific + * mphy control. 
+ */ + if (err == -ENODEV) + err = 0; return err; } @@ -120,7 +148,7 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); struct arm_smccc_res res; - unsigned long timeout; + ktime_t timeout, time_checked; u32 value; if (host->ref_clk_enabled == on) @@ -135,8 +163,9 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) } /* Wait for ack */ - timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS); + timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US); do { + time_checked = ktime_get(); value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); /* Wait until ack bit equals to req bit */ @@ -144,7 +173,7 @@ static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) goto out; usleep_range(100, 200); - } while (time_before(jiffies, timeout)); + } while (ktime_before(time_checked, timeout)); dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); @@ -177,15 +206,47 @@ static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba, host->ref_clk_ungating_wait_us = ungating_us; } -static u32 ufs_mtk_link_get_state(struct ufs_hba *hba) +static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, + unsigned long max_wait_ms) { + ktime_t timeout, time_checked; u32 val; - ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); - val = ufshcd_readl(hba, REG_UFS_PROBE); - val = val >> 28; + timeout = ktime_add_us(ktime_get(), ms_to_ktime(max_wait_ms)); + do { + time_checked = ktime_get(); + ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); + val = ufshcd_readl(hba, REG_UFS_PROBE); + val = val >> 28; + + if (val == state) + return 0; + + /* Sleep for max. 200us */ + usleep_range(100, 200); + } while (ktime_before(time_checked, timeout)); - return val; + if (val == state) + return 0; + + return -ETIMEDOUT; +} + +static void ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct phy *mphy = host->mphy; + + if (!mphy) + return; + + if (on && !host->mphy_powered_on) + phy_power_on(mphy); + else if (!on && host->mphy_powered_on) + phy_power_off(mphy); + else + return; + host->mphy_powered_on = on; } /** @@ -201,6 +262,7 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, { struct ufs_mtk_host *host = ufshcd_get_variant(hba); int ret = 0; + bool clk_pwr_off = false; /* * In case ufs_mtk_init() is not yet done, simply ignore. @@ -211,22 +273,29 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, return 0; if (!on && status == PRE_CHANGE) { - if (!ufshcd_is_link_active(hba)) { - ufs_mtk_setup_ref_clk(hba, on); - ret = phy_power_off(host->mphy); - } else { + if (ufshcd_is_link_off(hba)) { + clk_pwr_off = true; + } else if (ufshcd_is_link_hibern8(hba) || + (!ufshcd_can_hibern8_during_gating(hba) && + ufshcd_is_auto_hibern8_enabled(hba))) { /* - * Gate ref-clk if link state is in Hibern8 - * triggered by Auto-Hibern8. + * Gate ref-clk and poweroff mphy if link state is in + * OFF or Hibern8 by either Auto-Hibern8 or + * ufshcd_link_state_transition(). 
*/ - if (!ufshcd_can_hibern8_during_gating(hba) && - ufshcd_is_auto_hibern8_enabled(hba) && - ufs_mtk_link_get_state(hba) == - VS_LINK_HIBERN8) - ufs_mtk_setup_ref_clk(hba, on); + ret = ufs_mtk_wait_link_state(hba, + VS_LINK_HIBERN8, + 15); + if (!ret) + clk_pwr_off = true; + } + + if (clk_pwr_off) { + ufs_mtk_setup_ref_clk(hba, on); + ufs_mtk_mphy_power_on(hba, on); } } else if (on && status == POST_CHANGE) { - ret = phy_power_on(host->mphy); + ufs_mtk_mphy_power_on(hba, on); ufs_mtk_setup_ref_clk(hba, on); } @@ -269,6 +338,9 @@ static int ufs_mtk_init(struct ufs_hba *hba) /* Enable clock-gating */ hba->caps |= UFSHCD_CAP_CLK_GATING; + /* Enable inline encryption */ + hba->caps |= UFSHCD_CAP_CRYPTO; + /* Enable WriteBooster */ hba->caps |= UFSHCD_CAP_WB_EN; hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); @@ -502,10 +574,22 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba) return 0; } +static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm) +{ + if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc) + return; + + if (lpm & !hba->vreg_info.vcc->enabled) + regulator_set_mode(hba->vreg_info.vccq2->reg, + REGULATOR_MODE_IDLE); + else if (!lpm) + regulator_set_mode(hba->vreg_info.vccq2->reg, + REGULATOR_MODE_NORMAL); +} + static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) { int err; - struct ufs_mtk_host *host = ufshcd_get_variant(hba); if (ufshcd_is_link_hibern8(hba)) { err = ufs_mtk_link_set_lpm(hba); @@ -518,23 +602,23 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) ufshcd_set_link_off(hba); return -EAGAIN; } + /* + * Make sure no error will be returned to prevent + * ufshcd_suspend() re-enabling regulators while vreg is still + * in low-power mode. + */ + ufs_mtk_vreg_set_lpm(hba, true); } - if (!ufshcd_is_link_active(hba)) - phy_power_off(host->mphy); - return 0; } static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { - struct ufs_mtk_host *host = ufshcd_get_variant(hba); int err; - if (!ufshcd_is_link_active(hba)) - phy_power_on(host->mphy); - if (ufshcd_is_link_hibern8(hba)) { + ufs_mtk_vreg_set_lpm(hba, false); err = ufs_mtk_link_set_hpm(hba); if (err) { err = ufshcd_link_recovery(hba); diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h index 5bbd3e9cbae2..8ed24d5fcff9 100644 --- a/drivers/scsi/ufs/ufs-mediatek.h +++ b/drivers/scsi/ufs/ufs-mediatek.h @@ -28,7 +28,7 @@ #define REFCLK_REQUEST BIT(0) #define REFCLK_ACK BIT(1) -#define REFCLK_REQ_TIMEOUT_MS 3 +#define REFCLK_REQ_TIMEOUT_US 3000 /* * Vendor specific pre-defined parameters @@ -70,6 +70,7 @@ enum { */ #define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276) #define UFS_MTK_SIP_DEVICE_RESET BIT(1) +#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2) #define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) /* @@ -91,6 +92,7 @@ enum { struct ufs_mtk_host { struct ufs_hba *hba; struct phy *mphy; + bool mphy_powered_on; bool unipro_lpm; bool ref_clk_enabled; u16 ref_clk_ungating_wait_us; diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c new file mode 100644 index 000000000000..bbb0ad7590ec --- /dev/null +++ b/drivers/scsi/ufs/ufs-qcom-ice.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Qualcomm ICE (Inline Crypto Engine) support. + * + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. 
+ * Copyright 2019 Google LLC + */ + +#include <linux/platform_device.h> +#include <linux/qcom_scm.h> + +#include "ufshcd-crypto.h" +#include "ufs-qcom.h" + +#define AES_256_XTS_KEY_SIZE 64 + +/* QCOM ICE registers */ + +#define QCOM_ICE_REG_CONTROL 0x0000 +#define QCOM_ICE_REG_RESET 0x0004 +#define QCOM_ICE_REG_VERSION 0x0008 +#define QCOM_ICE_REG_FUSE_SETTING 0x0010 +#define QCOM_ICE_REG_PARAMETERS_1 0x0014 +#define QCOM_ICE_REG_PARAMETERS_2 0x0018 +#define QCOM_ICE_REG_PARAMETERS_3 0x001C +#define QCOM_ICE_REG_PARAMETERS_4 0x0020 +#define QCOM_ICE_REG_PARAMETERS_5 0x0024 + +/* QCOM ICE v3.X only */ +#define QCOM_ICE_GENERAL_ERR_STTS 0x0040 +#define QCOM_ICE_INVALID_CCFG_ERR_STTS 0x0030 +#define QCOM_ICE_GENERAL_ERR_MASK 0x0044 + +/* QCOM ICE v2.X only */ +#define QCOM_ICE_REG_NON_SEC_IRQ_STTS 0x0040 +#define QCOM_ICE_REG_NON_SEC_IRQ_MASK 0x0044 + +#define QCOM_ICE_REG_NON_SEC_IRQ_CLR 0x0048 +#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME1 0x0050 +#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME2 0x0054 +#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME1 0x0058 +#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME2 0x005C +#define QCOM_ICE_REG_STREAM1_BIST_ERROR_VEC 0x0060 +#define QCOM_ICE_REG_STREAM2_BIST_ERROR_VEC 0x0064 +#define QCOM_ICE_REG_STREAM1_BIST_FINISH_VEC 0x0068 +#define QCOM_ICE_REG_STREAM2_BIST_FINISH_VEC 0x006C +#define QCOM_ICE_REG_BIST_STATUS 0x0070 +#define QCOM_ICE_REG_BYPASS_STATUS 0x0074 +#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 +#define QCOM_ICE_REG_ENDIAN_SWAP 0x1004 +#define QCOM_ICE_REG_TEST_BUS_CONTROL 0x1010 +#define QCOM_ICE_REG_TEST_BUS_REG 0x1014 + +/* BIST ("built-in self-test"?) status flags */ +#define QCOM_ICE_BIST_STATUS_MASK 0xF0000000 + +#define QCOM_ICE_FUSE_SETTING_MASK 0x1 +#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 +#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 + +#define qcom_ice_writel(host, val, reg) \ + writel((val), (host)->ice_mmio + (reg)) +#define qcom_ice_readl(host, reg) \ + readl((host)->ice_mmio + (reg)) + +static bool qcom_ice_supported(struct ufs_qcom_host *host) +{ + struct device *dev = host->hba->dev; + u32 regval = qcom_ice_readl(host, QCOM_ICE_REG_VERSION); + int major = regval >> 24; + int minor = (regval >> 16) & 0xFF; + int step = regval & 0xFFFF; + + /* For now this driver only supports ICE version 3. */ + if (major != 3) { + dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n", + major, minor, step); + return false; + } + + dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n", + major, minor, step); + + /* If fuses are blown, ICE might not work in the standard way. 
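 * For instance, the two FORCE_HW_KEY fuses below presumably pin
 * keyslots to fuse-provisioned hardware keys that the kernel cannot
 * program or evict, so the check that follows treats any of the three
 * fuse bits as a reason to disable inline encryption outright rather
 * than program keys that would be silently ignored.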
*/ + regval = qcom_ice_readl(host, QCOM_ICE_REG_FUSE_SETTING); + if (regval & (QCOM_ICE_FUSE_SETTING_MASK | + QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK | + QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) { + dev_warn(dev, "Fuses are blown; ICE is unusable!\n"); + return false; + } + return true; +} + +int ufs_qcom_ice_init(struct ufs_qcom_host *host) +{ + struct ufs_hba *hba = host->hba; + struct device *dev = hba->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + int err; + + if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) & + MASK_CRYPTO_SUPPORT)) + return 0; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"); + if (!res) { + dev_warn(dev, "ICE registers not found\n"); + goto disable; + } + + if (!qcom_scm_ice_available()) { + dev_warn(dev, "ICE SCM interface not found\n"); + goto disable; + } + + host->ice_mmio = devm_ioremap_resource(dev, res); + if (IS_ERR(host->ice_mmio)) { + err = PTR_ERR(host->ice_mmio); + dev_err(dev, "Failed to map ICE registers; err=%d\n", err); + return err; + } + + if (!qcom_ice_supported(host)) + goto disable; + + return 0; + +disable: + dev_warn(dev, "Disabling inline encryption support\n"); + hba->caps &= ~UFSHCD_CAP_CRYPTO; + return 0; +} + +static void qcom_ice_low_power_mode_enable(struct ufs_qcom_host *host) +{ + u32 regval; + + regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL); + /* + * Enable low power mode sequence + * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0 + */ + regval |= 0x7000; + qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); +} + +static void qcom_ice_optimization_enable(struct ufs_qcom_host *host) +{ + u32 regval; + + /* ICE Optimizations Enable Sequence */ + regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL); + regval |= 0xD807100; + /* ICE HPG requires delay before writing */ + udelay(5); + qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); + udelay(5); +} + +int ufs_qcom_ice_enable(struct ufs_qcom_host *host) +{ + if (!(host->hba->caps & UFSHCD_CAP_CRYPTO)) + return 0; + qcom_ice_low_power_mode_enable(host); + qcom_ice_optimization_enable(host); + return ufs_qcom_ice_resume(host); +} + +/* Poll until all BIST bits are reset */ +static int qcom_ice_wait_bist_status(struct ufs_qcom_host *host) +{ + int count; + u32 reg; + + for (count = 0; count < 100; count++) { + reg = qcom_ice_readl(host, QCOM_ICE_REG_BIST_STATUS); + if (!(reg & QCOM_ICE_BIST_STATUS_MASK)) + break; + udelay(50); + } + if (reg) + return -ETIMEDOUT; + return 0; +} + +int ufs_qcom_ice_resume(struct ufs_qcom_host *host) +{ + int err; + + if (!(host->hba->caps & UFSHCD_CAP_CRYPTO)) + return 0; + + err = qcom_ice_wait_bist_status(host); + if (err) { + dev_err(host->hba->dev, "BIST status error (%d)\n", err); + return err; + } + return 0; +} + +/* + * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires + * vendor-specific SCM calls for this; it doesn't support the standard way. + */ +int ufs_qcom_ice_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot) +{ + union ufs_crypto_cap_entry cap; + union { + u8 bytes[AES_256_XTS_KEY_SIZE]; + u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)]; + } key; + int i; + int err; + + if (!(cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE)) + return qcom_scm_ice_invalidate_key(slot); + + /* Only AES-256-XTS has been tested so far. 
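 * Accordingly, the capability check below rejects everything else.
 * Note that a blk-crypto AES-256-XTS key is already double length:
 * AES_256_XTS_KEY_SIZE is 64 raw bytes, a 32-byte data key followed
 * by a 32-byte tweak key. The word swap further down compensates for
 * the SCM call byte-swapping each 32-bit word, so the device ends up
 * with the intended key. A worked example with illustrative values,
 * on a little-endian CPU:
 *
 *	key.words[0] = 0x03020100;	// in-memory bytes 00 01 02 03
 *	__cpu_to_be32s(&key.words[0]);	// in-memory bytes 03 02 01 00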
*/ + cap = hba->crypto_cap_array[cfg->crypto_cap_idx]; + if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS || + cap.key_size != UFS_CRYPTO_KEY_SIZE_256) { + dev_err_ratelimited(hba->dev, + "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n", + cap.algorithm_id, cap.key_size); + return -EINVAL; + } + + memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE); + + /* + * The SCM call byte-swaps the 32-bit words of the key. So we have to + * do the same, in order for the final key be correct. + */ + for (i = 0; i < ARRAY_SIZE(key.words); i++) + __cpu_to_be32s(&key.words[i]); + + err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE, + QCOM_SCM_ICE_CIPHER_AES_256_XTS, + cfg->data_unit_size); + memzero_explicit(&key, sizeof(key)); + return err; +} diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 2e6ddb5cdfc2..d0d75527830e 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -246,7 +246,7 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) mb(); } -/** +/* * ufs_qcom_host_reset - reset host controller and PHY */ static int ufs_qcom_host_reset(struct ufs_hba *hba) @@ -365,7 +365,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, /* check if UFS PHY moved from DISABLED to HIBERN8 */ err = ufs_qcom_check_hibern8(hba); ufs_qcom_enable_hw_clk_gating(hba); - + ufs_qcom_ice_enable(host); break; default: dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); @@ -375,7 +375,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, return err; } -/** +/* * Returns zero for success and non-zero in case of a failure */ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, @@ -613,6 +613,10 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) return err; } + err = ufs_qcom_ice_resume(host); + if (err) + return err; + hba->is_sys_suspended = false; return 0; } @@ -1071,6 +1075,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba) hba->caps |= UFSHCD_CAP_CLK_SCALING; hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; hba->caps |= UFSHCD_CAP_WB_EN; + hba->caps |= UFSHCD_CAP_CRYPTO; if (host->hw_ver.major >= 0x2) { host->caps = UFS_QCOM_CAP_QUNIPRO | @@ -1275,7 +1280,8 @@ static int ufs_qcom_init(struct ufs_hba *hba) host->dev_ref_clk_en_mask = BIT(26); } else { /* "dev_ref_clk_ctrl_mem" is optional resource */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "dev_ref_clk_ctrl_mem"); if (res) { host->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res); @@ -1297,6 +1303,10 @@ static int ufs_qcom_init(struct ufs_hba *hba) ufs_qcom_set_caps(hba); ufs_qcom_advertise_quirks(hba); + err = ufs_qcom_ice_init(host); + if (err) + goto out_variant_clear; + ufs_qcom_set_bus_vote(hba, true); ufs_qcom_setup_clocks(hba, true, POST_CHANGE); @@ -1713,7 +1723,7 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, } #endif -/** +/* * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * * The variant operations configure the necessary controller and PHY @@ -1735,6 +1745,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .dbg_register_dump = ufs_qcom_dump_dbg_regs, .device_reset = ufs_qcom_device_reset, .config_scaling_param = ufs_qcom_config_scaling_param, + .program_key = ufs_qcom_ice_program_key, }; /** diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 2d95e7cc7187..97247d17e258 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h 
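The header hunk below wires ICE in with the usual Kconfig-stub pattern:
real prototypes under CONFIG_SCSI_UFS_CRYPTO, inline no-op stubs
otherwise, so callers stay free of #ifdefs. A minimal, self-contained
illustration of the pattern (all names here are hypothetical, not part
of this patch):

	struct foo_host;

	#ifdef CONFIG_FOO_CRYPTO
	int foo_crypto_init(struct foo_host *host);
	#else
	static inline int foo_crypto_init(struct foo_host *host)
	{
		return 0;	/* feature compiled out: succeed as a no-op */
	}
	#endif

One detail worth noting: ufs_qcom_ice_program_key is stubbed as NULL
rather than as an inline function, because ufshcd_program_key() only
takes the vendor path after checking the hba->vops->program_key pointer.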
@@ -227,6 +227,9 @@ struct ufs_qcom_host { void __iomem *dev_ref_clk_ctrl_mmio; bool is_dev_ref_clk_enabled; struct ufs_hw_version hw_ver; +#ifdef CONFIG_SCSI_UFS_CRYPTO + void __iomem *ice_mmio; +#endif u32 dev_ref_clk_en_mask; @@ -264,4 +267,28 @@ static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host) return false; } +/* ufs-qcom-ice.c */ + +#ifdef CONFIG_SCSI_UFS_CRYPTO +int ufs_qcom_ice_init(struct ufs_qcom_host *host); +int ufs_qcom_ice_enable(struct ufs_qcom_host *host); +int ufs_qcom_ice_resume(struct ufs_qcom_host *host); +int ufs_qcom_ice_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot); +#else +static inline int ufs_qcom_ice_init(struct ufs_qcom_host *host) +{ + return 0; +} +static inline int ufs_qcom_ice_enable(struct ufs_qcom_host *host) +{ + return 0; +} +static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) +{ + return 0; +} +#define ufs_qcom_ice_program_key NULL +#endif /* !CONFIG_SCSI_UFS_CRYPTO */ + #endif /* UFS_QCOM_H_ */ diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index c70845d41449..f8ab16f30fdc 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -1,36 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Universal Flash Storage Host controller driver - * - * This code is based on drivers/scsi/ufs/ufs.h * Copyright (C) 2011-2013 Samsung India Software Operations * * Authors: * Santosh Yaraganavi <santosh.sy@samsung.com> * Vinayak Holikatti <h.vinayak@samsung.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * See the COPYING file in the top-level directory or visit - * <http://www.gnu.org/licenses/gpl-2.0.html> - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This program is provided "AS IS" and "WITH ALL FAULTS" and - * without warranty of any kind. You are solely responsible for - * determining the appropriateness of using and distributing - * the program and assume all risks associated with your exercise - * of rights with respect to the program, including but not limited - * to infringement of third party rights, the risks and costs of - * program errors, damage to or loss of data, programs or equipment, - * and unavailability or interruption of operations. Under no - * circumstances will the contributor of this Program be liable for - * any damages of any kind arising from your use or distribution of - * this program. 
*/ #ifndef _UFS_H @@ -63,6 +38,7 @@ #define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F #define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID) #define UFS_UPIU_WLUN_ID (1 << 7) +#define UFS_RPMB_UNIT 0xC4 /* WriteBooster buffer is available only for the logical unit from 0 to 7 */ #define UFS_UPIU_MAX_WB_LUN_ID 8 @@ -200,16 +176,6 @@ enum desc_header_offset { QUERY_DESC_DESC_TYPE_OFFSET = 0x01, }; -enum ufs_desc_def_size { - QUERY_DESC_DEVICE_DEF_SIZE = 0x59, - QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, - QUERY_DESC_UNIT_DEF_SIZE = 0x2D, - QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, - QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48, - QUERY_DESC_POWER_DEF_SIZE = 0x62, - QUERY_DESC_HEALTH_DEF_SIZE = 0x25, -}; - /* Unit descriptor parameters offsets in bytes*/ enum unit_desc_param { UNIT_DESC_PARAM_LEN = 0x0, diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index 516a7f573942..bcfbbd0d5c45 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -11,13 +11,12 @@ static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len, { int desc_size = be16_to_cpu(qr->length); int desc_id = qr->idn; - int ret; if (desc_size <= 0) return -EINVAL; - ret = ufshcd_map_desc_id_to_length(hba, desc_id, desc_len); - if (ret || !*desc_len) + ufshcd_map_desc_id_to_length(hba, desc_id, desc_len); + if (!*desc_len) return -EINVAL; *desc_len = min_t(int, *desc_len, desc_size); diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index e3175a63c676..07f559ac5883 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -12,9 +12,10 @@ #define UFS_ANY_VENDOR 0xFFFF #define UFS_ANY_MODEL "ANY_MODEL" -#define UFS_VENDOR_TOSHIBA 0x198 +#define UFS_VENDOR_MICRON 0x12C #define UFS_VENDOR_SAMSUNG 0x1CE #define UFS_VENDOR_SKHYNIX 0x1AD +#define UFS_VENDOR_TOSHIBA 0x198 #define UFS_VENDOR_WDC 0x145 /** @@ -108,4 +109,11 @@ struct ufs_dev_fix { */ #define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10) +/* + * Some UFS devices require delay after VCC power rail is turned-off. + * Enable this quirk to introduce 5ms delays after VCC power-off during + * suspend flow. 
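 * The core consumes the quirk in ufshcd_vreg_set_lpm(), in a hunk
 * later in this diff, as roughly:
 *
 *	if (vcc_off && hba->vreg_info.vcc &&
 *	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
 *		usleep_range(5000, 5100);	// 5 ms settle time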
+ */ +#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11) + #endif /* UFS_QUIRKS_H_ */ diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c new file mode 100644 index 000000000000..d2edbd960ebf --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-crypto.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 Google LLC + */ + +#include "ufshcd.h" +#include "ufshcd-crypto.h" + +/* Blk-crypto modes supported by UFS crypto */ +static const struct ufs_crypto_alg_entry { + enum ufs_crypto_alg ufs_alg; + enum ufs_crypto_key_size ufs_key_size; +} ufs_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = { + [BLK_ENCRYPTION_MODE_AES_256_XTS] = { + .ufs_alg = UFS_CRYPTO_ALG_AES_XTS, + .ufs_key_size = UFS_CRYPTO_KEY_SIZE_256, + }, +}; + +static int ufshcd_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, int slot) +{ + int i; + u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg); + int err = 0; + + ufshcd_hold(hba, false); + + if (hba->vops && hba->vops->program_key) { + err = hba->vops->program_key(hba, cfg, slot); + goto out; + } + + /* Ensure that CFGE is cleared before programming the key */ + ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0])); + for (i = 0; i < 16; i++) { + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[i]), + slot_offset + i * sizeof(cfg->reg_val[0])); + } + /* Write dword 17 */ + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[17]), + slot_offset + 17 * sizeof(cfg->reg_val[0])); + /* Dword 16 must be written last */ + ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]), + slot_offset + 16 * sizeof(cfg->reg_val[0])); +out: + ufshcd_release(hba); + return err; +} + +static int ufshcd_crypto_keyslot_program(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = container_of(ksm, struct ufs_hba, ksm); + const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array; + const struct ufs_crypto_alg_entry *alg = + &ufs_crypto_algs[key->crypto_cfg.crypto_mode]; + u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512; + int i; + int cap_idx = -1; + union ufs_crypto_cfg_entry cfg = { 0 }; + int err; + + BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0); + for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) { + if (ccap_array[i].algorithm_id == alg->ufs_alg && + ccap_array[i].key_size == alg->ufs_key_size && + (ccap_array[i].sdus_mask & data_unit_mask)) { + cap_idx = i; + break; + } + } + + if (WARN_ON(cap_idx < 0)) + return -EOPNOTSUPP; + + cfg.data_unit_size = data_unit_mask; + cfg.crypto_cap_idx = cap_idx; + cfg.config_enable = UFS_CRYPTO_CONFIGURATION_ENABLE; + + if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) { + /* In XTS mode, the blk_crypto_key's size is already doubled */ + memcpy(cfg.crypto_key, key->raw, key->size/2); + memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2, + key->raw + key->size/2, key->size/2); + } else { + memcpy(cfg.crypto_key, key->raw, key->size); + } + + err = ufshcd_program_key(hba, &cfg, slot); + + memzero_explicit(&cfg, sizeof(cfg)); + return err; +} + +static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot) +{ + /* + * Clear the crypto cfg on the device. Clearing CFGE + * might not be sufficient, so just clear the entire cfg. 
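 * An all-zero entry has config_enable clear (CFGE = 0) and an all-zero
 * key, so programming it both disables the slot and wipes any key
 * material left in it. The write ordering enforced by
 * ufshcd_program_key() above still applies here: dword 16, which
 * carries CFGE, is cleared first and written last, so the slot can
 * never be observed enabled with a half-written configuration.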
+ */ + union ufs_crypto_cfg_entry cfg = { 0 }; + + return ufshcd_program_key(hba, &cfg, slot); +} + +static int ufshcd_crypto_keyslot_evict(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = container_of(ksm, struct ufs_hba, ksm); + + return ufshcd_clear_keyslot(hba, slot); +} + +bool ufshcd_crypto_enable(struct ufs_hba *hba) +{ + if (!(hba->caps & UFSHCD_CAP_CRYPTO)) + return false; + + /* Reset might clear all keys, so reprogram all the keys. */ + blk_ksm_reprogram_all_keys(&hba->ksm); + return true; +} + +static const struct blk_ksm_ll_ops ufshcd_ksm_ops = { + .keyslot_program = ufshcd_crypto_keyslot_program, + .keyslot_evict = ufshcd_crypto_keyslot_evict, +}; + +static enum blk_crypto_mode_num +ufshcd_find_blk_crypto_mode(union ufs_crypto_cap_entry cap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ufs_crypto_algs); i++) { + BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0); + if (ufs_crypto_algs[i].ufs_alg == cap.algorithm_id && + ufs_crypto_algs[i].ufs_key_size == cap.key_size) { + return i; + } + } + return BLK_ENCRYPTION_MODE_INVALID; +} + +/** + * ufshcd_hba_init_crypto_capabilities - Read crypto capabilities, init crypto + * fields in hba + * @hba: Per adapter instance + * + * Return: 0 if crypto was initialized or is not supported, else a -errno value. + */ +int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) +{ + int cap_idx; + int err = 0; + enum blk_crypto_mode_num blk_mode_num; + + /* + * Don't use crypto if either the hardware doesn't advertise the + * standard crypto capability bit *or* if the vendor specific driver + * hasn't advertised that crypto is supported. + */ + if (!(hba->capabilities & MASK_CRYPTO_SUPPORT) || + !(hba->caps & UFSHCD_CAP_CRYPTO)) + goto out; + + hba->crypto_capabilities.reg_val = + cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); + hba->crypto_cfg_register = + (u32)hba->crypto_capabilities.config_array_ptr * 0x100; + hba->crypto_cap_array = + devm_kcalloc(hba->dev, hba->crypto_capabilities.num_crypto_cap, + sizeof(hba->crypto_cap_array[0]), GFP_KERNEL); + if (!hba->crypto_cap_array) { + err = -ENOMEM; + goto out; + } + + /* The actual number of configurations supported is (CFGC+1) */ + err = blk_ksm_init(&hba->ksm, + hba->crypto_capabilities.config_count + 1); + if (err) + goto out_free_caps; + + hba->ksm.ksm_ll_ops = ufshcd_ksm_ops; + /* UFS only supports 8 bytes for any DUN */ + hba->ksm.max_dun_bytes_supported = 8; + hba->ksm.dev = hba->dev; + + /* + * Cache all the UFS crypto capabilities and advertise the supported + * crypto modes and data unit sizes to the block layer. 
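 * Each capability's sdus_mask is a bitmask of supported data unit
 * sizes in 512-byte granules: bit i set means a data unit size of
 * 512 << i bytes is supported. Multiplying by 512 therefore converts
 * it directly into the bitmask of byte sizes that blk-crypto keeps in
 * crypto_modes_supported[]. A worked example with a hypothetical
 * sdus_mask of 0x9 (512-byte and 4096-byte data units):
 *
 *	0x9 * 512 == 0x1200 == (0x200 | 0x1000)	// 512 B | 4 KiB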
+ */ + for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap; + cap_idx++) { + hba->crypto_cap_array[cap_idx].reg_val = + cpu_to_le32(ufshcd_readl(hba, + REG_UFS_CRYPTOCAP + + cap_idx * sizeof(__le32))); + blk_mode_num = ufshcd_find_blk_crypto_mode( + hba->crypto_cap_array[cap_idx]); + if (blk_mode_num != BLK_ENCRYPTION_MODE_INVALID) + hba->ksm.crypto_modes_supported[blk_mode_num] |= + hba->crypto_cap_array[cap_idx].sdus_mask * 512; + } + + return 0; + +out_free_caps: + devm_kfree(hba->dev, hba->crypto_cap_array); +out: + /* Indicate that init failed by clearing UFSHCD_CAP_CRYPTO */ + hba->caps &= ~UFSHCD_CAP_CRYPTO; + return err; +} + +/** + * ufshcd_init_crypto - Initialize crypto hardware + * @hba: Per adapter instance + */ +void ufshcd_init_crypto(struct ufs_hba *hba) +{ + int slot; + + if (!(hba->caps & UFSHCD_CAP_CRYPTO)) + return; + + /* Clear all keyslots - the number of keyslots is (CFGC + 1) */ + for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++) + ufshcd_clear_keyslot(hba, slot); +} + +void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q) +{ + if (hba->caps & UFSHCD_CAP_CRYPTO) + blk_ksm_register(&hba->ksm, q); +} + +void ufshcd_crypto_destroy_keyslot_manager(struct ufs_hba *hba) +{ + blk_ksm_destroy(&hba->ksm); +} diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h new file mode 100644 index 000000000000..d53851be5541 --- /dev/null +++ b/drivers/scsi/ufs/ufshcd-crypto.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 Google LLC + */ + +#ifndef _UFSHCD_CRYPTO_H +#define _UFSHCD_CRYPTO_H + +#ifdef CONFIG_SCSI_UFS_CRYPTO +#include "ufshcd.h" +#include "ufshci.h" + +static inline void ufshcd_prepare_lrbp_crypto(struct request *rq, + struct ufshcd_lrb *lrbp) +{ + if (!rq || !rq->crypt_keyslot) { + lrbp->crypto_key_slot = -1; + return; + } + + lrbp->crypto_key_slot = blk_ksm_get_slot_idx(rq->crypt_keyslot); + lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0]; +} + +static inline void +ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0, + u32 *dword_1, u32 *dword_3) +{ + if (lrbp->crypto_key_slot >= 0) { + *dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD; + *dword_0 |= lrbp->crypto_key_slot; + *dword_1 = lower_32_bits(lrbp->data_unit_num); + *dword_3 = upper_32_bits(lrbp->data_unit_num); + } +} + +bool ufshcd_crypto_enable(struct ufs_hba *hba); + +int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba); + +void ufshcd_init_crypto(struct ufs_hba *hba); + +void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q); + +void ufshcd_crypto_destroy_keyslot_manager(struct ufs_hba *hba); + +#else /* CONFIG_SCSI_UFS_CRYPTO */ + +static inline void ufshcd_prepare_lrbp_crypto(struct request *rq, + struct ufshcd_lrb *lrbp) { } + +static inline void +ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0, + u32 *dword_1, u32 *dword_3) { } + +static inline bool ufshcd_crypto_enable(struct ufs_hba *hba) +{ + return false; +} + +static inline int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba) +{ + return 0; +} + +static inline void ufshcd_init_crypto(struct ufs_hba *hba) { } + +static inline void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba, + struct request_queue *q) { } + +static inline void ufshcd_crypto_destroy_keyslot_manager(struct ufs_hba *hba) +{ } + +#endif /* CONFIG_SCSI_UFS_CRYPTO */ + +#endif /* _UFSHCD_CRYPTO_H */ diff --git a/drivers/scsi/ufs/ufshcd-pci.c 
b/drivers/scsi/ufs/ufshcd-pci.c index 8f78a8151499..f407b13883ac 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Universal Flash Storage Host controller PCI glue driver * @@ -7,30 +8,6 @@ * Authors: * Santosh Yaraganavi <santosh.sy@samsung.com> * Vinayak Holikatti <h.vinayak@samsung.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * See the COPYING file in the top-level directory or visit - * <http://www.gnu.org/licenses/gpl-2.0.html> - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This program is provided "AS IS" and "WITH ALL FAULTS" and - * without warranty of any kind. You are solely responsible for - * determining the appropriateness of using and distributing - * the program and assume all risks associated with your exercise - * of rights with respect to the program, including but not limited - * to infringement of third party rights, the risks and costs of - * program errors, damage to or loss of data, programs or equipment, - * and unavailability or interruption of operations. Under no - * circumstances will the contributor of this Program be liable for - * any damages of any kind arising from your use or distribution of - * this program. */ #include "ufshcd.h" diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 76f9be71c31b..3db0af66c71c 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -1,36 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Universal Flash Storage Host controller Platform bus based glue driver - * - * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c * Copyright (C) 2011-2013 Samsung India Software Operations * * Authors: * Santosh Yaraganavi <santosh.sy@samsung.com> * Vinayak Holikatti <h.vinayak@samsung.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * See the COPYING file in the top-level directory or visit - * <http://www.gnu.org/licenses/gpl-2.0.html> - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This program is provided "AS IS" and "WITH ALL FAULTS" and - * without warranty of any kind. You are solely responsible for - * determining the appropriateness of using and distributing - * the program and assume all risks associated with your exercise - * of rights with respect to the program, including but not limited - * to infringement of third party rights, the risks and costs of - * program errors, damage to or loss of data, programs or equipment, - * and unavailability or interruption of operations. 
Under no - * circumstances will the contributor of this Program be liable for - * any damages of any kind arising from your use or distribution of - * this program. */ #include <linux/platform_device.h> diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index ad4fc829cbb2..307622284239 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1,40 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * Universal Flash Storage Host controller driver Core - * - * This code is based on drivers/scsi/ufs/ufshcd.c * Copyright (C) 2011-2013 Samsung India Software Operations * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. * * Authors: * Santosh Yaraganavi <santosh.sy@samsung.com> * Vinayak Holikatti <h.vinayak@samsung.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * See the COPYING file in the top-level directory or visit - * <http://www.gnu.org/licenses/gpl-2.0.html> - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This program is provided "AS IS" and "WITH ALL FAULTS" and - * without warranty of any kind. You are solely responsible for - * determining the appropriateness of using and distributing - * the program and assume all risks associated with your exercise - * of rights with respect to the program, including but not limited - * to infringement of third party rights, the risks and costs of - * program errors, damage to or loss of data, programs or equipment, - * and unavailability or interruption of operations. Under no - * circumstances will the contributor of this Program be liable for - * any damages of any kind arising from your use or distribution of - * this program. - * - * The Linux Foundation chooses to take subject only to the GPLv2 - * license terms, and distributes only under these terms. 
*/ #include <linux/async.h> @@ -48,6 +20,7 @@ #include "unipro.h" #include "ufs-sysfs.h" #include "ufs_bsg.h" +#include "ufshcd-crypto.h" #include <asm/unaligned.h> #include <linux/blkdev.h> @@ -216,23 +189,22 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, static struct ufs_dev_fix ufs_fixups[] = { /* UFS cards deviations table */ - UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | + UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE | UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS), - UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, - UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE), + UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME), + UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/, + UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME), UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG", UFS_DEVICE_QUIRK_PA_TACTIVATE), UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG", UFS_DEVICE_QUIRK_PA_TACTIVATE), - UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, - UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME), - UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/, - UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME), - END_FIX }; @@ -340,6 +312,26 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, &descp->input_param1); } +static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, + struct uic_command *ucmd, + const char *str) +{ + u32 cmd; + + if (!trace_ufshcd_uic_command_enabled()) + return; + + if (!strcmp(str, "send")) + cmd = ucmd->command; + else + cmd = ufshcd_readl(hba, REG_UIC_COMMAND); + + trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd, + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2), + ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3)); +} + static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -672,7 +664,11 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) */ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) { - ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); + if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) + ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); + else + ufshcd_writel(hba, ~(1 << pos), + REG_UTP_TRANSFER_REQ_LIST_CLEAR); } /** @@ -682,7 +678,10 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) */ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) { - ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); + if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) + ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); + else + ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR); } /** @@ -839,7 +838,12 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba) */ static inline void ufshcd_hba_start(struct ufs_hba *hba) { - ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE); + u32 val = CONTROLLER_ENABLE; + + if (ufshcd_crypto_enable(hba)) + val |= CRYPTO_GENERAL_ENABLE; + + ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE); } /** @@ -1314,6 +1318,7 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, unsigned long flags; struct list_head *clk_list = &hba->clk_list_head; struct ufs_clk_info *clki; + ktime_t curr_t; if (!ufshcd_is_clkscaling_supported(hba)) return -EINVAL; @@ -1321,6 
+1326,7 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, memset(stat, 0, sizeof(*stat)); spin_lock_irqsave(hba->host->host_lock, flags); + curr_t = ktime_get(); if (!scaling->window_start_t) goto start_window; @@ -1332,18 +1338,17 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev, */ stat->current_frequency = clki->curr_freq; if (scaling->is_busy_started) - scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), - scaling->busy_start_t)); + scaling->tot_busy_t += ktime_us_delta(curr_t, + scaling->busy_start_t); - stat->total_time = jiffies_to_usecs((long)jiffies - - (long)scaling->window_start_t); + stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t); stat->busy_time = scaling->tot_busy_t; start_window: - scaling->window_start_t = jiffies; + scaling->window_start_t = curr_t; scaling->tot_busy_t = 0; if (hba->outstanding_reqs) { - scaling->busy_start_t = ktime_get(); + scaling->busy_start_t = curr_t; scaling->is_busy_started = true; } else { scaling->busy_start_t = 0; @@ -1877,6 +1882,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) { bool queue_resume_work = false; + ktime_t curr_t = ktime_get(); if (!ufshcd_is_clkscaling_supported(hba)) return; @@ -1892,13 +1898,13 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) &hba->clk_scaling.resume_work); if (!hba->clk_scaling.window_start_t) { - hba->clk_scaling.window_start_t = jiffies; + hba->clk_scaling.window_start_t = curr_t; hba->clk_scaling.tot_busy_t = 0; hba->clk_scaling.is_busy_started = false; } if (!hba->clk_scaling.is_busy_started) { - hba->clk_scaling.busy_start_t = ktime_get(); + hba->clk_scaling.busy_start_t = curr_t; hba->clk_scaling.is_busy_started = true; } } @@ -1925,8 +1931,11 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) static inline void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { - hba->lrb[task_tag].issue_time_stamp = ktime_get(); - hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); + struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; + + lrbp->issue_time_stamp = ktime_get(); + lrbp->compl_time_stamp = ktime_set(0, 0); + ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false)); ufshcd_add_command_trace(hba, task_tag, "send"); ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); @@ -1996,15 +2005,26 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) /** * ufshcd_hba_capabilities - Read controller capabilities * @hba: per adapter instance + * + * Return: 0 on success, negative on error. 
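 * "0 based" below means the register reports N - 1, hence the "+ 1"
 * when decoding. A worked example with an illustrative capabilities
 * value of 0x0707001F:
 *
 *	nutrs  = (0x0707001F & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
 *	         // 0x1F + 1 == 32 transfer request slots
 *	nutmrs = ((0x0707001F & MASK_TASK_MANAGEMENT_REQUEST_SLOTS)
 *			>> 16) + 1;	// 0x7 + 1 == 8 task mgmt slots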
*/ -static inline void ufshcd_hba_capabilities(struct ufs_hba *hba) +static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) { + int err; + hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); /* nutrs and nutmrs are 0 based values */ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; hba->nutmrs = ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1; + + /* Read crypto capabilities */ + err = ufshcd_hba_init_crypto_capabilities(hba); + if (err) + dev_err(hba->dev, "crypto setup failed\n"); + + return err; } /** @@ -2052,6 +2072,8 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); + ufshcd_add_uic_command_trace(hba, uic_cmd, "send"); + /* Write UIC Cmd */ ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, REG_UIC_COMMAND); @@ -2161,8 +2183,14 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) return sg_segments; if (sg_segments) { - lrbp->utr_descriptor_ptr->prd_table_length = - cpu_to_le16((u16)sg_segments); + + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) + lrbp->utr_descriptor_ptr->prd_table_length = + cpu_to_le16((sg_segments * + sizeof(struct ufshcd_sg_entry))); + else + lrbp->utr_descriptor_ptr->prd_table_length = + cpu_to_le16((u16) (sg_segments)); prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr; @@ -2232,11 +2260,13 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs) * @cmd_dir: requests data direction */ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, - u32 *upiu_flags, enum dma_data_direction cmd_dir) + u8 *upiu_flags, enum dma_data_direction cmd_dir) { struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr; u32 data_direction; u32 dword_0; + u32 dword_1 = 0; + u32 dword_3 = 0; if (cmd_dir == DMA_FROM_DEVICE) { data_direction = UTP_DEVICE_TO_HOST; @@ -2254,10 +2284,12 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, if (lrbp->intr_cmd) dword_0 |= UTP_REQ_DESC_INT_CMD; + /* Prepare crypto related dwords */ + ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3); + /* Transfer request descriptor header fields */ req_desc->header.dword_0 = cpu_to_le32(dword_0); - /* dword_1 is reserved, hence it is set to 0 */ - req_desc->header.dword_1 = 0; + req_desc->header.dword_1 = cpu_to_le32(dword_1); /* * assigning invalid value for command status. 
Controller * updates OCS on command completion, with the command @@ -2265,8 +2297,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, */ req_desc->header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS); - /* dword_3 is reserved, hence it is set to 0 */ - req_desc->header.dword_3 = 0; + req_desc->header.dword_3 = cpu_to_le32(dword_3); req_desc->prd_table_length = 0; } @@ -2278,7 +2309,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, * @upiu_flags: flags */ static -void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) +void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags) { struct scsi_cmnd *cmd = lrbp->cmd; struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; @@ -2311,7 +2342,7 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) * @upiu_flags: flags */ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp, u32 upiu_flags) + struct ufshcd_lrb *lrbp, u8 upiu_flags) { struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; struct ufs_query *query = &hba->dev_cmd.query; @@ -2367,7 +2398,7 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp) */ static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { - u32 upiu_flags; + u8 upiu_flags; int ret = 0; if ((hba->ufs_version == UFSHCI_VERSION_10) || @@ -2395,7 +2426,7 @@ static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) */ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { - u32 upiu_flags; + u8 upiu_flags; int ret = 0; if ((hba->ufs_version == UFSHCI_VERSION_10) || @@ -2521,6 +2552,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->task_tag = tag; lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? 
true : false; + + ufshcd_prepare_lrbp_crypto(cmd->request, lrbp); + lrbp->req_abort_skip = false; ufshcd_comp_scsi_upiu(hba, lrbp); @@ -2536,7 +2570,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) /* issue command to the controller */ spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_vops_setup_xfer_req(hba, tag, true); ufshcd_send_command(hba, tag); out_unlock: spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -2554,6 +2587,7 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, lrbp->task_tag = tag; lrbp->lun = 0; /* device management cmd is not specific to any LUN */ lrbp->intr_cmd = true; /* No interrupt aggregation */ + ufshcd_prepare_lrbp_crypto(NULL, lrbp); hba->dev_cmd.type = cmd_type; return ufshcd_comp_devman_upiu(hba, lrbp); @@ -2723,7 +2757,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, /* Make sure descriptors are ready before ringing the doorbell */ wmb(); spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_vops_setup_xfer_req(hba, tag, false); ufshcd_send_command(hba, tag); spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -3053,94 +3086,36 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, } /** - * ufshcd_read_desc_length - read the specified descriptor length from header - * @hba: Pointer to adapter instance - * @desc_id: descriptor idn value - * @desc_index: descriptor index - * @desc_length: pointer to variable to read the length of descriptor - * - * Return 0 in case of success, non-zero otherwise - */ -static int ufshcd_read_desc_length(struct ufs_hba *hba, - enum desc_idn desc_id, - int desc_index, - int *desc_length) -{ - int ret; - u8 header[QUERY_DESC_HDR_SIZE]; - int header_len = QUERY_DESC_HDR_SIZE; - - if (desc_id >= QUERY_DESC_IDN_MAX) - return -EINVAL; - - ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, - desc_id, desc_index, 0, header, - &header_len); - - if (ret) { - dev_err(hba->dev, "%s: Failed to get descriptor header id %d", - __func__, desc_id); - return ret; - } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) { - dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch", - __func__, header[QUERY_DESC_DESC_TYPE_OFFSET], - desc_id); - ret = -EINVAL; - } - - *desc_length = header[QUERY_DESC_LENGTH_OFFSET]; - return ret; - -} - -/** * ufshcd_map_desc_id_to_length - map descriptor IDN to its length * @hba: Pointer to adapter instance * @desc_id: descriptor idn value * @desc_len: mapped desc length (out) - * - * Return 0 in case of success, non-zero otherwise */ -int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, - enum desc_idn desc_id, int *desc_len) +void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, + int *desc_len) { - switch (desc_id) { - case QUERY_DESC_IDN_DEVICE: - *desc_len = hba->desc_size.dev_desc; - break; - case QUERY_DESC_IDN_POWER: - *desc_len = hba->desc_size.pwr_desc; - break; - case QUERY_DESC_IDN_GEOMETRY: - *desc_len = hba->desc_size.geom_desc; - break; - case QUERY_DESC_IDN_CONFIGURATION: - *desc_len = hba->desc_size.conf_desc; - break; - case QUERY_DESC_IDN_UNIT: - *desc_len = hba->desc_size.unit_desc; - break; - case QUERY_DESC_IDN_INTERCONNECT: - *desc_len = hba->desc_size.interc_desc; - break; - case QUERY_DESC_IDN_STRING: - *desc_len = QUERY_DESC_MAX_SIZE; - break; - case QUERY_DESC_IDN_HEALTH: - *desc_len = hba->desc_size.hlth_desc; - break; - case QUERY_DESC_IDN_RFU_0: - case QUERY_DESC_IDN_RFU_1: - *desc_len = 0; - break; - default: + if (desc_id >= QUERY_DESC_IDN_MAX || 
desc_id == QUERY_DESC_IDN_RFU_0 || + desc_id == QUERY_DESC_IDN_RFU_1) *desc_len = 0; - return -EINVAL; - } - return 0; + else + *desc_len = hba->desc_size[desc_id]; } EXPORT_SYMBOL(ufshcd_map_desc_id_to_length); +static void ufshcd_update_desc_length(struct ufs_hba *hba, + enum desc_idn desc_id, int desc_index, + unsigned char desc_len) +{ + if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE && + desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT) + /* For UFS 3.1, the normal unit descriptor is 10 bytes larger + * than the RPMB unit; however, both descriptors share the same + * desc_idn. To cover both unit descriptors with one length, we + * choose the normal unit descriptor length by desc_index. + */ + hba->desc_size[desc_id] = desc_len; +} + /** * ufshcd_read_desc_param - read the specified descriptor parameter * @hba: Pointer to adapter instance @@ -3168,16 +3143,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) return -EINVAL; - /* Get the max length of descriptor from structure filled up at probe - * time. - */ - ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); - - /* Sanity checks */ - if (ret || !buff_len) { - dev_err(hba->dev, "%s: Failed to get full descriptor length", - __func__); - return ret; + /* Get the length of the descriptor */ + ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); + if (!buff_len) { + dev_err(hba->dev, "%s: Failed to get desc length", __func__); + return -EINVAL; } /* Check whether we need temp memory */ @@ -3209,9 +3179,13 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, goto out; } + /* Update descriptor length */ + buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET]; + ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len); + /* Check whether we will not copy more data than available */ - if (is_kmalloc && param_size > buff_len) - param_size = buff_len; + if (is_kmalloc && (param_offset + param_size) > buff_len) + param_size = buff_len - param_offset; if (is_kmalloc) memcpy(param_read_buf, &desc_buf[param_offset], param_size); @@ -3221,16 +3195,6 @@ out: return ret; } -static inline int ufshcd_read_desc(struct ufs_hba *hba, - enum desc_idn desc_id, - int desc_index, - void *buf, - u32 size) -{ - return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); -} - - /** * struct uc_string_id - unicode string * @@ -3278,9 +3242,8 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, if (!uc_str) return -ENOMEM; - ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, - desc_index, uc_str, - QUERY_DESC_MAX_SIZE); + ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0, + (u8 *)uc_str, QUERY_DESC_MAX_SIZE); if (ret < 0) { dev_err(hba->dev, "Reading String Desc failed after %d retries.
err = %d\n", QUERY_REQ_RETRIES, ret); @@ -3511,11 +3474,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); /* Response upiu and prdt offset should be in double words */ - utrdlp[i].response_upiu_offset = - cpu_to_le16(response_offset >> 2); - utrdlp[i].prd_table_offset = cpu_to_le16(prdt_offset >> 2); - utrdlp[i].response_upiu_length = - cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); + if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { + utrdlp[i].response_upiu_offset = + cpu_to_le16(response_offset); + utrdlp[i].prd_table_offset = + cpu_to_le16(prdt_offset); + utrdlp[i].response_upiu_length = + cpu_to_le16(ALIGNED_UPIU_SIZE); + } else { + utrdlp[i].response_upiu_offset = + cpu_to_le16(response_offset >> 2); + utrdlp[i].prd_table_offset = + cpu_to_le16(prdt_offset >> 2); + utrdlp[i].response_upiu_length = + cpu_to_le16(ALIGNED_UPIU_SIZE >> 2); + } ufshcd_init_lrb(hba, &hba->lrb[i], i); } @@ -3545,6 +3518,52 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba) "dme-link-startup: error code %d\n", ret); return ret; } +/** + * ufshcd_dme_reset - UIC command for DME_RESET + * @hba: per adapter instance + * + * DME_RESET command is issued in order to reset UniPro stack. + * This function now deals with cold reset. + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dme_reset(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_RESET; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_err(hba->dev, + "dme-reset: error code %d\n", ret); + + return ret; +} + +/** + * ufshcd_dme_enable - UIC command for DME_ENABLE + * @hba: per adapter instance + * + * DME_ENABLE command is issued in order to enable UniPro stack. 
+ * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_dme_enable(struct ufs_hba *hba) +{ + struct uic_command uic_cmd = {0}; + int ret; + + uic_cmd.command = UIC_CMD_DME_ENABLE; + + ret = ufshcd_send_uic_cmd(hba, &uic_cmd); + if (ret) + dev_err(hba->dev, + "dme-enable: error code %d\n", ret); + + return ret; +} static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba) { @@ -4269,7 +4288,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba) } /** - * ufshcd_hba_enable - initialize the controller + * ufshcd_hba_execute_hce - initialize the controller * @hba: per adapter instance * * The controller resets itself and controller firmware initialization @@ -4278,7 +4297,7 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba) * * Returns 0 on success, non-zero value on failure */ -int ufshcd_hba_enable(struct ufs_hba *hba) +static int ufshcd_hba_execute_hce(struct ufs_hba *hba) { int retry; @@ -4326,6 +4345,32 @@ int ufshcd_hba_enable(struct ufs_hba *hba) return 0; } + +int ufshcd_hba_enable(struct ufs_hba *hba) +{ + int ret; + + if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) { + ufshcd_set_link_off(hba); + ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE); + + /* enable UIC related interrupts */ + ufshcd_enable_intr(hba, UFSHCD_UIC_MASK); + ret = ufshcd_dme_reset(hba); + if (!ret) { + ret = ufshcd_dme_enable(hba); + if (!ret) + ufshcd_vops_hce_enable_notify(hba, POST_CHANGE); + if (ret) + dev_err(hba->dev, + "Host controller enable failed with non-hce\n"); + } + } else { + ret = ufshcd_hba_execute_hce(hba); + } + + return ret; +} EXPORT_SYMBOL_GPL(ufshcd_hba_enable); static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) @@ -4650,6 +4695,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) if (ufshcd_is_rpm_autosuspend_allowed(hba)) sdev->rpm_autosuspend = 1; + ufshcd_crypto_setup_rq_keyslot_manager(hba, q); + return 0; } @@ -4724,6 +4771,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) /* overall command status of utrd */ ocs = ufshcd_get_tr_ocs(lrbp); + if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) { + if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) & + MASK_RSP_UPIU_RESULT) + ocs = OCS_SUCCESS; + } + switch (ocs) { case OCS_SUCCESS: result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr); @@ -4792,6 +4845,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) case OCS_MISMATCH_RESP_UPIU_SIZE: case OCS_PEER_COMM_FAILURE: case OCS_FATAL_ERROR: + case OCS_DEVICE_FATAL_ERROR: + case OCS_INVALID_CRYPTO_CONFIG: + case OCS_GENERAL_CRYPTO_ERROR: default: result |= DID_ERROR << 16; dev_err(hba->dev, @@ -4833,6 +4889,10 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) complete(hba->uic_async_done); retval = IRQ_HANDLED; } + + if (retval == IRQ_HANDLED) + ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, + "complete"); return retval; } @@ -4851,6 +4911,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, for_each_set_bit(index, &completed_reqs, hba->nutrs) { lrbp = &hba->lrb[index]; + lrbp->compl_time_stamp = ktime_get(); cmd = lrbp->cmd; if (cmd) { ufshcd_add_command_trace(hba, index, "complete"); @@ -4859,13 +4920,11 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, cmd->result = result; /* Mark completed command as NULL in LRB */ lrbp->cmd = NULL; - lrbp->compl_time_stamp = ktime_get(); /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); __ufshcd_release(hba); } else if (lrbp->command_type ==
UTP_CMD_TYPE_DEV_MANAGE || lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { - lrbp->compl_time_stamp = ktime_get(); if (hba->dev_cmd.complete) { ufshcd_add_command_trace(hba, index, "dev_complete"); @@ -4902,7 +4961,8 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) * false interrupt if device completes another request after resetting * aggregation and before reading the DB. */ - if (ufshcd_is_intr_aggr_allowed(hba)) + if (ufshcd_is_intr_aggr_allowed(hba) && + !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) ufshcd_reset_intr_aggr(hba); tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); @@ -6090,7 +6150,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, int tag; struct completion wait; unsigned long flags; - u32 upiu_flags; + u8 upiu_flags; down_read(&hba->clk_scaling_lock); @@ -6112,6 +6172,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, lrbp->task_tag = tag; lrbp->lun = 0; lrbp->intr_cmd = true; + ufshcd_prepare_lrbp_crypto(NULL, lrbp); hba->dev_cmd.type = cmd_type; switch (hba->ufs_version) { @@ -6703,7 +6764,7 @@ out: static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) { int ret; - int buff_len = hba->desc_size.pwr_desc; + int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER]; u8 *desc_buf; u32 icc_level; @@ -6711,8 +6772,8 @@ static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) if (!desc_buf) return; - ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, - desc_buf, buff_len); + ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, + desc_buf, buff_len); if (ret) { dev_err(hba->dev, "%s: Failed reading power descriptor.len = %d ret = %d", @@ -6815,20 +6876,31 @@ out: static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf) { + struct ufs_dev_info *dev_info = &hba->dev_info; u8 lun; u32 d_lu_wb_buf_alloc; if (!ufshcd_is_wb_allowed(hba)) return; + /* + * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or + * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES + * enabled + */ + if (!(dev_info->wspecversion >= 0x310 || + dev_info->wspecversion == 0x220 || + (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))) + goto wb_disabled; - if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4) + if (hba->desc_size[QUERY_DESC_IDN_DEVICE] < + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4) goto wb_disabled; - hba->dev_info.d_ext_ufs_feature_sup = + dev_info->d_ext_ufs_feature_sup = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); - if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP)) + if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP)) goto wb_disabled; /* @@ -6837,17 +6909,17 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf) * a max of 1 lun would have wb buffer configured. * Now only shared buffer mode is supported. 
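 * The spec-version gate above reads wspecversion as BCD: 0x310 is
 * UFS 3.1 and 0x220 is UFS 2.2, the first revisions where
 * WriteBooster is part of the standard; older devices reach this
 * probe only via the UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
 * quirk. Decoding, for illustration:
 *
 *	major = wspecversion >> 8;		// 0x310 -> 3
 *	minor = (wspecversion >> 4) & 0xf;	// 0x310 -> 1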
*/ - hba->dev_info.b_wb_buffer_type = + dev_info->b_wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE]; - hba->dev_info.b_presrv_uspc_en = + dev_info->b_presrv_uspc_en = desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN]; - if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_SHARED) { - hba->dev_info.d_wb_alloc_units = + if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) { + dev_info->d_wb_alloc_units = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS); - if (!hba->dev_info.d_wb_alloc_units) + if (!dev_info->d_wb_alloc_units) goto wb_disabled; } else { for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) { @@ -6858,7 +6930,7 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf) (u8 *)&d_lu_wb_buf_alloc, sizeof(d_lu_wb_buf_alloc)); if (d_lu_wb_buf_alloc) { - hba->dev_info.wb_dedicated_lu = lun; + dev_info->wb_dedicated_lu = lun; break; } } @@ -6903,21 +6975,18 @@ static void ufs_fixup_device_setup(struct ufs_hba *hba) static int ufs_get_device_desc(struct ufs_hba *hba) { int err; - size_t buff_len; u8 model_index; u8 *desc_buf; struct ufs_dev_info *dev_info = &hba->dev_info; - buff_len = max_t(size_t, hba->desc_size.dev_desc, - QUERY_DESC_MAX_SIZE + 1); - desc_buf = kmalloc(buff_len, GFP_KERNEL); + desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); if (!desc_buf) { err = -ENOMEM; goto out; } - err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf, - hba->desc_size.dev_desc); + err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, + hba->desc_size[QUERY_DESC_IDN_DEVICE]); if (err) { dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", __func__, err); @@ -6947,14 +7016,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba) ufs_fixup_device_setup(hba); - /* - * Probe WB only for UFS-3.1 devices or UFS devices with quirk - * UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES enabled - */ - if (dev_info->wspecversion >= 0x310 || - dev_info->wspecversion == 0x220 || - (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)) - ufshcd_wb_probe(hba, desc_buf); + ufshcd_wb_probe(hba, desc_buf); /* * ufshcd_read_string_desc returns size of the string @@ -7146,61 +7208,21 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) hba->req_abort_count = 0; } -static void ufshcd_init_desc_sizes(struct ufs_hba *hba) -{ - int err; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0, - &hba->desc_size.dev_desc); - if (err) - hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0, - &hba->desc_size.pwr_desc); - if (err) - hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0, - &hba->desc_size.interc_desc); - if (err) - hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0, - &hba->desc_size.conf_desc); - if (err) - hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0, - &hba->desc_size.unit_desc); - if (err) - hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0, - &hba->desc_size.geom_desc); - if (err) - hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; - - err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0, - &hba->desc_size.hlth_desc); - if (err) - hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; -} - static int ufshcd_device_geo_params_init(struct 
@@ -7146,61 +7208,21 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
 	hba->req_abort_count = 0;
 }
 
-static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
-{
-	int err;
-
-	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
-		&hba->desc_size.dev_desc);
-	if (err)
-		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
-
-	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
-		&hba->desc_size.pwr_desc);
-	if (err)
-		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
-
-	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
-		&hba->desc_size.interc_desc);
-	if (err)
-		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
-
-	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
-		&hba->desc_size.conf_desc);
-	if (err)
-		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
-
-	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
-		&hba->desc_size.unit_desc);
-	if (err)
-		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
-
-	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
-		&hba->desc_size.geom_desc);
-	if (err)
-		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
-
-	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
-		&hba->desc_size.hlth_desc);
-	if (err)
-		hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
-}
-
 static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
 {
 	int err;
 	size_t buff_len;
 	u8 *desc_buf;
 
-	buff_len = hba->desc_size.geom_desc;
+	buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
 	desc_buf = kmalloc(buff_len, GFP_KERNEL);
 	if (!desc_buf) {
 		err = -ENOMEM;
 		goto out;
 	}
 
-	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0,
-			desc_buf, buff_len);
+	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
+				     desc_buf, buff_len);
 	if (err) {
 		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
 			__func__, err);
@@ -7288,10 +7310,11 @@ out:
 
 static int ufshcd_device_params_init(struct ufs_hba *hba)
 {
 	bool flag;
-	int ret;
+	int ret, i;
 
-	/* Init check for device descriptor sizes */
-	ufshcd_init_desc_sizes(hba);
+	/* Init device descriptor sizes */
+	for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
+		hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
 
 	/* Init UFS geometry descriptor related parameters */
 	ret = ufshcd_device_geo_params_init(hba);
@@ -8084,6 +8107,8 @@ out:
 
 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 {
+	bool vcc_off = false;
+
 	/*
 	 * It seems some UFS devices may keep drawing more than sleep current
 	 * (at least for 500us) from UFS rails (especially from VCCQ rail).
@@ -8112,13 +8137,22 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
 	    !hba->dev_info.is_lu_power_on_wp) {
 		ufshcd_setup_vreg(hba, false);
+		vcc_off = true;
 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
 		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+		vcc_off = true;
 		if (!ufshcd_is_link_active(hba)) {
 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
 			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
 		}
 	}
+
+	/*
+	 * Some UFS devices require a delay after the VCC power rail is
+	 * turned off.
+	 */
+	if (vcc_off && hba->vreg_info.vcc &&
+		hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
+		usleep_range(5000, 5100);
 }
 
 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
@@ -8659,6 +8693,7 @@ EXPORT_SYMBOL_GPL(ufshcd_remove);
  */
 void ufshcd_dealloc_host(struct ufs_hba *hba)
 {
+	ufshcd_crypto_destroy_keyslot_manager(hba);
 	scsi_host_put(hba->host);
 }
 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
@@ -8759,7 +8794,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 		goto out_error;
 
 	/* Read capabilities registers */
-	ufshcd_hba_capabilities(hba);
+	err = ufshcd_hba_capabilities(hba);
+	if (err)
+		goto out_disable;
 
 	/* Get UFS version supported by the controller */
 	hba->ufs_version = ufshcd_get_ufs_version(hba);
@@ -8869,6 +8906,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	/* Reset the attached device */
 	ufshcd_vops_device_reset(hba);
 
+	ufshcd_init_crypto(hba);
+
 	/* Host controller enable */
 	err = ufshcd_hba_enable(hba);
 	if (err) {
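
With ufshcd_init_desc_sizes() gone, descriptor lengths are no longer probed one by one at init: every entry in hba->desc_size[] starts at QUERY_DESC_MAX_SIZE, and the read path can then trim the cached value to the bLength byte the device actually returns. A sketch of that lazy-trim idea; struct desc_cache and cache_desc_size() are hypothetical stand-ins, not kernel symbols:

    #include <linux/types.h>

    #define QUERY_DESC_MAX_SIZE	255	/* any UFS descriptor length fits in a byte */

    struct desc_cache {
    	unsigned char size[16];	/* stands in for hba->desc_size[QUERY_DESC_IDN_MAX] */
    };

    /* Hypothetical: trim a cached size from byte 0 (bLength) of a fresh read. */
    static void cache_desc_size(struct desc_cache *dc, int idn, const u8 *buf)
    {
    	if (dc->size[idn] == QUERY_DESC_MAX_SIZE && buf[0])
    		dc->size[idn] = buf[0];	/* device reported its real length */
    }
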
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index bf97d616e597..b2ef18f1b746 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1,37 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Universal Flash Storage Host controller driver
- *
- * This code is based on drivers/scsi/ufs/ufshcd.h
  * Copyright (C) 2011-2013 Samsung India Software Operations
  * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
  *	Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
  */
 
 #ifndef _UFSHCD_H
@@ -57,6 +32,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/bitfield.h>
 #include <linux/devfreq.h>
+#include <linux/keyslot-manager.h>
 #include "unipro.h"
 #include <asm/irq.h>
@@ -88,8 +64,6 @@ enum dev_cmd_type {
  * @argument1: UIC command argument 1
  * @argument2: UIC command argument 2
  * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @result: UIC command result
  * @done: UIC command completion
  */
 struct uic_command {
@@ -97,8 +71,6 @@ struct uic_command {
 	u32 argument1;
 	u32 argument2;
 	u32 argument3;
-	int cmd_active;
-	int result;
 	struct completion done;
 };
@@ -183,6 +155,8 @@ struct ufs_pm_lvl_states {
  * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
  * @issue_time_stamp: time stamp for debug purposes
  * @compl_time_stamp: time stamp for statistics
+ * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
+ * @data_unit_num: the data unit number for the first block for inline crypto
  * @req_abort_skip: skip request abort task flag
  */
 struct ufshcd_lrb {
@@ -207,6 +181,10 @@ struct ufshcd_lrb {
 	bool intr_cmd;
 	ktime_t issue_time_stamp;
 	ktime_t compl_time_stamp;
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+	int crypto_key_slot;
+	u64 data_unit_num;
+#endif
 	bool req_abort_skip;
 };
@@ -236,16 +214,6 @@ struct ufs_dev_cmd {
 	struct ufs_query query;
 };
 
-struct ufs_desc_size {
-	int dev_desc;
-	int pwr_desc;
-	int geom_desc;
-	int interc_desc;
-	int unit_desc;
-	int conf_desc;
-	int hlth_desc;
-};
-
 /**
  * struct ufs_clk_info - UFS clock related info
  * @list: list headed by hba->clk_list_head
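
The two new ufshcd_lrb fields above are filled from the block layer's inline-crypto context before the LRB is issued; device-management commands pass a NULL request, which parks the slot at -1 (as seen in the ufshcd_prepare_lrbp_crypto(NULL, lrbp) call earlier). A sketch of what such a helper can look like, assuming the 5.8-era blk-crypto API (rq->crypt_keyslot, rq->crypt_ctx, blk_ksm_get_slot_idx()); the real helper lives in the new ufshcd-crypto.h, which this view does not show:

    #include <linux/blkdev.h>
    #include <linux/keyslot-manager.h>

    /* Sketch of an LRB-preparation helper; field roles per the hunks above. */
    static inline void lrb_fill_crypto(struct request *rq, int *key_slot,
    				   u64 *data_unit_num)
    {
    	if (rq && rq->crypt_keyslot) {
    		*key_slot = blk_ksm_get_slot_idx(rq->crypt_keyslot);
    		*data_unit_num = rq->crypt_ctx->bc_dun[0];
    	} else {
    		*key_slot = -1;	/* no inline encryption for this request */
    	}
    }
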
@@ -313,6 +281,7 @@ struct ufs_pwr_mode_info {
  * @dbg_register_dump: used to dump controller debug information
  * @phy_initialization: used to initialize phys
  * @device_reset: called to issue a reset pulse on the UFS device
+ * @program_key: program or evict an inline encryption key
  */
 struct ufs_hba_variant_ops {
 	const char *name;
@@ -346,6 +315,8 @@ struct ufs_hba_variant_ops {
 	void	(*config_scaling_param)(struct ufs_hba *hba,
 					struct devfreq_dev_profile *profile,
 					void *data);
+	int	(*program_key)(struct ufs_hba *hba,
+			       const union ufs_crypto_cfg_entry *cfg, int slot);
 };
 
 /* clock gating state  */
@@ -411,7 +382,7 @@ struct ufs_saved_pwr_info {
 struct ufs_clk_scaling {
 	int active_reqs;
 	unsigned long tot_busy_t;
-	unsigned long window_start_t;
+	ktime_t window_start_t;
 	ktime_t busy_start_t;
 	struct device_attribute enable_attr;
 	struct ufs_saved_pwr_info saved_pwr_info;
@@ -520,6 +491,35 @@ enum ufshcd_quirks {
 	 * ops (get_ufs_hci_version) to get the correct version.
 	 */
 	UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		= 1 << 5,
+
+	/*
+	 * Clear handling for transfer/task request list is just opposite.
+	 */
+	UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR		= 1 << 6,
+
+	/*
+	 * This quirk needs to be enabled if the host controller doesn't allow
+	 * the interrupt aggregation timer and counter to be reset by s/w.
+	 */
+	UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR		= 1 << 7,
+
+	/*
+	 * This quirk needs to be enabled if the host controller cannot be
+	 * enabled via the HCE register.
+	 */
+	UFSHCI_QUIRK_BROKEN_HCE				= 1 << 8,
+
+	/*
+	 * This quirk needs to be enabled if the host controller regards
+	 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
+	 */
+	UFSHCD_QUIRK_PRDT_BYTE_GRAN			= 1 << 9,
+
+	/*
+	 * This quirk needs to be enabled if the host controller reports
+	 * OCS FATAL ERROR with device error through sense data
+	 */
+	UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR		= 1 << 10,
 };
 
 enum ufshcd_caps {
@@ -564,6 +564,12 @@ enum ufshcd_caps {
 	 * provisioned to be used. This would increase the write performance.
 	 */
 	UFSHCD_CAP_WB_EN				= 1 << 7,
+
+	/*
+	 * This capability allows the host controller driver to use the
+	 * inline crypto engine, if it is present
+	 */
+	UFSHCD_CAP_CRYPTO				= 1 << 8,
 };
 
 struct ufs_hba_variant_params {
@@ -624,6 +630,10 @@ struct ufs_hba_variant_params {
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
  * @scsi_block_reqs_cnt: reference counting for scsi block requests
+ * @crypto_capabilities: Content of crypto capabilities register (0x100)
+ * @crypto_cap_array: Array of crypto capabilities
+ * @crypto_cfg_register: Start of the crypto cfg array
+ * @ksm: the keyslot manager tied to this hba
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -738,7 +748,7 @@ struct ufs_hba {
 	bool is_urgent_bkops_lvl_checked;
 
 	struct rw_semaphore clk_scaling_lock;
-	struct ufs_desc_size desc_size;
+	unsigned char desc_size[QUERY_DESC_IDN_MAX];
 	atomic_t scsi_block_reqs_cnt;
 
 	struct device		bsg_dev;
@@ -746,6 +756,13 @@ struct ufs_hba {
 	bool wb_buf_flush_enabled;
 	bool wb_enabled;
 	struct delayed_work rpm_dev_flush_recheck_work;
+
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+	union ufs_crypto_capabilities crypto_capabilities;
+	union ufs_crypto_cap_entry *crypto_cap_array;
+	u32 crypto_cfg_register;
+	struct blk_keyslot_manager ksm;
+#endif
 };
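
Quirks and caps are plain bitmasks that a variant driver ORs into the hba during its init vop; the core then consults them at the affected spots, as the interrupt-aggregation hunk earlier shows. A minimal sketch of a hypothetical variant init (the driver name and function are made up; the quirk, cap, and .init vop slot are the real ones declared above):

    /* Hypothetical variant init: opt in to a quirk and a capability. */
    static int example_ufs_init(struct ufs_hba *hba)
    {
    	/* Controller can't reset its interrupt aggregation timer in s/w. */
    	hba->quirks |= UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR;

    	/* Advertise the inline crypto engine to the core. */
    	hba->caps |= UFSHCD_CAP_CRYPTO;

    	return 0;
    }

    static const struct ufs_hba_variant_ops example_ufs_vops = {
    	.name = "example",
    	.init = example_ufs_init,
    };
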
 /* Returns true if clocks can be gated. Otherwise false */
@@ -976,8 +993,8 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
 int ufshcd_hold(struct ufs_hba *hba, bool async);
 void ufshcd_release(struct ufs_hba *hba);
 
-int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
-	int *desc_length);
+void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+				  int *desc_length);
 
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index c2961d37cc1c..ba31b090f784 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -1,36 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Universal Flash Storage Host controller driver
- *
- * This code is based on drivers/scsi/ufs/ufshci.h
  * Copyright (C) 2011-2013 Samsung India Software Operations
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
  *	Vinayak Holikatti <h.vinayak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * See the COPYING file in the top-level directory or visit
- * <http://www.gnu.org/licenses/gpl-2.0.html>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This program is provided "AS IS" and "WITH ALL FAULTS" and
- * without warranty of any kind. You are solely responsible for
- * determining the appropriateness of using and distributing
- * the program and assume all risks associated with your exercise
- * of rights with respect to the program, including but not limited
- * to infringement of third party rights, the risks and costs of
- * program errors, damage to or loss of data, programs or equipment,
- * and unavailability or interruption of operations. Under no
- * circumstances will the contributor of this Program be liable for
- * any damages of any kind arising from your use or distribution of
- * this program.
  */
 
 #ifndef _UFSHCI_H
@@ -90,6 +65,7 @@ enum {
 	MASK_64_ADDRESSING_SUPPORT		= 0x01000000,
 	MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT	= 0x02000000,
 	MASK_UIC_DME_TEST_MODE_SUPPORT		= 0x04000000,
+	MASK_CRYPTO_SUPPORT			= 0x10000000,
 };
 
 #define UFS_MASK(mask, offset)	((mask) << (offset))
@@ -143,6 +119,7 @@ enum {
 #define DEVICE_FATAL_ERROR			0x800
 #define CONTROLLER_FATAL_ERROR			0x10000
 #define SYSTEM_BUS_FATAL_ERROR			0x20000
+#define CRYPTO_ENGINE_FATAL_ERROR		0x40000
 
 #define UFSHCD_UIC_HIBERN8_MASK	(UIC_HIBERNATE_ENTER |\
 				UIC_HIBERNATE_EXIT)
@@ -155,11 +132,13 @@ enum {
 #define UFSHCD_ERROR_MASK	(UIC_ERROR |\
 				DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
-				SYSTEM_BUS_FATAL_ERROR)
+				SYSTEM_BUS_FATAL_ERROR |\
+				CRYPTO_ENGINE_FATAL_ERROR)
 
 #define INT_FATAL_ERRORS	(DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
-				SYSTEM_BUS_FATAL_ERROR)
+				SYSTEM_BUS_FATAL_ERROR |\
+				CRYPTO_ENGINE_FATAL_ERROR)
 
 /* HCS - Host Controller Status 30h */
 #define DEVICE_PRESENT				0x1
@@ -318,6 +297,61 @@ enum {
 	INTERRUPT_MASK_ALL_VER_21	= 0x71FFF,
 };
 
+/* CCAP - Crypto Capability 100h */
+union ufs_crypto_capabilities {
+	__le32 reg_val;
+	struct {
+		u8 num_crypto_cap;
+		u8 config_count;
+		u8 reserved;
+		u8 config_array_ptr;
+	};
+};
+
+enum ufs_crypto_key_size {
+	UFS_CRYPTO_KEY_SIZE_INVALID	= 0x0,
+	UFS_CRYPTO_KEY_SIZE_128		= 0x1,
+	UFS_CRYPTO_KEY_SIZE_192		= 0x2,
+	UFS_CRYPTO_KEY_SIZE_256		= 0x3,
+	UFS_CRYPTO_KEY_SIZE_512		= 0x4,
+};
+
+enum ufs_crypto_alg {
+	UFS_CRYPTO_ALG_AES_XTS			= 0x0,
+	UFS_CRYPTO_ALG_BITLOCKER_AES_CBC	= 0x1,
+	UFS_CRYPTO_ALG_AES_ECB			= 0x2,
+	UFS_CRYPTO_ALG_ESSIV_AES_CBC		= 0x3,
+};
+
+/* x-CRYPTOCAP - Crypto Capability X */
+union ufs_crypto_cap_entry {
+	__le32 reg_val;
+	struct {
+		u8 algorithm_id;
+		u8 sdus_mask; /* Supported data unit size mask */
+		u8 key_size;
+		u8 reserved;
+	};
+};
+
+#define UFS_CRYPTO_CONFIGURATION_ENABLE (1 << 7)
+#define UFS_CRYPTO_KEY_MAX_SIZE 64
+/* x-CRYPTOCFG - Crypto Configuration X */
+union ufs_crypto_cfg_entry {
+	__le32 reg_val[32];
+	struct {
+		u8 crypto_key[UFS_CRYPTO_KEY_MAX_SIZE];
+		u8 data_unit_size;
+		u8 crypto_cap_idx;
+		u8 reserved_1;
+		u8 config_enable;
+		u8 reserved_multi_host;
+		u8 reserved_2;
+		u8 vsb[2];
+		u8 reserved_3[56];
+	};
+};
+
 /*
  * Request Descriptor Definitions
  */
@@ -339,6 +373,7 @@ enum {
 	UTP_NATIVE_UFS_COMMAND		= 0x10000000,
 	UTP_DEVICE_MANAGEMENT_FUNCTION	= 0x20000000,
 	UTP_REQ_DESC_INT_CMD		= 0x01000000,
+	UTP_REQ_DESC_CRYPTO_ENABLE_CMD	= 0x00800000,
 };
 
 /* UTP Transfer Request Data Direction (DD) */
@@ -358,6 +393,9 @@ enum {
 	OCS_PEER_COMM_FAILURE		= 0x5,
 	OCS_ABORTED			= 0x6,
 	OCS_FATAL_ERROR			= 0x7,
+	OCS_DEVICE_FATAL_ERROR		= 0x8,
+	OCS_INVALID_CRYPTO_CONFIG	= 0x9,
+	OCS_GENERAL_CRYPTO_ERROR	= 0xA,
 	OCS_INVALID_COMMAND_STATUS	= 0x0F,
 	MASK_OCS			= 0x0F,
 };
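
The union ufs_crypto_cfg_entry above mirrors a per-slot CRYPTOCFG register block: a 64-byte key area, a data-unit size expressed as a multiple of 512 bytes, the index of the matching capability entry, and an enable flag in the CFGE byte. A hedged sketch of filling one entry in memory; the function is illustrative only (in the driver, the actual register writes go through the program_key vop, and XTS keys additionally split key and tweak halves across the 64-byte area):

    #include <linux/string.h>

    /* Illustrative: build a CRYPTOCFG entry for a key used on 4K data units. */
    static void fill_crypto_cfg(union ufs_crypto_cfg_entry *cfg,
    			    const u8 *key, unsigned int key_len, u8 cap_idx)
    {
    	memset(cfg, 0, sizeof(*cfg));
    	memcpy(cfg->crypto_key, key, key_len);	/* caller ensures key_len <= 64 */
    	cfg->data_unit_size = 4096 / 512;	/* data units in 512-byte multiples */
    	cfg->crypto_cap_idx = cap_idx;
    	cfg->config_enable = UFS_CRYPTO_CONFIGURATION_ENABLE;
    }
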
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 766d551df3fc..4ee64782fd48 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -64,8 +64,25 @@
 #define CFGRXOVR4				0x00E9
 #define RXSQCTRL				0x00B5
 #define CFGRXOVR6				0x00BF
+#define RX_HS_G1_SYNC_LENGTH_CAP		0x008B
+#define RX_HS_G1_PREP_LENGTH_CAP		0x008C
+#define RX_HS_G2_SYNC_LENGTH_CAP		0x0094
+#define RX_HS_G3_SYNC_LENGTH_CAP		0x0095
+#define RX_HS_G2_PREP_LENGTH_CAP		0x0096
+#define RX_HS_G3_PREP_LENGTH_CAP		0x0097
+#define RX_ADV_GRANULARITY_CAP			0x0098
+#define RX_MIN_ACTIVATETIME_CAP			0x008F
+#define RX_HIBERN8TIME_CAP			0x0092
+#define RX_ADV_HIBERN8TIME_CAP			0x0099
+#define RX_ADV_MIN_ACTIVATETIME_CAP		0x009A
+
 
 #define is_mphy_tx_attr(attr)			(attr < RX_MODE)
+#define RX_ADV_FINE_GRAN_STEP(x)		((((x) & 0x3) << 1) | 0x1)
+#define SYNC_LEN_FINE(x)			((x) & 0x3F)
+#define SYNC_LEN_COARSE(x)			((1 << 6) | ((x) & 0x3F))
+#define PREP_LEN(x)				((x) & 0xF)
+
 #define RX_MIN_ACTIVATETIME_UNIT_US		100
 #define HIBERN8TIME_UNIT_US			100
@@ -124,6 +141,7 @@
 #define PA_PACPREQEOBTIMEOUT	0x1591
 #define PA_HIBERN8TIME		0x15A7
 #define PA_LOCALVERINFO		0x15A9
+#define PA_GRANULARITY		0x15AA
 #define PA_TACTIVATE		0x15A8
 #define PA_PACPFRAMECOUNT	0x15C0
 #define PA_PACPERRORCOUNT	0x15C1
@@ -291,4 +309,19 @@ enum {
 	TRUE,
 };
 
+/* CPort setting */
+#define E2EFC_ON	(1 << 0)
+#define E2EFC_OFF	(0 << 0)
+#define CSD_N_ON	(0 << 1)
+#define CSD_N_OFF	(1 << 1)
+#define CSV_N_ON	(0 << 2)
+#define CSV_N_OFF	(1 << 2)
+#define CPORT_DEF_FLAGS	(CSV_N_OFF | CSD_N_OFF | E2EFC_OFF)
+
+/* CPort connection state */
+enum {
+	CPORT_IDLE = 0,
+	CPORT_CONNECTED,
+};
+
 #endif /* _UNIPRO_H_ */
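
The new SYNC_LEN_*/PREP_LEN macros pack values into the 7-bit SyncLength format (bit 6 selects COARSE granularity, the low six bits carry the length) and the 4-bit PrepareLength field. A hedged sketch of how a host driver might apply them through the existing DME helpers; the fixed values and the function itself are illustrative, while the PA attributes and ufshcd_dme_set()/UIC_ARG_MIB() are existing kernel symbols:

    /* Illustrative: apply fixed HS-G1 sync/prepare lengths via PA attributes. */
    static int example_set_hs_g1_lengths(struct ufs_hba *hba)
    {
    	int err;

    	/* SYNC_LEN_COARSE(0x10) = 0x50: bit 6 flags COARSE granularity. */
    	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSG1SYNCLENGTH),
    			     SYNC_LEN_COARSE(0x10));
    	if (err)
    		return err;

    	/* PREP_LEN() masks the value down to the 4-bit PrepareLength field. */
    	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSG1PREPARELENGTH),
    			      PREP_LEN(0xA));
    }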