author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2024-07-15 20:44:31 +0200
---|---|---
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2024-07-15 20:44:31 +0200
commit | 281cfec53b4484ce2092c89b6909f5573cb23443 (patch) |
tree | 5a32c3d51b06c847d516f8a90b388919c12df67c /drivers/thermal |
parent | Merge branch 'thermal-core' (diff) |
parent | thermal: intel: hfi: Give HFI instances package scope (diff) |
Merge branch 'thermal-intel'
Merge updates of Intel thermal drivers for 6.11-rc1:
- Switch Intel thermal drivers to new Intel CPU model defines (Tony
Luck).
- Clean up the int3400 and int3403 drivers (Erick Archer and David Alan
Gilbert).
- Improve intel_pch_thermal kernel log messages printed during suspend
to idle (Zhang Rui).
- Make the intel_tcc_cooling driver use a model-specific bitmask for
TCC offset (Ricardo Neri); the mask handling is sketched after this list.
- Add DLVR and MSI interrupt support for the Lunar Lake platform to the
int340x thermal driver (Srinivas Pandruvada).
- Enable workload type hints (WLT) support and power floor interrupt
support for the Lunar Lake platform in int340x (Srinivas Pandruvada).
- Make the HFI thermal driver use package scope for HFI instances as
per the Intel SDM (Zhang Rui).
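The model-specific bitmask work in intel_tcc boils down to simple bit arithmetic on MSR_TEMPERATURE_TARGET. The following is a minimal user-space sketch of that arithmetic only (not the kernel code in the diff below); the register value, TjMax, and mask are made up for illustration:

```c
/*
 * User-space sketch (illustrative values, not kernel code) of the
 * model-specific TCC-offset handling: the offset lives in bits 29:24 of
 * MSR_TEMPERATURE_TARGET and is validated and masked with a per-model
 * bitmask instead of a hard-coded 0x3f.
 */
#include <stdint.h>
#include <stdio.h>

static int set_tcc_offset(uint32_t *msr_low, unsigned int offset, uint32_t offset_mask)
{
	if (!offset_mask)		/* a mask of 0 means no TCC offset support */
		return -1;
	if (offset > offset_mask)	/* out of range for this model */
		return -1;

	*msr_low &= ~(offset_mask << 24);	/* clear the old offset field */
	*msr_low |= offset << 24;		/* program the new offset */
	return 0;
}

int main(void)
{
	uint32_t low = 0x3f << 24;	/* pretend the field is currently all ones */

	if (!set_tcc_offset(&low, 10, 0x3f))	/* 0x3f: mask used on e.g. Broadwell */
		printf("TCC offset now %u\n", (low >> 24) & 0x3f);	/* prints 10 */
	return 0;
}
```

The same idea carries over to the temperature readout: the digital readout field of IA32_[PACKAGE_]THERM_STATUS is extracted with a per-model mask and subtracted from TjMax.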
* thermal-intel:
thermal: intel: hfi: Give HFI instances package scope
thermal: intel: int340x: Enable WLT and power floor support for Lunar Lake
thermal: intel: int340x: Support MSI interrupt for Lunar Lake
thermal: intel: int340x: Remove unnecessary calls to free irq
thermal: intel: int340x: Add DLVR support for Lunar Lake
thermal: intel: int340x: Capability to map user space to firmware values
thermal: intel: int340x: Cleanup of DLVR sysfs on driver remove
thermal: intel: intel_tcc_cooling: Use a model-specific bitmask for TCC offset
thermal: intel: intel_tcc: Add model checks for temperature registers
thermal: intel: intel_pch: Improve cooling log
thermal: int3403: remove unused struct 'int3403_performance_state'
thermal: int3400: Use sizeof(*pointer) instead of sizeof(type)
thermal: intel: intel_soc_dts_thermal: Switch to new Intel CPU model defines
thermal: intel: intel_tcc_cooling: Switch to new Intel CPU model defines
Diffstat (limited to 'drivers/thermal')
11 files changed, 412 insertions(+), 79 deletions(-)
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index fa96972266e4..b0c0f0ffdcb0 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -571,7 +571,7 @@ static int int3400_thermal_probe(struct platform_device *pdev)
        if (!adev)
                return -ENODEV;
-       priv = kzalloc(sizeof(struct int3400_thermal_priv), GFP_KERNEL);
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 86901f9f54d8..c094a422ded3 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -25,17 +25,6 @@ struct int3403_sensor {
        struct int34x_thermal_zone *int340x_zone;
 };
-struct int3403_performance_state {
-       u64 performance;
-       u64 power;
-       u64 latency;
-       u64 linear;
-       u64 control;
-       u64 raw_performace;
-       char *raw_unit;
-       int reserved;
-};
-
 struct int3403_cdev {
        struct thermal_cooling_device *cdev;
        unsigned long max_state;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index d75fae7b7ed2..7c46dd6bee73 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -440,7 +440,8 @@ void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device *
                proc_thermal_rapl_remove();
        if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_FIVR ||
-           proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS)
+           proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DVFS ||
+           proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_DLVR)
                proc_thermal_rfim_remove(pdev);
        if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_POWER_FLOOR)
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index 674f3c85dfbc..d5eca6db2c00 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -65,6 +65,7 @@ struct rapl_mmio_regs {
 #define PROC_THERMAL_FEATURE_DLVR 0x10
 #define PROC_THERMAL_FEATURE_WT_HINT 0x20
 #define PROC_THERMAL_FEATURE_POWER_FLOOR 0x40
+#define PROC_THERMAL_FEATURE_MSI_SUPPORT 0x80
 #if IS_ENABLED(CONFIG_PROC_THERMAL_MMIO_RAPL)
 int proc_thermal_rapl_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 4f41fe543340..114136893a59 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -63,6 +63,18 @@ static struct proc_thermal_mmio_info proc_thermal_mmio_info[] = {
        { PROC_THERMAL_MMIO_INT_STATUS_1, 0x7200, 8, 0x01 },
 };
+/* List of supported MSI IDs (sources) */
+enum proc_thermal_msi_ids {
+       PKG_THERMAL,
+       DDR_THERMAL,
+       THERM_POWER_FLOOR,
+       WORKLOAD_CHANGE,
+       MSI_THERMAL_MAX
+};
+
+/* Stores IRQ associated with a MSI ID */
+static int proc_thermal_msi_map[MSI_THERMAL_MAX];
+
 #define B0D4_THERMAL_NOTIFY_DELAY 1000
 static int notify_delay_ms = B0D4_THERMAL_NOTIFY_DELAY;
@@ -146,22 +158,41 @@ static irqreturn_t proc_thermal_irq_thread_handler(int irq, void *devid)
        return IRQ_HANDLED;
 }
+static int proc_thermal_match_msi_irq(int irq)
+{
+       int i;
+
+       if (!use_msi)
+               goto msi_fail;
+
+       for (i = 0; i < MSI_THERMAL_MAX; i++) {
+               if (proc_thermal_msi_map[i] == irq)
+                       return i;
+       }
+
+msi_fail:
+       return -EOPNOTSUPP;
+}
+
 static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
 {
        struct proc_thermal_pci *pci_info = devid;
        struct proc_thermal_device *proc_priv;
-       int ret = IRQ_NONE;
+       int ret = IRQ_NONE, msi_id;
        u32 status;
        proc_priv = pci_info->proc_priv;
+       msi_id = proc_thermal_match_msi_irq(irq);
+
        if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_WT_HINT) {
-               if (proc_thermal_check_wt_intr(pci_info->proc_priv))
+               if (msi_id == WORKLOAD_CHANGE || proc_thermal_check_wt_intr(pci_info->proc_priv))
                        ret = IRQ_WAKE_THREAD;
        }
        if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_POWER_FLOOR) {
-               if (proc_thermal_check_power_floor_intr(pci_info->proc_priv))
+               if (msi_id == THERM_POWER_FLOOR ||
+                   proc_thermal_check_power_floor_intr(pci_info->proc_priv))
                        ret = IRQ_WAKE_THREAD;
        }
@@ -171,7 +202,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
         * interrupt before scheduling work function for thermal threshold.
         */
        proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_INT_STATUS_0, &status);
-       if (status) {
+       if (msi_id == PKG_THERMAL || status) {
                /* Disable enable interrupt flag */
                proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
                pkg_thermal_schedule_work(&pci_info->work);
@@ -245,6 +276,45 @@ static struct thermal_zone_params tzone_params = {
        .no_hwmon = true,
 };
+static bool msi_irq;
+
+static int proc_thermal_setup_msi(struct pci_dev *pdev, struct proc_thermal_pci *pci_info)
+{
+       int ret, i, irq;
+
+       ret = pci_alloc_irq_vectors(pdev, 1, MSI_THERMAL_MAX, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to allocate vectors!\n");
+               return ret;
+       }
+
+       dev_info(&pdev->dev, "msi enabled:%d msix enabled:%d\n", pdev->msi_enabled,
+                pdev->msix_enabled);
+
+       for (i = 0; i < MSI_THERMAL_MAX; i++) {
+               irq = pci_irq_vector(pdev, i);
+
+               ret = devm_request_threaded_irq(&pdev->dev, irq, proc_thermal_irq_handler,
+                                               proc_thermal_irq_thread_handler,
+                                               0, KBUILD_MODNAME, pci_info);
+               if (ret) {
+                       dev_err(&pdev->dev, "Request IRQ %d failed\n", irq);
+                       goto err_free_msi_vectors;
+               }
+
+               proc_thermal_msi_map[i] = irq;
+       }
+
+       msi_irq = true;
+
+       return 0;
+
+err_free_msi_vectors:
+       pci_free_irq_vectors(pdev);
+
+       return ret;
+}
+
 static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct proc_thermal_device *proc_priv;
@@ -254,7 +324,6 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
                .flags = THERMAL_TRIP_FLAG_RW_TEMP,
        };
        int irq_flag = 0, irq, ret;
-       bool msi_irq = false;
        proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL);
        if (!proc_priv)
@@ -300,27 +369,24 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
                goto err_del_legacy;
        }
-       if (use_msi && (pdev->msi_enabled || pdev->msix_enabled)) {
-               /* request and enable interrupt */
-               ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
-               if (ret < 0) {
-                       dev_err(&pdev->dev, "Failed to allocate vectors!\n");
-                       goto err_ret_tzone;
-               }
+       if (proc_priv->mmio_feature_mask & PROC_THERMAL_FEATURE_MSI_SUPPORT)
+               use_msi = true;
-               irq = pci_irq_vector(pdev, 0);
-               msi_irq = true;
+       if (use_msi) {
+               ret = proc_thermal_setup_msi(pdev, pci_info);
+               if (ret)
+                       goto err_ret_tzone;
        } else {
                irq_flag = IRQF_SHARED;
                irq = pdev->irq;
-       }
-       ret = devm_request_threaded_irq(&pdev->dev, irq,
-                                       proc_thermal_irq_handler, proc_thermal_irq_thread_handler,
-                                       irq_flag, KBUILD_MODNAME, pci_info);
-       if (ret) {
-               dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq);
-               goto err_free_vectors;
+               ret = devm_request_threaded_irq(&pdev->dev, irq, proc_thermal_irq_handler,
+                                               proc_thermal_irq_thread_handler, irq_flag,
+                                               KBUILD_MODNAME, pci_info);
+               if (ret) {
+                       dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq);
+                       goto err_ret_tzone;
+               }
        }
        ret = thermal_zone_device_enable(pci_info->tzone);
@@ -353,9 +419,6 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev)
        proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
        proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
-       devm_free_irq(&pdev->dev, pdev->irq, pci_info);
-       pci_free_irq_vectors(pdev);
-
        thermal_zone_device_unregister(pci_info->tzone);
        proc_thermal_mmio_remove(pdev, pci_info->proc_priv);
        if (!pci_info->no_legacy)
@@ -409,7 +472,9 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
 static const struct pci_device_id proc_thermal_pci_ids[] = {
        { PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
-       { PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_RAPL) },
+       { PCI_DEVICE_DATA(INTEL, LNLM_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT |
+         PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR |
+         PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
        { PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index e56db75a94fb..0e2dc1426282 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -19,6 +19,12 @@ struct mmio_reg {
        u16 shift;
 };
+struct mapping_table {
+       const char *attr_name;
+       const u32 value;
+       const char *mapped_str;
+};
+
 /* These will represent sysfs attribute names */
 static const char * const fivr_strings[] = {
        "vco_ref_code_lo",
@@ -62,6 +68,78 @@ static const struct mmio_reg dlvr_mmio_regs[] = {
        { 1, 0x15A10, 1, 0x1, 16}, /* dlvr_pll_busy */
 };
+static const struct mmio_reg lnl_dlvr_mmio_regs[] = {
+       { 0, 0x5A08, 5, 0x1F, 0}, /* dlvr_spread_spectrum_pct */
+       { 0, 0x5A08, 1, 0x1, 5}, /* dlvr_control_mode */
+       { 0, 0x5A08, 1, 0x1, 6}, /* dlvr_control_lock */
+       { 0, 0x5A08, 1, 0x1, 7}, /* dlvr_rfim_enable */
+       { 0, 0x5A08, 2, 0x3, 8}, /* dlvr_freq_select */
+       { 1, 0x5A10, 2, 0x3, 30}, /* dlvr_hardware_rev */
+       { 1, 0x5A10, 2, 0x3, 0}, /* dlvr_freq_mhz */
+       { 1, 0x5A10, 1, 0x1, 23}, /* dlvr_pll_busy */
+};
+
+static const struct mapping_table lnl_dlvr_mapping[] = {
+       {"dlvr_freq_select", 0, "2227.2"},
+       {"dlvr_freq_select", 1, "2140"},
+       {"dlvr_freq_mhz", 0, "2227.2"},
+       {"dlvr_freq_mhz", 1, "2140"},
+       {NULL, 0, NULL},
+};
+
+static int match_mapping_table(const struct mapping_table *table, const char *attr_name,
+                              bool match_int_value, const u32 value, const char *value_str,
+                              char **result_str, u32 *result_int)
+{
+       bool attr_matched = false;
+       int i = 0;
+
+       if (!table)
+               return -EOPNOTSUPP;
+
+       while (table[i].attr_name) {
+               if (strncmp(table[i].attr_name, attr_name, strlen(attr_name)))
+                       goto match_next;
+
+               attr_matched = true;
+
+               if (match_int_value) {
+                       if (table[i].value != value)
+                               goto match_next;
+
+                       *result_str = (char *)table[i].mapped_str;
+                       return 0;
+               }
+
+               if (strncmp(table[i].mapped_str, value_str, strlen(table[i].mapped_str)))
+                       goto match_next;
+
+               *result_int = table[i].value;
+
+               return 0;
+match_next:
+               i++;
+       }
+
+       /* If attribute name is matched, then the user space value is invalid */
+       if (attr_matched)
+               return -EINVAL;
+
+       return -EOPNOTSUPP;
+}
+
+static int get_mapped_string(const struct mapping_table *table, const char *attr_name,
+                            u32 value, char **result)
+{
+       return match_mapping_table(table, attr_name, true, value, NULL, result, NULL);
+}
+
+static int get_mapped_value(const struct mapping_table *table, const char *attr_name,
+                           const char *value, unsigned int *result)
+{
+       return match_mapping_table(table, attr_name, false, 0, value, NULL, result);
+}
+
 /* These will represent sysfs attribute names */
 static const char * const dvfs_strings[] = {
        "rfi_restriction_run_busy",
@@ -93,12 +171,14 @@ static ssize_t suffix##_show(struct device *dev,\
                             struct device_attribute *attr,\
                             char *buf)\
 {\
+       const struct mapping_table *mapping = NULL;\
        struct proc_thermal_device *proc_priv;\
        struct pci_dev *pdev = to_pci_dev(dev);\
        const struct mmio_reg *mmio_regs;\
        const char **match_strs;\
+       int ret, err;\
        u32 reg_val;\
-       int ret;\
+       char *str;\
\
        proc_priv = pci_get_drvdata(pdev);\
        if (table == 1) {\
@@ -106,7 +186,12 @@ static ssize_t suffix##_show(struct device *dev,\
                mmio_regs = adl_dvfs_mmio_regs;\
        } else if (table == 2) { \
                match_strs = (const char **)dlvr_strings;\
-               mmio_regs = dlvr_mmio_regs;\
+               if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\
+                       mmio_regs = lnl_dlvr_mmio_regs;\
+                       mapping = lnl_dlvr_mapping;\
+               } else {\
+                       mmio_regs = dlvr_mmio_regs;\
+               } \
        } else {\
                match_strs = (const char **)fivr_strings;\
                mmio_regs = tgl_fivr_mmio_regs;\
@@ -116,7 +201,12 @@ static ssize_t suffix##_show(struct device *dev,\
                return ret;\
        reg_val = readl((void __iomem *) (proc_priv->mmio_base + mmio_regs[ret].offset));\
        ret = (reg_val >> mmio_regs[ret].shift) & mmio_regs[ret].mask;\
-       return sprintf(buf, "%u\n", ret);\
+       err = get_mapped_string(mapping, attr->attr.name, ret, &str);\
+       if (!err)\
+               return sprintf(buf, "%s\n", str);\
+       if (err == -EOPNOTSUPP)\
+               return sprintf(buf, "%u\n", ret);\
+       return err;\
 }
 #define RFIM_STORE(suffix, table)\
@@ -124,6 +214,7 @@ static ssize_t suffix##_store(struct device *dev,\
                              struct device_attribute *attr,\
                              const char *buf, size_t count)\
 {\
+       const struct mapping_table *mapping = NULL;\
        struct proc_thermal_device *proc_priv;\
        struct pci_dev *pdev = to_pci_dev(dev);\
        unsigned int input;\
@@ -139,7 +230,12 @@ static ssize_t suffix##_store(struct device *dev,\
                mmio_regs = adl_dvfs_mmio_regs;\
        } else if (table == 2) { \
                match_strs = (const char **)dlvr_strings;\
-               mmio_regs = dlvr_mmio_regs;\
+               if (pdev->device == PCI_DEVICE_ID_INTEL_LNLM_THERMAL) {\
+                       mmio_regs = lnl_dlvr_mmio_regs;\
+                       mapping = lnl_dlvr_mapping;\
+               } else {\
+                       mmio_regs = dlvr_mmio_regs;\
+               } \
        } else {\
                match_strs = (const char **)fivr_strings;\
                mmio_regs = tgl_fivr_mmio_regs;\
@@ -150,9 +246,14 @@ static ssize_t suffix##_store(struct device *dev,\
                return ret;\
        if (mmio_regs[ret].read_only)\
                return -EPERM;\
-       err = kstrtouint(buf, 10, &input);\
-       if (err)\
+       err = get_mapped_value(mapping, attr->attr.name, buf, &input);\
+       if (err == -EINVAL)\
                return err;\
+       if (err == -EOPNOTSUPP) {\
+               err = kstrtouint(buf, 10, &input);\
+               if (err)\
+                       return err;\
+       } \
        mask = GENMASK(mmio_regs[ret].shift + mmio_regs[ret].bits - 1, mmio_regs[ret].shift);\
        reg_val = readl((void __iomem *) (proc_priv->mmio_base + mmio_regs[ret].offset));\
        reg_val &= ~mask;\
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index a180a98bb9f1..5b18a46a10b0 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -401,10 +401,10 @@ static void hfi_disable(void)
 * intel_hfi_online() - Enable HFI on @cpu
 * @cpu: CPU in which the HFI will be enabled
 *
- * Enable the HFI to be used in @cpu. The HFI is enabled at the die/package
- * level. The first CPU in the die/package to come online does the full HFI
+ * Enable the HFI to be used in @cpu. The HFI is enabled at the package
+ * level. The first CPU in the package to come online does the full HFI
 * initialization. Subsequent CPUs will just link themselves to the HFI
- * instance of their die/package.
+ * instance of their package.
 *
 * This function is called before enabling the thermal vector in the local APIC
 * in order to ensure that @cpu has an associated HFI instance when it receives
@@ -414,31 +414,31 @@ void intel_hfi_online(unsigned int cpu)
 {
        struct hfi_instance *hfi_instance;
        struct hfi_cpu_info *info;
-       u16 die_id;
+       u16 pkg_id;
        /* Nothing to do if hfi_instances are missing. */
        if (!hfi_instances)
                return;
        /*
-        * Link @cpu to the HFI instance of its package/die. It does not
+        * Link @cpu to the HFI instance of its package. It does not
         * matter whether the instance has been initialized.
         */
        info = &per_cpu(hfi_cpu_info, cpu);
-       die_id = topology_logical_die_id(cpu);
+       pkg_id = topology_logical_package_id(cpu);
        hfi_instance = info->hfi_instance;
        if (!hfi_instance) {
-               if (die_id >= max_hfi_instances)
+               if (pkg_id >= max_hfi_instances)
                        return;
-               hfi_instance = &hfi_instances[die_id];
+               hfi_instance = &hfi_instances[pkg_id];
                info->hfi_instance = hfi_instance;
        }
        init_hfi_cpu_index(info);
        /*
-        * Now check if the HFI instance of the package/die of @cpu has been
+        * Now check if the HFI instance of the package of @cpu has been
         * initialized (by checking its header). In such case, all we have to
         * do is to add @cpu to this instance's cpumask and enable the instance
         * if needed.
@@ -504,7 +504,7 @@ free_hw_table:
 *
 * On some processors, hardware remembers previous programming settings even
 * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the
- * die/package of @cpu are offline. See note in intel_hfi_online().
+ * package of @cpu are offline. See note in intel_hfi_online().
 */
 void intel_hfi_offline(unsigned int cpu)
 {
@@ -674,9 +674,13 @@ void __init intel_hfi_init(void)
        if (hfi_parse_features())
                return;
-       /* There is one HFI instance per die/package. */
-       max_hfi_instances = topology_max_packages() *
-                           topology_max_dies_per_package();
+       /*
+        * Note: HFI resources are managed at the physical package scope.
+        * There could be platforms that enumerate packages as Linux dies.
+        * Special handling would be needed if this happens on an HFI-capable
+        * platform.
+        */
+       max_hfi_instances = topology_max_packages();
        /*
         * This allocation may fail. CPU hotplug callbacks must check
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index f5be2c389351..fc326985796c 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -298,6 +298,11 @@ static int intel_pch_thermal_suspend_noirq(struct device *device)
        /* Get the PCH current temperature value */
        pch_cur_temp = GET_PCH_TEMP(WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP));
+       if (pch_cur_temp >= pch_thr_temp)
+               dev_warn(&ptd->pdev->dev,
+                        "CPU-PCH current temp [%dC] higher than the threshold temp [%dC], S0ix might fail. Start cooling...\n",
+                        pch_cur_temp, pch_thr_temp);
+
        /*
         * If current PCH temperature is higher than configured PCH threshold
         * value, run some delay loop with sleep to let the current temperature
diff --git a/drivers/thermal/intel/intel_soc_dts_thermal.c b/drivers/thermal/intel/intel_soc_dts_thermal.c
index 9c825c6e1f38..718c6326eaf4 100644
--- a/drivers/thermal/intel/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel/intel_soc_dts_thermal.c
@@ -36,7 +36,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
 }
 static const struct x86_cpu_id soc_thermal_ids[] = {
-       X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ),
+       X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
index 5e8b7f34b395..c86654f28aa5 100644
--- a/drivers/thermal/intel/intel_tcc.c
+++ b/drivers/thermal/intel/intel_tcc.c
@@ -6,9 +6,171 @@
 #include <linux/errno.h>
 #include <linux/intel_tcc.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/msr.h>
 /**
+ * struct temp_masks - Bitmasks for temperature readings
+ * @tcc_offset: TCC offset in MSR_TEMPERATURE_TARGET
+ * @digital_readout: Digital readout in MSR_IA32_THERM_STATUS
+ * @pkg_digital_readout: Digital readout in MSR_IA32_PACKAGE_THERM_STATUS
+ *
+ * Bitmasks to extract the fields of the MSR_TEMPERATURE and IA32_[PACKAGE]_
+ * THERM_STATUS registers for different processor models.
+ *
+ * The bitmask of TjMax is not included in this structure. It is always 0xff.
+ */
+struct temp_masks {
+       u32 tcc_offset;
+       u32 digital_readout;
+       u32 pkg_digital_readout;
+};
+
+#define TCC_MODEL_TEMP_MASKS(model, _tcc_offset, _digital_readout, \
+                            _pkg_digital_readout) \
+       static const struct temp_masks temp_##model __initconst = { \
+               .tcc_offset = _tcc_offset, \
+               .digital_readout = _digital_readout, \
+               .pkg_digital_readout = _pkg_digital_readout \
+       }
+
+TCC_MODEL_TEMP_MASKS(nehalem, 0, 0x7f, 0x7f);
+TCC_MODEL_TEMP_MASKS(haswell_x, 0xf, 0x7f, 0x7f);
+TCC_MODEL_TEMP_MASKS(broadwell, 0x3f, 0x7f, 0x7f);
+TCC_MODEL_TEMP_MASKS(goldmont, 0x7f, 0x7f, 0x7f);
+TCC_MODEL_TEMP_MASKS(tigerlake, 0x3f, 0xff, 0xff);
+TCC_MODEL_TEMP_MASKS(sapphirerapids, 0x3f, 0x7f, 0xff);
+
+/* Use these masks for processors not included in @tcc_cpu_ids. */
+static struct temp_masks intel_tcc_temp_masks __ro_after_init = {
+       .tcc_offset = 0x7f,
+       .digital_readout = 0xff,
+       .pkg_digital_readout = 0xff,
+};
+
+static const struct x86_cpu_id intel_tcc_cpu_ids[] __initconst = {
+       X86_MATCH_VFM(INTEL_CORE_YONAH, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_CORE2_MEROM, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_CORE2_MEROM_L, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_CORE2_PENRYN, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_CORE2_DUNNINGTON, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_NEHALEM, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_NEHALEM_G, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_NEHALEM_EP, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_NEHALEM_EX, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_WESTMERE, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_WESTMERE_EP, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_WESTMERE_EX, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_SANDYBRIDGE, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_SANDYBRIDGE_X, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_IVYBRIDGE, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &temp_haswell_x),
+       X86_MATCH_VFM(INTEL_HASWELL, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_HASWELL_X, &temp_haswell_x),
+       X86_MATCH_VFM(INTEL_HASWELL_L, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_HASWELL_G, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_BROADWELL, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_BROADWELL_G, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_BROADWELL_X, &temp_haswell_x),
+       X86_MATCH_VFM(INTEL_BROADWELL_D, &temp_haswell_x),
+       X86_MATCH_VFM(INTEL_SKYLAKE_L, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_SKYLAKE, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_SKYLAKE_X, &temp_haswell_x),
+       X86_MATCH_VFM(INTEL_KABYLAKE_L, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_KABYLAKE, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_COMETLAKE, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_COMETLAKE_L, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_CANNONLAKE_L, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ICELAKE_X, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ICELAKE_D, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ICELAKE, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ICELAKE_L, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ROCKETLAKE, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_TIGERLAKE_L, &temp_tigerlake),
+       X86_MATCH_VFM(INTEL_TIGERLAKE, &temp_tigerlake),
+       X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &temp_sapphirerapids),
+       X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &temp_sapphirerapids),
+       X86_MATCH_VFM(INTEL_LAKEFIELD, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ALDERLAKE, &temp_tigerlake),
+       X86_MATCH_VFM(INTEL_ALDERLAKE_L, &temp_tigerlake),
+       X86_MATCH_VFM(INTEL_RAPTORLAKE, &temp_tigerlake),
+       X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &temp_tigerlake),
+       X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &temp_tigerlake),
+       X86_MATCH_VFM(INTEL_ATOM_BONNELL, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_ATOM_BONNELL_MID, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_ATOM_SALTWELL, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, &temp_nehalem),
+       X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &temp_goldmont),
+       X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &temp_goldmont),
+       X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &temp_goldmont),
+       X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ATOM_TREMONT, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &temp_tigerlake),
+       X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &temp_broadwell),
+       X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &temp_broadwell),
+       {}
+};
+
+static int __init intel_tcc_init(void)
+{
+       const struct x86_cpu_id *id;
+
+       id = x86_match_cpu(intel_tcc_cpu_ids);
+       if (id)
+               memcpy(&intel_tcc_temp_masks, (const void *)id->driver_data,
+                      sizeof(intel_tcc_temp_masks));
+
+       return 0;
+}
+/*
+ * Use subsys_initcall to ensure temperature bitmasks are initialized before
+ * the drivers that use this library.
+ */
+subsys_initcall(intel_tcc_init);
+
+/**
+ * intel_tcc_get_offset_mask() - Returns the bitmask to read TCC offset
+ *
+ * Get the model-specific bitmask to extract TCC_OFFSET from the MSR
+ * TEMPERATURE_TARGET register. If the mask is 0, it means the processor does
+ * not support TCC offset.
+ *
+ * Return: The model-specific bitmask for TCC offset.
+ */
+u32 intel_tcc_get_offset_mask(void)
+{
+       return intel_tcc_temp_masks.tcc_offset;
+}
+EXPORT_SYMBOL_NS(intel_tcc_get_offset_mask, INTEL_TCC);
+
+/**
+ * get_temp_mask() - Returns the model-specific bitmask for temperature
+ *
+ * @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor.
+ *
+ * Get the model-specific bitmask to extract the temperature reading from the
+ * MSR_IA32_[PACKAGE]_THERM_STATUS register.
+ *
+ * Callers must check if the thermal status registers are supported.
+ *
+ * Return: The model-specific bitmask for temperature reading
+ */
+static u32 get_temp_mask(bool pkg)
+{
+       return pkg ? intel_tcc_temp_masks.pkg_digital_readout :
+                    intel_tcc_temp_masks.digital_readout;
+}
+
+/**
 * intel_tcc_get_tjmax() - returns the default TCC activation Temperature
 * @cpu: cpu that the MSR should be run on, nagative value means any cpu.
 *
@@ -56,7 +218,7 @@ int intel_tcc_get_offset(int cpu)
        if (err)
                return err;
-       return (low >> 24) & 0x3f;
+       return (low >> 24) & intel_tcc_temp_masks.tcc_offset;
 }
 EXPORT_SYMBOL_NS_GPL(intel_tcc_get_offset, INTEL_TCC);
@@ -76,7 +238,10 @@ int intel_tcc_set_offset(int cpu, int offset)
        u32 low, high;
        int err;
-       if (offset < 0 || offset > 0x3f)
+       if (!intel_tcc_temp_masks.tcc_offset)
+               return -ENODEV;
+
+       if (offset < 0 || offset > intel_tcc_temp_masks.tcc_offset)
                return -EINVAL;
        if (cpu < 0)
@@ -90,7 +255,7 @@ int intel_tcc_set_offset(int cpu, int offset)
        if (low & BIT(31))
                return -EPERM;
-       low &= ~(0x3f << 24);
+       low &= ~(intel_tcc_temp_masks.tcc_offset << 24);
        low |= offset << 24;
        if (cpu < 0)
@@ -113,8 +278,8 @@ EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC);
 */
 int intel_tcc_get_temp(int cpu, int *temp, bool pkg)
 {
-       u32 low, high;
        u32 msr = pkg ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS;
+       u32 low, high, mask;
        int tjmax, err;
        tjmax = intel_tcc_get_tjmax(cpu);
@@ -132,7 +297,9 @@ int intel_tcc_get_temp(int cpu, int *temp, bool pkg)
        if (!(low & BIT(31)))
                return -ENODATA;
-       *temp = tjmax - ((low >> 16) & 0x7f);
+       mask = get_temp_mask(pkg);
+
+       *temp = tjmax - ((low >> 16) & mask);
        return 0;
 }
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index 6c392147e6d1..17110ffa80bb 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -20,7 +20,7 @@ static struct thermal_cooling_device *tcc_cdev;
 static int tcc_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
 {
-       *state = 0x3f;
+       *state = intel_tcc_get_offset_mask();
        return 0;
 }
@@ -49,21 +49,21 @@ static const struct thermal_cooling_device_ops tcc_cooling_ops = {
 };
 static const struct x86_cpu_id tcc_ids[] __initconst = {
-       X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
+       X86_MATCH_VFM(INTEL_SKYLAKE, NULL),
+       X86_MATCH_VFM(INTEL_SKYLAKE_L, NULL),
+       X86_MATCH_VFM(INTEL_KABYLAKE, NULL),
+       X86_MATCH_VFM(INTEL_KABYLAKE_L, NULL),
+       X86_MATCH_VFM(INTEL_ICELAKE, NULL),
+       X86_MATCH_VFM(INTEL_ICELAKE_L, NULL),
+       X86_MATCH_VFM(INTEL_TIGERLAKE, NULL),
+       X86_MATCH_VFM(INTEL_TIGERLAKE_L, NULL),
+       X86_MATCH_VFM(INTEL_COMETLAKE, NULL),
+       X86_MATCH_VFM(INTEL_ALDERLAKE, NULL),
+       X86_MATCH_VFM(INTEL_ALDERLAKE_L, NULL),
+       X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, NULL),
+       X86_MATCH_VFM(INTEL_RAPTORLAKE, NULL),
+       X86_MATCH_VFM(INTEL_RAPTORLAKE_P, NULL),
+       X86_MATCH_VFM(INTEL_RAPTORLAKE_S, NULL),
        {}
 };
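For reference, the user-space-to-firmware value mapping that the Lunar Lake DLVR change introduces above can be illustrated with a minimal standalone lookup. This is a hedged sketch, not the driver code: it is plain user-space C, uses only the attribute/value pairs visible in lnl_dlvr_mapping above, and stands in for the register read/write that the real sysfs handlers perform.

```c
/*
 * Standalone sketch of the attribute/value mapping used for the Lunar Lake
 * DLVR sysfs attributes: a register encoding (0 or 1) is translated to a
 * human-readable frequency string and back, instead of exposing the raw
 * encoding. Values mirror lnl_dlvr_mapping in the diff above.
 */
#include <stdio.h>
#include <string.h>

struct mapping { const char *attr; unsigned int value; const char *str; };

static const struct mapping lnl_dlvr_map[] = {
	{ "dlvr_freq_select", 0, "2227.2" },
	{ "dlvr_freq_select", 1, "2140" },
	{ "dlvr_freq_mhz",    0, "2227.2" },
	{ "dlvr_freq_mhz",    1, "2140" },
	{ NULL, 0, NULL },
};

/* Register value -> sysfs string (what the _show path does). */
static const char *to_string(const char *attr, unsigned int value)
{
	for (int i = 0; lnl_dlvr_map[i].attr; i++)
		if (!strcmp(lnl_dlvr_map[i].attr, attr) && lnl_dlvr_map[i].value == value)
			return lnl_dlvr_map[i].str;
	return NULL;
}

/* Sysfs string -> register value (what the _store path does); -1 if invalid. */
static int to_value(const char *attr, const char *str)
{
	for (int i = 0; lnl_dlvr_map[i].attr; i++)
		if (!strcmp(lnl_dlvr_map[i].attr, attr) && !strcmp(lnl_dlvr_map[i].str, str))
			return (int)lnl_dlvr_map[i].value;
	return -1;
}

int main(void)
{
	printf("dlvr_freq_select=1 -> %s MHz\n", to_string("dlvr_freq_select", 1));
	printf("\"2227.2\" -> %d\n", to_value("dlvr_freq_mhz", "2227.2"));
	return 0;
}
```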