Diffstat (limited to 'drivers/acpi')
-rw-r--r--  drivers/acpi/Kconfig                |   4
-rw-r--r--  drivers/acpi/acpi_apd.c             |   1
-rw-r--r--  drivers/acpi/apei/erst.c            |  72
-rw-r--r--  drivers/acpi/apei/ghes.c            |   5
-rw-r--r--  drivers/acpi/arm64/Kconfig          |   3
-rw-r--r--  drivers/acpi/arm64/Makefile         |   1
-rw-r--r--  drivers/acpi/arm64/gtdt.c           | 417
-rw-r--r--  drivers/acpi/arm64/iort.c           | 230
-rw-r--r--  drivers/acpi/battery.c              |   2
-rw-r--r--  drivers/acpi/bgrt.c                 |   6
-rw-r--r--  drivers/acpi/button.c               |   5
-rw-r--r--  drivers/acpi/cppc_acpi.c            |   2
-rw-r--r--  drivers/acpi/device_pm.c            |   3
-rw-r--r--  drivers/acpi/glue.c                 |  11
-rw-r--r--  drivers/acpi/nfit/Kconfig           |  12
-rw-r--r--  drivers/acpi/nfit/core.c            | 233
-rw-r--r--  drivers/acpi/nfit/nfit.h            |   4
-rw-r--r--  drivers/acpi/pci_mcfg.c             |  14
-rw-r--r--  drivers/acpi/processor_driver.c     |  10
-rw-r--r--  drivers/acpi/processor_throttling.c |  62
-rw-r--r--  drivers/acpi/property.c             | 259
-rw-r--r--  drivers/acpi/scan.c                 |  11
-rw-r--r--  drivers/acpi/sleep.c                |  28
-rw-r--r--  drivers/acpi/tables.c               |   2
24 files changed, 1159 insertions(+), 238 deletions(-)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index de3b8fce5164..1ce52f84dc23 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -440,7 +440,7 @@ config ACPI_CUSTOM_METHOD config ACPI_BGRT bool "Boottime Graphics Resource Table support" - depends on EFI && X86 + depends on EFI && (X86 || ARM64) help This driver adds support for exposing the ACPI Boottime Graphics Resource Table, which allows the operating system to obtain @@ -511,7 +511,7 @@ config XPOWER_PMIC_OPREGION config BXT_WC_PMIC_OPREGION bool "ACPI operation region support for BXT WhiskeyCove PMIC" - depends on INTEL_SOC_PMIC + depends on INTEL_SOC_PMIC_BXTWC help This config adds ACPI operation region support for BXT WhiskeyCove PMIC. diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 8f57648f318b..fc6c416f8724 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -179,6 +179,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { #ifdef CONFIG_ARM64 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, { "BRCM900D", APD_ADDR(vulcan_spi_desc) }, + { "CAV900D", APD_ADDR(vulcan_spi_desc) }, { "HISI0A21", APD_ADDR(hip07_i2c_desc) }, { "HISI0A22", APD_ADDR(hip08_i2c_desc) }, #endif diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index ec4f507b524f..2c462beee551 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -513,7 +513,7 @@ retry: if (i < erst_record_id_cache.len) goto retry; if (erst_record_id_cache.len >= erst_record_id_cache.size) { - int new_size, alloc_size; + int new_size; u64 *new_entries; new_size = erst_record_id_cache.size * 2; @@ -524,11 +524,7 @@ retry: pr_warn(FW_WARN "too many record IDs!\n"); return 0; } - alloc_size = new_size * sizeof(entries[0]); - if (alloc_size < PAGE_SIZE) - new_entries = kmalloc(alloc_size, GFP_KERNEL); - else - new_entries = vmalloc(alloc_size); + new_entries = kvmalloc(new_size * sizeof(entries[0]), GFP_KERNEL); if (!new_entries) return -ENOMEM; memcpy(new_entries, entries, @@ -925,15 +921,9 @@ static int erst_check_table(struct acpi_table_erst *erst_tab) static int erst_open_pstore(struct pstore_info *psi); static int erst_close_pstore(struct pstore_info *psi); -static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count, - struct timespec *time, char **buf, - bool *compressed, ssize_t *ecc_notice_size, - struct pstore_info *psi); -static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason, - u64 *id, unsigned int part, int count, bool compressed, - size_t size, struct pstore_info *psi); -static int erst_clearer(enum pstore_type_id type, u64 id, int count, - struct timespec time, struct pstore_info *psi); +static ssize_t erst_reader(struct pstore_record *record); +static int erst_writer(struct pstore_record *record); +static int erst_clearer(struct pstore_record *record); static struct pstore_info erst_info = { .owner = THIS_MODULE, @@ -986,10 +976,7 @@ static int erst_close_pstore(struct pstore_info *psi) return 0; } -static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count, - struct timespec *time, char **buf, - bool *compressed, ssize_t *ecc_notice_size, - struct pstore_info *psi) +static ssize_t erst_reader(struct pstore_record *record) { int rc; ssize_t len = 0; @@ -1027,42 +1014,40 @@ skip: if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0) goto skip; - *buf = kmalloc(len, GFP_KERNEL); - if (*buf == NULL) { + record->buf = kmalloc(len, GFP_KERNEL); + if (record->buf == NULL) { rc = -ENOMEM; goto out; } - memcpy(*buf, rcd->data, 
len - sizeof(*rcd)); - *id = record_id; - *compressed = false; - *ecc_notice_size = 0; + memcpy(record->buf, rcd->data, len - sizeof(*rcd)); + record->id = record_id; + record->compressed = false; + record->ecc_notice_size = 0; if (uuid_le_cmp(rcd->sec_hdr.section_type, CPER_SECTION_TYPE_DMESG_Z) == 0) { - *type = PSTORE_TYPE_DMESG; - *compressed = true; + record->type = PSTORE_TYPE_DMESG; + record->compressed = true; } else if (uuid_le_cmp(rcd->sec_hdr.section_type, CPER_SECTION_TYPE_DMESG) == 0) - *type = PSTORE_TYPE_DMESG; + record->type = PSTORE_TYPE_DMESG; else if (uuid_le_cmp(rcd->sec_hdr.section_type, CPER_SECTION_TYPE_MCE) == 0) - *type = PSTORE_TYPE_MCE; + record->type = PSTORE_TYPE_MCE; else - *type = PSTORE_TYPE_UNKNOWN; + record->type = PSTORE_TYPE_UNKNOWN; if (rcd->hdr.validation_bits & CPER_VALID_TIMESTAMP) - time->tv_sec = rcd->hdr.timestamp; + record->time.tv_sec = rcd->hdr.timestamp; else - time->tv_sec = 0; - time->tv_nsec = 0; + record->time.tv_sec = 0; + record->time.tv_nsec = 0; out: kfree(rcd); return (rc < 0) ? rc : (len - sizeof(*rcd)); } -static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason, - u64 *id, unsigned int part, int count, bool compressed, - size_t size, struct pstore_info *psi) +static int erst_writer(struct pstore_record *record) { struct cper_pstore_record *rcd = (struct cper_pstore_record *) (erst_info.buf - sizeof(*rcd)); @@ -1077,21 +1062,21 @@ static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason, /* timestamp valid. platform_id, partition_id are invalid */ rcd->hdr.validation_bits = CPER_VALID_TIMESTAMP; rcd->hdr.timestamp = get_seconds(); - rcd->hdr.record_length = sizeof(*rcd) + size; + rcd->hdr.record_length = sizeof(*rcd) + record->size; rcd->hdr.creator_id = CPER_CREATOR_PSTORE; rcd->hdr.notification_type = CPER_NOTIFY_MCE; rcd->hdr.record_id = cper_next_record_id(); rcd->hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR; rcd->sec_hdr.section_offset = sizeof(*rcd); - rcd->sec_hdr.section_length = size; + rcd->sec_hdr.section_length = record->size; rcd->sec_hdr.revision = CPER_SEC_REV; /* fru_id and fru_text is invalid */ rcd->sec_hdr.validation_bits = 0; rcd->sec_hdr.flags = CPER_SEC_PRIMARY; - switch (type) { + switch (record->type) { case PSTORE_TYPE_DMESG: - if (compressed) + if (record->compressed) rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG_Z; else rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG; @@ -1105,15 +1090,14 @@ static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason, rcd->sec_hdr.section_severity = CPER_SEV_FATAL; ret = erst_write(&rcd->hdr); - *id = rcd->hdr.record_id; + record->id = rcd->hdr.record_id; return ret; } -static int erst_clearer(enum pstore_type_id type, u64 id, int count, - struct timespec time, struct pstore_info *psi) +static int erst_clearer(struct pstore_record *record) { - return erst_clear(id); + return erst_clear(record->id); } static int __init erst_init(void) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 79b3c9c5a3bc..d0855c09f32f 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1005,9 +1005,8 @@ static int ghes_probe(struct platform_device *ghes_dev) switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: - ghes->timer.function = ghes_poll_func; - ghes->timer.data = (unsigned long)ghes; - init_timer_deferrable(&ghes->timer); + setup_deferrable_timer(&ghes->timer, ghes_poll_func, + (unsigned long)ghes); ghes_add_timer(ghes); break; case ACPI_HEST_NOTIFY_EXTERNAL: diff --git 
a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index 4616da4c15be..5a6f80fce0d6 100644 --- a/drivers/acpi/arm64/Kconfig +++ b/drivers/acpi/arm64/Kconfig @@ -4,3 +4,6 @@ config ACPI_IORT bool + +config ACPI_GTDT + bool diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 72331f2ce0e9..1017def2ea12 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_ACPI_IORT) += iort.o +obj-$(CONFIG_ACPI_GTDT) += gtdt.o diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c new file mode 100644 index 000000000000..597a737d538f --- /dev/null +++ b/drivers/acpi/arm64/gtdt.c @@ -0,0 +1,417 @@ +/* + * ARM Specific GTDT table Support + * + * Copyright (C) 2016, Linaro Ltd. + * Author: Daniel Lezcano <daniel.lezcano@linaro.org> + * Fu Wei <fu.wei@linaro.org> + * Hanjun Guo <hanjun.guo@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/acpi.h> +#include <linux/init.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +#include <clocksource/arm_arch_timer.h> + +#undef pr_fmt +#define pr_fmt(fmt) "ACPI GTDT: " fmt + +/** + * struct acpi_gtdt_descriptor - Store the key info of GTDT for all functions + * @gtdt: The pointer to the struct acpi_table_gtdt of GTDT table. + * @gtdt_end: The pointer to the end of GTDT table. + * @platform_timer: The pointer to the start of Platform Timer Structure + * + * The struct store the key info of GTDT table, it should be initialized by + * acpi_gtdt_init. + */ +struct acpi_gtdt_descriptor { + struct acpi_table_gtdt *gtdt; + void *gtdt_end; + void *platform_timer; +}; + +static struct acpi_gtdt_descriptor acpi_gtdt_desc __initdata; + +static inline void *next_platform_timer(void *platform_timer) +{ + struct acpi_gtdt_header *gh = platform_timer; + + platform_timer += gh->length; + if (platform_timer < acpi_gtdt_desc.gtdt_end) + return platform_timer; + + return NULL; +} + +#define for_each_platform_timer(_g) \ + for (_g = acpi_gtdt_desc.platform_timer; _g; \ + _g = next_platform_timer(_g)) + +static inline bool is_timer_block(void *platform_timer) +{ + struct acpi_gtdt_header *gh = platform_timer; + + return gh->type == ACPI_GTDT_TYPE_TIMER_BLOCK; +} + +static inline bool is_non_secure_watchdog(void *platform_timer) +{ + struct acpi_gtdt_header *gh = platform_timer; + struct acpi_gtdt_watchdog *wd = platform_timer; + + if (gh->type != ACPI_GTDT_TYPE_WATCHDOG) + return false; + + return !(wd->timer_flags & ACPI_GTDT_WATCHDOG_SECURE); +} + +static int __init map_gt_gsi(u32 interrupt, u32 flags) +{ + int trigger, polarity; + + trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE + : ACPI_LEVEL_SENSITIVE; + + polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW + : ACPI_ACTIVE_HIGH; + + return acpi_register_gsi(NULL, interrupt, trigger, polarity); +} + +/** + * acpi_gtdt_map_ppi() - Map the PPIs of per-cpu arch_timer. + * @type: the type of PPI. + * + * Note: Secure state is not managed by the kernel on ARM64 systems. + * So we only handle the non-secure timer PPIs, + * ARCH_TIMER_PHYS_SECURE_PPI is treated as invalid type. + * + * Return: the mapped PPI value, 0 if error. 
+ */ +int __init acpi_gtdt_map_ppi(int type) +{ + struct acpi_table_gtdt *gtdt = acpi_gtdt_desc.gtdt; + + switch (type) { + case ARCH_TIMER_PHYS_NONSECURE_PPI: + return map_gt_gsi(gtdt->non_secure_el1_interrupt, + gtdt->non_secure_el1_flags); + case ARCH_TIMER_VIRT_PPI: + return map_gt_gsi(gtdt->virtual_timer_interrupt, + gtdt->virtual_timer_flags); + + case ARCH_TIMER_HYP_PPI: + return map_gt_gsi(gtdt->non_secure_el2_interrupt, + gtdt->non_secure_el2_flags); + default: + pr_err("Failed to map timer interrupt: invalid type.\n"); + } + + return 0; +} + +/** + * acpi_gtdt_c3stop() - Got c3stop info from GTDT according to the type of PPI. + * @type: the type of PPI. + * + * Return: true if the timer HW state is lost when a CPU enters an idle state, + * false otherwise + */ +bool __init acpi_gtdt_c3stop(int type) +{ + struct acpi_table_gtdt *gtdt = acpi_gtdt_desc.gtdt; + + switch (type) { + case ARCH_TIMER_PHYS_NONSECURE_PPI: + return !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON); + + case ARCH_TIMER_VIRT_PPI: + return !(gtdt->virtual_timer_flags & ACPI_GTDT_ALWAYS_ON); + + case ARCH_TIMER_HYP_PPI: + return !(gtdt->non_secure_el2_flags & ACPI_GTDT_ALWAYS_ON); + + default: + pr_err("Failed to get c3stop info: invalid type.\n"); + } + + return false; +} + +/** + * acpi_gtdt_init() - Get the info of GTDT table to prepare for further init. + * @table: The pointer to GTDT table. + * @platform_timer_count: It points to a integer variable which is used + * for storing the number of platform timers. + * This pointer could be NULL, if the caller + * doesn't need this info. + * + * Return: 0 if success, -EINVAL if error. + */ +int __init acpi_gtdt_init(struct acpi_table_header *table, + int *platform_timer_count) +{ + void *platform_timer; + struct acpi_table_gtdt *gtdt; + + gtdt = container_of(table, struct acpi_table_gtdt, header); + acpi_gtdt_desc.gtdt = gtdt; + acpi_gtdt_desc.gtdt_end = (void *)table + table->length; + acpi_gtdt_desc.platform_timer = NULL; + if (platform_timer_count) + *platform_timer_count = 0; + + if (table->revision < 2) { + pr_warn("Revision:%d doesn't support Platform Timers.\n", + table->revision); + return 0; + } + + if (!gtdt->platform_timer_count) { + pr_debug("No Platform Timer.\n"); + return 0; + } + + platform_timer = (void *)gtdt + gtdt->platform_timer_offset; + if (platform_timer < (void *)table + sizeof(struct acpi_table_gtdt)) { + pr_err(FW_BUG "invalid timer data.\n"); + return -EINVAL; + } + acpi_gtdt_desc.platform_timer = platform_timer; + if (platform_timer_count) + *platform_timer_count = gtdt->platform_timer_count; + + return 0; +} + +static int __init gtdt_parse_timer_block(struct acpi_gtdt_timer_block *block, + struct arch_timer_mem *timer_mem) +{ + int i; + struct arch_timer_mem_frame *frame; + struct acpi_gtdt_timer_entry *gtdt_frame; + + if (!block->timer_count) { + pr_err(FW_BUG "GT block present, but frame count is zero."); + return -ENODEV; + } + + if (block->timer_count > ARCH_TIMER_MEM_MAX_FRAMES) { + pr_err(FW_BUG "GT block lists %d frames, ACPI spec only allows 8\n", + block->timer_count); + return -EINVAL; + } + + timer_mem->cntctlbase = (phys_addr_t)block->block_address; + /* + * The CNTCTLBase frame is 4KB (register offsets 0x000 - 0xFFC). + * See ARM DDI 0487A.k_iss10775, page I1-5129, Table I1-3 + * "CNTCTLBase memory map". 
+ */ + timer_mem->size = SZ_4K; + + gtdt_frame = (void *)block + block->timer_offset; + if (gtdt_frame + block->timer_count != (void *)block + block->header.length) + return -EINVAL; + + /* + * Get the GT timer Frame data for every GT Block Timer + */ + for (i = 0; i < block->timer_count; i++, gtdt_frame++) { + if (gtdt_frame->common_flags & ACPI_GTDT_GT_IS_SECURE_TIMER) + continue; + if (gtdt_frame->frame_number >= ARCH_TIMER_MEM_MAX_FRAMES || + !gtdt_frame->base_address || !gtdt_frame->timer_interrupt) + goto error; + + frame = &timer_mem->frame[gtdt_frame->frame_number]; + + /* duplicate frame */ + if (frame->valid) + goto error; + + frame->phys_irq = map_gt_gsi(gtdt_frame->timer_interrupt, + gtdt_frame->timer_flags); + if (frame->phys_irq <= 0) { + pr_warn("failed to map physical timer irq in frame %d.\n", + gtdt_frame->frame_number); + goto error; + } + + if (gtdt_frame->virtual_timer_interrupt) { + frame->virt_irq = + map_gt_gsi(gtdt_frame->virtual_timer_interrupt, + gtdt_frame->virtual_timer_flags); + if (frame->virt_irq <= 0) { + pr_warn("failed to map virtual timer irq in frame %d.\n", + gtdt_frame->frame_number); + goto error; + } + } else { + pr_debug("virtual timer in frame %d not implemented.\n", + gtdt_frame->frame_number); + } + + frame->cntbase = gtdt_frame->base_address; + /* + * The CNTBaseN frame is 4KB (register offsets 0x000 - 0xFFC). + * See ARM DDI 0487A.k_iss10775, page I1-5130, Table I1-4 + * "CNTBaseN memory map". + */ + frame->size = SZ_4K; + frame->valid = true; + } + + return 0; + +error: + do { + if (gtdt_frame->common_flags & ACPI_GTDT_GT_IS_SECURE_TIMER || + gtdt_frame->frame_number >= ARCH_TIMER_MEM_MAX_FRAMES) + continue; + + frame = &timer_mem->frame[gtdt_frame->frame_number]; + + if (frame->phys_irq > 0) + acpi_unregister_gsi(gtdt_frame->timer_interrupt); + frame->phys_irq = 0; + + if (frame->virt_irq > 0) + acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt); + frame->virt_irq = 0; + } while (i-- >= 0 && gtdt_frame--); + + return -EINVAL; +} + +/** + * acpi_arch_timer_mem_init() - Get the info of all GT blocks in GTDT table. + * @timer_mem: The pointer to the array of struct arch_timer_mem for returning + * the result of parsing. The element number of this array should + * be platform_timer_count(the total number of platform timers). + * @timer_count: It points to a integer variable which is used for storing the + * number of GT blocks we have parsed. + * + * Return: 0 if success, -EINVAL/-ENODEV if error. + */ +int __init acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, + int *timer_count) +{ + int ret; + void *platform_timer; + + *timer_count = 0; + for_each_platform_timer(platform_timer) { + if (is_timer_block(platform_timer)) { + ret = gtdt_parse_timer_block(platform_timer, timer_mem); + if (ret) + return ret; + timer_mem++; + (*timer_count)++; + } + } + + if (*timer_count) + pr_info("found %d memory-mapped timer block(s).\n", + *timer_count); + + return 0; +} + +/* + * Initialize a SBSA generic Watchdog platform device info from GTDT + */ +static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd, + int index) +{ + struct platform_device *pdev; + int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags); + + /* + * According to SBSA specification the size of refresh and control + * frames of SBSA Generic Watchdog is SZ_4K(Offset 0x000 – 0xFFF). 
+ */ + struct resource res[] = { + DEFINE_RES_MEM(wd->control_frame_address, SZ_4K), + DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K), + DEFINE_RES_IRQ(irq), + }; + int nr_res = ARRAY_SIZE(res); + + pr_debug("found a Watchdog (0x%llx/0x%llx gsi:%u flags:0x%x).\n", + wd->refresh_frame_address, wd->control_frame_address, + wd->timer_interrupt, wd->timer_flags); + + if (!(wd->refresh_frame_address && wd->control_frame_address)) { + pr_err(FW_BUG "failed to get the Watchdog base address.\n"); + acpi_unregister_gsi(wd->timer_interrupt); + return -EINVAL; + } + + if (irq <= 0) { + pr_warn("failed to map the Watchdog interrupt.\n"); + nr_res--; + } + + /* + * Add a platform device named "sbsa-gwdt" to match the platform driver. + * "sbsa-gwdt": SBSA(Server Base System Architecture) Generic Watchdog + * The platform driver can get device info below by matching this name. + */ + pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res); + if (IS_ERR(pdev)) { + acpi_unregister_gsi(wd->timer_interrupt); + return PTR_ERR(pdev); + } + + return 0; +} + +static int __init gtdt_sbsa_gwdt_init(void) +{ + void *platform_timer; + struct acpi_table_header *table; + int ret, timer_count, gwdt_count = 0; + + if (acpi_disabled) + return 0; + + if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_GTDT, 0, &table))) + return -EINVAL; + + /* + * Note: Even though the global variable acpi_gtdt_desc has been + * initialized by acpi_gtdt_init() while initializing the arch timers, + * when we call this function to get SBSA watchdogs info from GTDT, the + * pointers stashed in it are stale (since they are early temporary + * mappings carried out before acpi_permanent_mmap is set) and we need + * to re-initialize them with permanent mapped pointer values to let the + * GTDT parsing possible. + */ + ret = acpi_gtdt_init(table, &timer_count); + if (ret || !timer_count) + return ret; + + for_each_platform_timer(platform_timer) { + if (is_non_secure_watchdog(platform_timer)) { + ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count); + if (ret) + break; + gwdt_count++; + } + } + + if (gwdt_count) + pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count); + + return ret; +} + +device_initcall(gtdt_sbsa_gwdt_init); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 4a5bb967250b..c5fecf97ee2f 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -225,7 +225,7 @@ static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type, if (iort_node->type == type && ACPI_SUCCESS(callback(iort_node, context))) - return iort_node; + return iort_node; iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node, iort_node->length); @@ -253,17 +253,15 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node, void *context) { struct device *dev = context; - acpi_status status; + acpi_status status = AE_NOT_FOUND; if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_device *adev = to_acpi_device_node(dev->fwnode); struct acpi_iort_named_component *ncomp; - if (!adev) { - status = AE_NOT_FOUND; + if (!adev) goto out; - } status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); if (ACPI_FAILURE(status)) { @@ -289,8 +287,6 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node, */ status = pci_rc->pci_segment_number == pci_domain_nr(bus) ? 
AE_OK : AE_NOT_FOUND; - } else { - status = AE_NOT_FOUND; } out: return status; @@ -322,8 +318,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, - u32 *id_out, u8 type_mask, - int index) + u32 *id_out, int index) { struct acpi_iort_node *parent; struct acpi_iort_id_mapping *map; @@ -345,9 +340,6 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table, map->output_reference); - if (!(IORT_TYPE_MASK(parent->type) & type_mask)) - return NULL; - if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT || node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { @@ -359,11 +351,11 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node, return NULL; } -static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node, - u32 rid_in, u32 *rid_out, - u8 type_mask) +static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, + u32 id_in, u32 *id_out, + u8 type_mask) { - u32 rid = rid_in; + u32 id = id_in; /* Parse the ID mapping tree to find specified node type */ while (node) { @@ -371,8 +363,8 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node, int i; if (IORT_TYPE_MASK(node->type) & type_mask) { - if (rid_out) - *rid_out = rid; + if (id_out) + *id_out = id; return node; } @@ -389,9 +381,9 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node, goto fail_map; } - /* Do the RID translation */ + /* Do the ID translation */ for (i = 0; i < node->mapping_count; i++, map++) { - if (!iort_id_map(map, node->type, rid, &rid)) + if (!iort_id_map(map, node->type, id, &id)) break; } @@ -403,13 +395,41 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node, } fail_map: - /* Map input RID to output RID unchanged on mapping failure*/ - if (rid_out) - *rid_out = rid_in; + /* Map input ID to output ID unchanged on mapping failure */ + if (id_out) + *id_out = id_in; return NULL; } +static +struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node, + u32 *id_out, u8 type_mask, + int index) +{ + struct acpi_iort_node *parent; + u32 id; + + /* step 1: retrieve the initial dev id */ + parent = iort_node_get_id(node, &id, index); + if (!parent) + return NULL; + + /* + * optional step 2: map the initial dev id if its parent is not + * the target type we want, map it again for the use cases such + * as NC (named component) -> SMMU -> ITS. If the type is matched, + * return the initial dev id and its parent pointer directly. + */ + if (!(IORT_TYPE_MASK(parent->type) & type_mask)) + parent = iort_node_map_id(parent, id, id_out, type_mask); + else + if (id_out) + *id_out = id; + + return parent; +} + static struct acpi_iort_node *iort_find_dev_node(struct device *dev) { struct pci_bus *pbus; @@ -443,13 +463,38 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id) if (!node) return req_id; - iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE); + iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE); return dev_id; } /** + * iort_pmsi_get_dev_id() - Get the device id for a device + * @dev: The device for which the mapping is to be done. + * @dev_id: The device ID found. 
+ * + * Returns: 0 for successful find a dev id, -ENODEV on error + */ +int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id) +{ + int i; + struct acpi_iort_node *node; + + node = iort_find_dev_node(dev); + if (!node) + return -ENODEV; + + for (i = 0; i < node->mapping_count; i++) { + if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i)) + return 0; + } + + return -ENODEV; +} + +/** * iort_dev_find_its_id() - Find the ITS identifier for a device * @dev: The device. + * @req_id: Device's requester ID * @idx: Index of the ITS identifier list. * @its_id: ITS identifier. * @@ -465,7 +510,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id, if (!node) return -ENXIO; - node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE); + node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE); if (!node) return -ENXIO; @@ -503,6 +548,56 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id) return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI); } +/** + * iort_get_platform_device_domain() - Find MSI domain related to a + * platform device + * @dev: the dev pointer associated with the platform device + * + * Returns: the MSI domain for this device, NULL otherwise + */ +static struct irq_domain *iort_get_platform_device_domain(struct device *dev) +{ + struct acpi_iort_node *node, *msi_parent; + struct fwnode_handle *iort_fwnode; + struct acpi_iort_its_group *its; + int i; + + /* find its associated iort node */ + node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT, + iort_match_node_callback, dev); + if (!node) + return NULL; + + /* then find its msi parent node */ + for (i = 0; i < node->mapping_count; i++) { + msi_parent = iort_node_map_platform_id(node, NULL, + IORT_MSI_TYPE, i); + if (msi_parent) + break; + } + + if (!msi_parent) + return NULL; + + /* Move to ITS specific data */ + its = (struct acpi_iort_its_group *)msi_parent->node_data; + + iort_fwnode = iort_find_domain_token(its->identifiers[0]); + if (!iort_fwnode) + return NULL; + + return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); +} + +void acpi_configure_pmsi_domain(struct device *dev) +{ + struct irq_domain *msi_domain; + + msi_domain = iort_get_platform_device_domain(dev); + if (msi_domain) + dev_set_msi_domain(dev, msi_domain); +} + static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) { u32 *rid = data; @@ -523,6 +618,46 @@ static int arm_smmu_iort_xlate(struct device *dev, u32 streamid, return ret; } +static inline bool iort_iommu_driver_enabled(u8 type) +{ + switch (type) { + case ACPI_IORT_NODE_SMMU_V3: + return IS_BUILTIN(CONFIG_ARM_SMMU_V3); + case ACPI_IORT_NODE_SMMU: + return IS_BUILTIN(CONFIG_ARM_SMMU); + default: + pr_warn("IORT node type %u does not describe an SMMU\n", type); + return false; + } +} + +#ifdef CONFIG_IOMMU_API +static inline +const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) +{ + return (fwspec && fwspec->ops) ? 
fwspec->ops : NULL; +} + +static inline +int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) +{ + int err = 0; + + if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus && + !dev->iommu_group) + err = ops->add_device(dev); + + return err; +} +#else +static inline +const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec) +{ return NULL; } +static inline +int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev) +{ return 0; } +#endif + static const struct iommu_ops *iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, u32 streamid) @@ -531,14 +666,31 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev, int ret = -ENODEV; struct fwnode_handle *iort_fwnode; + /* + * If we already translated the fwspec there + * is nothing left to do, return the iommu_ops. + */ + ops = iort_fwspec_iommu_ops(dev->iommu_fwspec); + if (ops) + return ops; + if (node) { iort_fwnode = iort_get_fwnode(node); if (!iort_fwnode) return NULL; ops = iommu_ops_from_fwnode(iort_fwnode); + /* + * If the ops look-up fails, this means that either + * the SMMU drivers have not been probed yet or that + * the SMMU drivers are not built in the kernel; + * Depending on whether the SMMU drivers are built-in + * in the kernel or not, defer the IOMMU configuration + * or just abort it. + */ if (!ops) - return NULL; + return iort_iommu_driver_enabled(node->type) ? + ERR_PTR(-EPROBE_DEFER) : NULL; ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops); } @@ -581,6 +733,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) struct acpi_iort_node *node, *parent; const struct iommu_ops *ops = NULL; u32 streamid = 0; + int err; if (dev_is_pci(dev)) { struct pci_bus *bus = to_pci_dev(dev)->bus; @@ -594,8 +747,8 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) if (!node) return NULL; - parent = iort_node_map_rid(node, rid, &streamid, - IORT_IOMMU_TYPE); + parent = iort_node_map_id(node, rid, &streamid, + IORT_IOMMU_TYPE); ops = iort_iommu_xlate(dev, parent, streamid); @@ -607,17 +760,28 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) if (!node) return NULL; - parent = iort_node_get_id(node, &streamid, - IORT_IOMMU_TYPE, i++); + parent = iort_node_map_platform_id(node, &streamid, + IORT_IOMMU_TYPE, i++); while (parent) { ops = iort_iommu_xlate(dev, parent, streamid); + if (IS_ERR_OR_NULL(ops)) + return ops; - parent = iort_node_get_id(node, &streamid, - IORT_IOMMU_TYPE, i++); + parent = iort_node_map_platform_id(node, &streamid, + IORT_IOMMU_TYPE, + i++); } } + /* + * If we have reason to believe the IOMMU driver missed the initial + * add_device callback for dev, replay it to get things in order. 
+ */ + err = iort_add_device_replay(ops, dev); + if (err) + ops = ERR_PTR(err); + return ops; } @@ -956,6 +1120,4 @@ void __init acpi_iort_init(void) } iort_init_platform_devices(); - - acpi_probe_device_table(iort); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index d42eeef9d928..a9a9ab3399d4 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume) if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && (battery->capacity_now <= battery->alarm))) - pm_wakeup_event(&battery->device->dev, 0); + pm_wakeup_hard_event(&battery->device->dev); return result; } diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c index ca28aa572aa9..df1c629205e7 100644 --- a/drivers/acpi/bgrt.c +++ b/drivers/acpi/bgrt.c @@ -81,6 +81,12 @@ static struct attribute_group bgrt_attribute_group = { .bin_attrs = bgrt_bin_attributes, }; +int __init acpi_parse_bgrt(struct acpi_table_header *table) +{ + efi_bgrt_init(table); + return 0; +} + static int __init bgrt_init(void) { int ret; diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 668137e4a069..b7c2a06963d6 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -216,7 +216,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state) } if (state) - pm_wakeup_event(&device->dev, 0); + pm_wakeup_hard_event(&device->dev); ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); if (ret == NOTIFY_DONE) @@ -398,7 +398,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) } else { int keycode; - pm_wakeup_event(&device->dev, 0); + pm_wakeup_hard_event(&device->dev); if (button->suspended) break; @@ -530,6 +530,7 @@ static int acpi_button_add(struct acpi_device *device) lid_device = device; } + device_init_wakeup(&device->dev, true); printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); return 0; diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 6cbe6036da99..e5b47f032d9a 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -95,7 +95,7 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); /* pcc mapped address + header size + offset within PCC subspace */ #define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs)) -/* Check if a CPC regsiter is in PCC */ +/* Check if a CPC register is in PCC */ #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ (cpc)->cpc_entry.reg.space_id == \ ACPI_ADR_SPACE_PLATFORM_COMM) diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 993fd31394c8..798d5003a039 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -24,6 +24,7 @@ #include <linux/pm_qos.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/suspend.h> #include "internal.h" @@ -399,7 +400,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used) mutex_lock(&acpi_pm_notifier_lock); if (adev->wakeup.flags.notifier_present) { - __pm_wakeup_event(adev->wakeup.ws, 0); + pm_wakeup_ws_event(adev->wakeup.ws, 0, true); if (adev->wakeup.context.work.func) queue_pm_work(&adev->wakeup.context.work); } diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index edc8663b5db3..3be1433853bf 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -6,6 +6,8 @@ * * This file is released under the GPLv2. 
*/ + +#include <linux/acpi_iort.h> #include <linux/export.h> #include <linux/init.h> #include <linux/list.h> @@ -14,6 +16,7 @@ #include <linux/rwsem.h> #include <linux/acpi.h> #include <linux/dma-mapping.h> +#include <linux/platform_device.h> #include "internal.h" @@ -176,7 +179,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; - enum dev_dma_attr attr; if (has_acpi_companion(dev)) { if (acpi_dev) { @@ -233,10 +235,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) if (!has_acpi_companion(dev)) ACPI_COMPANION_SET(dev, acpi_dev); - attr = acpi_get_dma_attr(acpi_dev); - if (attr != DEV_DMA_NOT_SUPPORTED) - acpi_dma_configure(dev, attr); - acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, physical_node_name); @@ -322,6 +320,9 @@ static int acpi_platform_notify(struct device *dev) if (!adev) goto out; + if (dev->bus == &platform_bus_type) + acpi_configure_pmsi_domain(dev); + if (type && type->setup) type->setup(dev); else if (adev->handler && adev->handler->bind) diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig index dd0d53c52552..6d3351452ea2 100644 --- a/drivers/acpi/nfit/Kconfig +++ b/drivers/acpi/nfit/Kconfig @@ -12,15 +12,3 @@ config ACPI_NFIT To compile this driver as a module, choose M here: the module will be called nfit. - -config ACPI_NFIT_DEBUG - bool "NFIT DSM debug" - depends on ACPI_NFIT - depends on DYNAMIC_DEBUG - default n - help - Enabling this option causes the nfit driver to dump the - input and output buffers of _DSM operations on the ACPI0012 - device and its children. This can be very verbose, so leave - it disabled unless you are debugging a hardware / firmware - issue. 
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index c8ea9d698cd0..656acb5d7166 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -49,7 +49,16 @@ MODULE_PARM_DESC(scrub_overflow_abort, static bool disable_vendor_specific; module_param(disable_vendor_specific, bool, S_IRUGO); MODULE_PARM_DESC(disable_vendor_specific, - "Limit commands to the publicly specified set\n"); + "Limit commands to the publicly specified set"); + +static unsigned long override_dsm_mask; +module_param(override_dsm_mask, ulong, S_IRUGO); +MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions"); + +static int default_dsm_family = -1; +module_param(default_dsm_family, int, S_IRUGO); +MODULE_PARM_DESC(default_dsm_family, + "Try this DSM type first when identifying NVDIMM family"); LIST_HEAD(acpi_descs); DEFINE_MUTEX(acpi_desc_lock); @@ -175,14 +184,29 @@ static int xlat_bus_status(void *buf, unsigned int cmd, u32 status) return 0; } +static int xlat_nvdimm_status(void *buf, unsigned int cmd, u32 status) +{ + switch (cmd) { + case ND_CMD_GET_CONFIG_SIZE: + if (status >> 16 & ND_CONFIG_LOCKED) + return -EACCES; + break; + default: + break; + } + + /* all other non-zero status results in an error */ + if (status) + return -EIO; + return 0; +} + static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd, u32 status) { if (!nvdimm) return xlat_bus_status(buf, cmd, status); - if (status) - return -EIO; - return 0; + return xlat_nvdimm_status(buf, cmd, status); } int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, @@ -259,14 +283,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, in_buf.buffer.length = call_pkg->nd_size_in; } - if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { - dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n", - __func__, dimm_name, cmd, func, - in_buf.buffer.length); - print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, + dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n", + __func__, dimm_name, cmd, func, in_buf.buffer.length); + print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, in_buf.buffer.pointer, min_t(u32, 256, in_buf.buffer.length), true); - } out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj); if (!out_obj) { @@ -298,13 +319,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, goto out; } - if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { - dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, - dimm_name, cmd_name, out_obj->buffer.length); - print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, - 4, out_obj->buffer.pointer, min_t(u32, 128, - out_obj->buffer.length), true); - } + dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name, + cmd_name, out_obj->buffer.length); + print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, + out_obj->buffer.pointer, + min_t(u32, 128, out_obj->buffer.length), true); for (i = 0, offset = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, @@ -448,9 +467,9 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc, INIT_LIST_HEAD(&nfit_memdev->list); memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); - dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", + dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n", __func__, memdev->device_handle, memdev->range_index, - memdev->region_index); + memdev->region_index, memdev->flags); return 
true; } @@ -729,28 +748,38 @@ static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, } } -static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, +static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa) { struct nfit_mem *nfit_mem, *found; struct nfit_memdev *nfit_memdev; - int type = nfit_spa_type(spa); + int type = spa ? nfit_spa_type(spa) : 0; switch (type) { case NFIT_SPA_DCR: case NFIT_SPA_PM: break; default: - return 0; + if (spa) + return 0; } + /* + * This loop runs in two modes, when a dimm is mapped the loop + * adds memdev associations to an existing dimm, or creates a + * dimm. In the unmapped dimm case this loop sweeps for memdev + * instances with an invalid / zero range_index and adds those + * dimms without spa associations. + */ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { struct nfit_flush *nfit_flush; struct nfit_dcr *nfit_dcr; u32 device_handle; u16 dcr; - if (nfit_memdev->memdev->range_index != spa->range_index) + if (spa && nfit_memdev->memdev->range_index != spa->range_index) + continue; + if (!spa && nfit_memdev->memdev->range_index) continue; found = NULL; dcr = nfit_memdev->memdev->region_index; @@ -835,14 +864,15 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, break; } nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); - } else { + } else if (type == NFIT_SPA_PM) { /* * A single dimm may belong to multiple SPA-PM * ranges, record at least one in addition to * any SPA-DCR range. */ nfit_mem->memdev_pmem = nfit_memdev->memdev; - } + } else + nfit_mem->memdev_dcr = nfit_memdev->memdev; } return 0; @@ -866,6 +896,8 @@ static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; + int rc; + /* * For each SPA-DCR or SPA-PMEM address range find its @@ -876,13 +908,20 @@ static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) * BDWs are optional. */ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { - int rc; - - rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa); + rc = __nfit_mem_init(acpi_desc, nfit_spa->spa); if (rc) return rc; } + /* + * If a DIMM has failed to be mapped into SPA there will be no + * SPA entries above. Find and register all the unmapped DIMMs + * for reporting and recovery purposes. + */ + rc = __nfit_mem_init(acpi_desc, NULL); + if (rc) + return rc; + list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); return 0; @@ -1237,12 +1276,14 @@ static ssize_t flags_show(struct device *dev, { u16 flags = to_nfit_memdev(dev)->flags; - return sprintf(buf, "%s%s%s%s%s\n", + return sprintf(buf, "%s%s%s%s%s%s%s\n", flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", - flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : ""); + flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "", + flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "", + flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? 
"smart_notify " : ""); } static DEVICE_ATTR_RO(flags); @@ -1290,8 +1331,16 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct device *dev = container_of(kobj, struct device, kobj); struct nvdimm *nvdimm = to_nvdimm(dev); - if (!to_nfit_dcr(dev)) + if (!to_nfit_dcr(dev)) { + /* Without a dcr only the memdev attributes can be surfaced */ + if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr + || a == &dev_attr_flags.attr + || a == &dev_attr_family.attr + || a == &dev_attr_dsm_mask.attr) + return a->mode; return 0; + } + if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) return 0; return a->mode; @@ -1368,6 +1417,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, unsigned long dsm_mask; const u8 *uuid; int i; + int family = -1; /* nfit test assumes 1:1 relationship between commands and dsms */ nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; @@ -1398,11 +1448,14 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, */ for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++) if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) - break; + if (family < 0 || i == default_dsm_family) + family = i; /* limit the supported commands to those that are publicly documented */ - nfit_mem->family = i; - if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { + nfit_mem->family = family; + if (override_dsm_mask && !disable_vendor_specific) + dsm_mask = override_dsm_mask; + else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { dsm_mask = 0x3fe; if (disable_vendor_specific) dsm_mask &= ~(1 << ND_CMD_VENDOR); @@ -1462,6 +1515,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { struct acpi_nfit_flush_address *flush; unsigned long flags = 0, cmd_mask; + struct nfit_memdev *nfit_memdev; u32 device_handle; u16 mem_flags; @@ -1473,11 +1527,22 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) } if (nfit_mem->bdw && nfit_mem->memdev_pmem) - flags |= NDD_ALIASING; + set_bit(NDD_ALIASING, &flags); + + /* collate flags across all memdevs for this dimm */ + list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { + struct acpi_nfit_memory_map *dimm_memdev; + + dimm_memdev = __to_nfit_memdev(nfit_mem); + if (dimm_memdev->device_handle + != nfit_memdev->memdev->device_handle) + continue; + dimm_memdev->flags |= nfit_memdev->memdev->flags; + } mem_flags = __to_nfit_memdev(nfit_mem)->flags; if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) - flags |= NDD_UNARMED; + set_bit(NDD_UNARMED, &flags); rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); if (rc) @@ -1507,12 +1572,13 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) continue; - dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n", + dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n", nvdimm_name(nvdimm), mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", - mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : ""); + mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "", + mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? 
" map_fail" : ""); } @@ -1783,8 +1849,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, mmio_flush_range((void __force *) mmio->addr.aperture + offset, c); - memcpy_from_pmem(iobuf + copied, - mmio->addr.aperture + offset, c); + memcpy(iobuf + copied, mmio->addr.aperture + offset, c); } copied += c; @@ -2525,6 +2590,7 @@ static void acpi_nfit_scrub(struct work_struct *work) acpi_nfit_register_region(acpi_desc, nfit_spa); } } + acpi_desc->init_complete = 1; list_for_each_entry(nfit_spa, &acpi_desc->spas, list) acpi_nfit_async_scrub(acpi_desc, nfit_spa); @@ -2547,7 +2613,8 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) return rc; } - queue_work(nfit_wq, &acpi_desc->work); + if (!acpi_desc->cancel) + queue_work(nfit_wq, &acpi_desc->work); return 0; } @@ -2593,32 +2660,11 @@ static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) return 0; } -static void acpi_nfit_destruct(void *data) +static void acpi_nfit_unregister(void *data) { struct acpi_nfit_desc *acpi_desc = data; - struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); - - /* - * Destruct under acpi_desc_lock so that nfit_handle_mce does not - * race teardown - */ - mutex_lock(&acpi_desc_lock); - acpi_desc->cancel = 1; - /* - * Bounce the nvdimm bus lock to make sure any in-flight - * acpi_nfit_ars_rescan() submissions have had a chance to - * either submit or see ->cancel set. - */ - device_lock(bus_dev); - device_unlock(bus_dev); - flush_workqueue(nfit_wq); - if (acpi_desc->scrub_count_state) - sysfs_put(acpi_desc->scrub_count_state); nvdimm_bus_unregister(acpi_desc->nvdimm_bus); - acpi_desc->nvdimm_bus = NULL; - list_del(&acpi_desc->list); - mutex_unlock(&acpi_desc_lock); } int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) @@ -2636,7 +2682,7 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) if (!acpi_desc->nvdimm_bus) return -ENOMEM; - rc = devm_add_action_or_reset(dev, acpi_nfit_destruct, + rc = devm_add_action_or_reset(dev, acpi_nfit_unregister, acpi_desc); if (rc) return rc; @@ -2728,6 +2774,13 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) device_lock(dev); device_unlock(dev); + /* bounce the init_mutex to make init_complete valid */ + mutex_lock(&acpi_desc->init_mutex); + if (acpi_desc->cancel || acpi_desc->init_complete) { + mutex_unlock(&acpi_desc->init_mutex); + return 0; + } + /* * Scrub work could take 10s of seconds, userspace may give up so we * need to be interruptible while waiting. 
@@ -2735,6 +2788,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) INIT_WORK_ONSTACK(&flush.work, flush_probe); COMPLETION_INITIALIZER_ONSTACK(flush.cmp); queue_work(nfit_wq, &flush.work); + mutex_unlock(&acpi_desc->init_mutex); rc = wait_for_completion_interruptible(&flush.cmp); cancel_work_sync(&flush.work); @@ -2771,10 +2825,12 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc) if (work_busy(&acpi_desc->work)) return -EBUSY; - if (acpi_desc->cancel) + mutex_lock(&acpi_desc->init_mutex); + if (acpi_desc->cancel) { + mutex_unlock(&acpi_desc->init_mutex); return 0; + } - mutex_lock(&acpi_desc->init_mutex); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { struct acpi_nfit_system_address *spa = nfit_spa->spa; @@ -2818,6 +2874,40 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) } EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); +static void acpi_nfit_put_table(void *table) +{ + acpi_put_table(table); +} + +void acpi_nfit_shutdown(void *data) +{ + struct acpi_nfit_desc *acpi_desc = data; + struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); + + /* + * Destruct under acpi_desc_lock so that nfit_handle_mce does not + * race teardown + */ + mutex_lock(&acpi_desc_lock); + list_del(&acpi_desc->list); + mutex_unlock(&acpi_desc_lock); + + mutex_lock(&acpi_desc->init_mutex); + acpi_desc->cancel = 1; + mutex_unlock(&acpi_desc->init_mutex); + + /* + * Bounce the nvdimm bus lock to make sure any in-flight + * acpi_nfit_ars_rescan() submissions have had a chance to + * either submit or see ->cancel set. + */ + device_lock(bus_dev); + device_unlock(bus_dev); + + flush_workqueue(nfit_wq); +} +EXPORT_SYMBOL_GPL(acpi_nfit_shutdown); + static int acpi_nfit_add(struct acpi_device *adev) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -2834,6 +2924,10 @@ static int acpi_nfit_add(struct acpi_device *adev) dev_dbg(dev, "failed to find NFIT at startup\n"); return 0; } + + rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl); + if (rc) + return rc; sz = tbl->length; acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); @@ -2861,12 +2955,15 @@ static int acpi_nfit_add(struct acpi_device *adev) rc = acpi_nfit_init(acpi_desc, (void *) tbl + sizeof(struct acpi_table_nfit), sz - sizeof(struct acpi_table_nfit)); - return rc; + + if (rc) + return rc; + return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc); } static int acpi_nfit_remove(struct acpi_device *adev) { - /* see acpi_nfit_destruct */ + /* see acpi_nfit_unregister */ return 0; } diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index fc29c2e9832e..58fb7d68e04a 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -37,7 +37,7 @@ #define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ - | ACPI_NFIT_MEM_NOT_ARMED) + | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) enum nfit_uuids { /* for simplicity alias the uuid index with the family id */ @@ -163,6 +163,7 @@ struct acpi_nfit_desc { unsigned int scrub_count; unsigned int scrub_mode; unsigned int cancel:1; + unsigned int init_complete:1; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, @@ -238,6 +239,7 @@ static inline struct acpi_nfit_desc *to_acpi_desc( const u8 *to_nfit_uuid(enum nfit_uuids id); int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); +void 
acpi_nfit_shutdown(void *data); void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event); void __acpi_nvdimm_notify(struct device *dev, u32 event); int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 2944353253ed..a4e8432fc2fb 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -54,6 +54,7 @@ static struct mcfg_fixup mcfg_quirks[] = { #define QCOM_ECAM32(seg) \ { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops } + QCOM_ECAM32(0), QCOM_ECAM32(1), QCOM_ECAM32(2), @@ -68,6 +69,7 @@ static struct mcfg_fixup mcfg_quirks[] = { { "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \ { "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \ { "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops } + HISI_QUAD_DOM("HIP05 ", 0, &hisi_pcie_ops), HISI_QUAD_DOM("HIP06 ", 0, &hisi_pcie_ops), HISI_QUAD_DOM("HIP07 ", 0, &hisi_pcie_ops), @@ -77,6 +79,7 @@ static struct mcfg_fixup mcfg_quirks[] = { #define THUNDER_PEM_RES(addr, node) \ DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M) + #define THUNDER_PEM_QUIRK(rev, node) \ { "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \ @@ -90,13 +93,16 @@ static struct mcfg_fixup mcfg_quirks[] = { &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \ { "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \ &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) } - /* SoC pass2.x */ - THUNDER_PEM_QUIRK(1, 0), - THUNDER_PEM_QUIRK(1, 1), #define THUNDER_ECAM_QUIRK(rev, seg) \ { "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \ &pci_thunder_ecam_ops } + + /* SoC pass2.x */ + THUNDER_PEM_QUIRK(1, 0), + THUNDER_PEM_QUIRK(1, 1), + THUNDER_ECAM_QUIRK(1, 10), + /* SoC pass1.x */ THUNDER_PEM_QUIRK(2, 0), /* off-chip devices */ THUNDER_PEM_QUIRK(2, 1), /* off-chip devices */ @@ -112,9 +118,11 @@ static struct mcfg_fixup mcfg_quirks[] = { #define XGENE_V1_ECAM_MCFG(rev, seg) \ {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ &xgene_v1_pcie_ecam_ops } + #define XGENE_V2_ECAM_MCFG(rev, seg) \ {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ &xgene_v2_pcie_ecam_ops } + /* X-Gene SoC with v1 PCIe controller */ XGENE_V1_ECAM_MCFG(1, 0), XGENE_V1_ECAM_MCFG(1, 1), diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 9d5f0c7ed3f7..8697a82bd465 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -251,6 +251,9 @@ static int __acpi_processor_start(struct acpi_device *device) if (ACPI_SUCCESS(status)) return 0; + result = -ENODEV; + acpi_pss_perf_exit(pr, device); + err_power_exit: acpi_processor_power_exit(pr); return result; @@ -259,11 +262,16 @@ err_power_exit: static int acpi_processor_start(struct device *dev) { struct acpi_device *device = ACPI_COMPANION(dev); + int ret; if (!device) return -ENODEV; - return __acpi_processor_start(device); + /* Protect against concurrent CPU hotplug operations */ + get_online_cpus(); + ret = __acpi_processor_start(device); + put_online_cpus(); + return ret; } static int acpi_processor_stop(struct device *dev) diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index a12f96cc93ff..3de34633f7f9 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg { #define THROTTLING_POSTCHANGE (2) static int 
acpi_processor_get_throttling(struct acpi_processor *pr); -int acpi_processor_set_throttling(struct acpi_processor *pr, - int state, bool force); +static int __acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force, bool direct); static int acpi_processor_update_tsd_coord(void) { @@ -891,7 +891,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid throttling state, reset\n")); state = 0; - ret = acpi_processor_set_throttling(pr, state, true); + ret = __acpi_processor_set_throttling(pr, state, true, + true); if (ret) return ret; } @@ -901,36 +902,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) return 0; } -static int acpi_processor_get_throttling(struct acpi_processor *pr) +static long __acpi_processor_get_throttling(void *data) { - cpumask_var_t saved_mask; - int ret; + struct acpi_processor *pr = data; + + return pr->throttling.acpi_processor_get_throttling(pr); +} +static int acpi_processor_get_throttling(struct acpi_processor *pr) +{ if (!pr) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; - if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) - return -ENOMEM; - /* - * Migrate task to the cpu pointed by pr. + * This is either called from the CPU hotplug callback of + * processor_driver or via the ACPI probe function. In the latter + * case the CPU is not guaranteed to be online. Both call sites are + * protected against CPU hotplug. */ - cpumask_copy(saved_mask, ¤t->cpus_allowed); - /* FIXME: use work_on_cpu() */ - if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { - /* Can't migrate to the target pr->id CPU. Exit */ - free_cpumask_var(saved_mask); + if (!cpu_online(pr->id)) return -ENODEV; - } - ret = pr->throttling.acpi_processor_get_throttling(pr); - /* restore the previous state */ - set_cpus_allowed_ptr(current, saved_mask); - free_cpumask_var(saved_mask); - return ret; + return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr); } static int acpi_processor_get_fadt_info(struct acpi_processor *pr) @@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn(void *data) arg->target_state, arg->force); } -int acpi_processor_set_throttling(struct acpi_processor *pr, - int state, bool force) +static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) +{ + if (direct) + return fn(arg); + return work_on_cpu(cpu, fn, arg); +} + +static int __acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force, bool direct) { int ret = 0; unsigned int i; @@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, arg.pr = pr; arg.target_state = state; arg.force = force; - ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg); + ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg, + direct); } else { /* * When the T-state coordination is SW_ALL or HW_ALL, @@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, arg.pr = match_pr; arg.target_state = state; arg.force = force; - ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, - &arg); + ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, + &arg, direct); } } /* @@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, return ret; } +int acpi_processor_set_throttling(struct acpi_processor *pr, int state, + bool force) +{ + return __acpi_processor_set_throttling(pr, state, force, false); +} + int acpi_processor_get_throttling_info(struct acpi_processor *pr) { int result = 0; 
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 3afddcd834ef..9364398204e9 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -37,14 +37,16 @@ static const u8 ads_uuid[16] = { static bool acpi_enumerate_nondev_subnodes(acpi_handle scope, const union acpi_object *desc, - struct acpi_device_data *data); + struct acpi_device_data *data, + struct fwnode_handle *parent); static bool acpi_extract_properties(const union acpi_object *desc, struct acpi_device_data *data); static bool acpi_nondev_subnode_extract(const union acpi_object *desc, acpi_handle handle, const union acpi_object *link, - struct list_head *list) + struct list_head *list, + struct fwnode_handle *parent) { struct acpi_data_node *dn; bool result; @@ -55,6 +57,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc, dn->name = link->package.elements[0].string.pointer; dn->fwnode.type = FWNODE_ACPI_DATA; + dn->parent = parent; INIT_LIST_HEAD(&dn->data.subnodes); result = acpi_extract_properties(desc, &dn->data); @@ -71,9 +74,11 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc, */ status = acpi_get_parent(handle, &scope); if (ACPI_SUCCESS(status) - && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data)) + && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, + &dn->fwnode)) result = true; - } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data)) { + } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data, + &dn->fwnode)) { result = true; } @@ -91,7 +96,8 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc, static bool acpi_nondev_subnode_data_ok(acpi_handle handle, const union acpi_object *link, - struct list_head *list) + struct list_head *list, + struct fwnode_handle *parent) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; acpi_status status; @@ -101,7 +107,8 @@ static bool acpi_nondev_subnode_data_ok(acpi_handle handle, if (ACPI_FAILURE(status)) return false; - if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list)) + if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list, + parent)) return true; ACPI_FREE(buf.pointer); @@ -110,7 +117,8 @@ static bool acpi_nondev_subnode_data_ok(acpi_handle handle, static bool acpi_nondev_subnode_ok(acpi_handle scope, const union acpi_object *link, - struct list_head *list) + struct list_head *list, + struct fwnode_handle *parent) { acpi_handle handle; acpi_status status; @@ -123,12 +131,13 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope, if (ACPI_FAILURE(status)) return false; - return acpi_nondev_subnode_data_ok(handle, link, list); + return acpi_nondev_subnode_data_ok(handle, link, list, parent); } static int acpi_add_nondev_subnodes(acpi_handle scope, const union acpi_object *links, - struct list_head *list) + struct list_head *list, + struct fwnode_handle *parent) { bool ret = false; int i; @@ -150,15 +159,18 @@ static int acpi_add_nondev_subnodes(acpi_handle scope, /* The second one may be a string, a reference or a package. 
*/ switch (link->package.elements[1].type) { case ACPI_TYPE_STRING: - result = acpi_nondev_subnode_ok(scope, link, list); + result = acpi_nondev_subnode_ok(scope, link, list, + parent); break; case ACPI_TYPE_LOCAL_REFERENCE: handle = link->package.elements[1].reference.handle; - result = acpi_nondev_subnode_data_ok(handle, link, list); + result = acpi_nondev_subnode_data_ok(handle, link, list, + parent); break; case ACPI_TYPE_PACKAGE: desc = &link->package.elements[1]; - result = acpi_nondev_subnode_extract(desc, NULL, link, list); + result = acpi_nondev_subnode_extract(desc, NULL, link, + list, parent); break; default: result = false; @@ -172,7 +184,8 @@ static int acpi_add_nondev_subnodes(acpi_handle scope, static bool acpi_enumerate_nondev_subnodes(acpi_handle scope, const union acpi_object *desc, - struct acpi_device_data *data) + struct acpi_device_data *data, + struct fwnode_handle *parent) { int i; @@ -194,7 +207,8 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope, if (memcmp(uuid->buffer.pointer, ads_uuid, sizeof(ads_uuid))) continue; - return acpi_add_nondev_subnodes(scope, links, &data->subnodes); + return acpi_add_nondev_subnodes(scope, links, &data->subnodes, + parent); } return false; @@ -345,7 +359,8 @@ void acpi_init_properties(struct acpi_device *adev) if (acpi_of) acpi_init_of_compatible(adev); } - if (acpi_enumerate_nondev_subnodes(adev->handle, buf.pointer, &adev->data)) + if (acpi_enumerate_nondev_subnodes(adev->handle, buf.pointer, + &adev->data, acpi_fwnode_handle(adev))) adev->data.pointer = buf.pointer; if (!adev->data.pointer) { @@ -699,6 +714,8 @@ static int acpi_data_prop_read_single(struct acpi_device_data *data, return ret; *(char **)val = obj->string.pointer; + + return 1; } else { ret = -EINVAL; } @@ -708,7 +725,15 @@ static int acpi_data_prop_read_single(struct acpi_device_data *data, int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, enum dev_prop_type proptype, void *val) { - return adev ? 
acpi_data_prop_read_single(&adev->data, propname, proptype, val) : -EINVAL; + int ret; + + if (!adev) + return -EINVAL; + + ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val); + if (ret < 0 || proptype != ACPI_TYPE_STRING) + return ret; + return 0; } static int acpi_copy_property_array_u8(const union acpi_object *items, u8 *val, @@ -784,7 +809,7 @@ static int acpi_copy_property_array_string(const union acpi_object *items, val[i] = items[i].string.pointer; } - return 0; + return nval; } static int acpi_data_prop_read(struct acpi_device_data *data, @@ -798,7 +823,7 @@ static int acpi_data_prop_read(struct acpi_device_data *data, if (val && nval == 1) { ret = acpi_data_prop_read_single(data, propname, proptype, val); - if (!ret) + if (ret >= 0) return ret; } @@ -809,7 +834,7 @@ static int acpi_data_prop_read(struct acpi_device_data *data, if (!val) return obj->package.count; - if (nval > obj->package.count) + if (proptype != DEV_PROP_STRING && nval > obj->package.count) return -EOVERFLOW; else if (nval <= 0) return -EINVAL; @@ -830,7 +855,9 @@ static int acpi_data_prop_read(struct acpi_device_data *data, ret = acpi_copy_property_array_u64(items, (u64 *)val, nval); break; case DEV_PROP_STRING: - ret = acpi_copy_property_array_string(items, (char **)val, nval); + ret = acpi_copy_property_array_string( + items, (char **)val, + min_t(u32, nval, obj->package.count)); break; default: ret = -EINVAL; @@ -865,21 +892,22 @@ int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname, } /** - * acpi_get_next_subnode - Return the next child node handle for a device. - * @dev: Device to find the next child node for. + * acpi_get_next_subnode - Return the next child node handle for a fwnode + * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the device's child nodes or a null handle. */ -struct fwnode_handle *acpi_get_next_subnode(struct device *dev, +struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode, struct fwnode_handle *child) { - struct acpi_device *adev = ACPI_COMPANION(dev); + struct acpi_device *adev = to_acpi_device_node(fwnode); struct list_head *head, *next; - if (!adev) - return NULL; - if (!child || child->type == FWNODE_ACPI) { - head = &adev->children; + if (adev) + head = &adev->children; + else + goto nondev; + if (list_empty(head)) goto nondev; @@ -888,7 +916,6 @@ struct fwnode_handle *acpi_get_next_subnode(struct device *dev, next = adev->node.next; if (next == head) { child = NULL; - adev = ACPI_COMPANION(dev); goto nondev; } adev = list_entry(next, struct acpi_device, node); @@ -900,9 +927,16 @@ struct fwnode_handle *acpi_get_next_subnode(struct device *dev, nondev: if (!child || child->type == FWNODE_ACPI_DATA) { + struct acpi_data_node *data = to_acpi_data_node(fwnode); struct acpi_data_node *dn; - head = &adev->data.subnodes; + if (adev) + head = &adev->data.subnodes; + else if (data) + head = &data->data.subnodes; + else + return NULL; + if (list_empty(head)) return NULL; @@ -920,3 +954,168 @@ struct fwnode_handle *acpi_get_next_subnode(struct device *dev, } return NULL; } + +/** + * acpi_node_get_parent - Return parent fwnode of this fwnode + * @fwnode: Firmware node whose parent to get + * + * Returns parent node of an ACPI device or data firmware node or %NULL if + * not available. 
+ */ +struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode) +{ + if (is_acpi_data_node(fwnode)) { + /* All data nodes have parent pointer so just return that */ + return to_acpi_data_node(fwnode)->parent; + } else if (is_acpi_device_node(fwnode)) { + acpi_handle handle, parent_handle; + + handle = to_acpi_device_node(fwnode)->handle; + if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) { + struct acpi_device *adev; + + if (!acpi_bus_get_device(parent_handle, &adev)) + return acpi_fwnode_handle(adev); + } + } + + return NULL; +} + +/** + * acpi_graph_get_next_endpoint - Get next endpoint ACPI firmware node + * @fwnode: Pointer to the parent firmware node + * @prev: Previous endpoint node or %NULL to get the first + * + * Looks up next endpoint ACPI firmware node below a given @fwnode. Returns + * %NULL if there is no next endpoint, ERR_PTR() in case of error. In case + * of success the next endpoint is returned. + */ +struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode, + struct fwnode_handle *prev) +{ + struct fwnode_handle *port = NULL; + struct fwnode_handle *endpoint; + + if (!prev) { + do { + port = fwnode_get_next_child_node(fwnode, port); + /* Ports must have port property */ + if (fwnode_property_present(port, "port")) + break; + } while (port); + } else { + port = fwnode_get_parent(prev); + } + + if (!port) + return NULL; + + endpoint = fwnode_get_next_child_node(port, prev); + while (!endpoint) { + port = fwnode_get_next_child_node(fwnode, port); + if (!port) + break; + if (fwnode_property_present(port, "port")) + endpoint = fwnode_get_next_child_node(port, NULL); + } + + if (endpoint) { + /* Endpoints must have "endpoint" property */ + if (!fwnode_property_present(endpoint, "endpoint")) + return ERR_PTR(-EPROTO); + } + + return endpoint; +} + +/** + * acpi_graph_get_child_prop_value - Return a child with a given property value + * @fwnode: device fwnode + * @prop_name: The name of the property to look for + * @val: the desired property value + * + * Return the port node corresponding to a given port number. Returns + * the child node on success, NULL otherwise. + */ +static struct fwnode_handle *acpi_graph_get_child_prop_value( + struct fwnode_handle *fwnode, const char *prop_name, unsigned int val) +{ + struct fwnode_handle *child; + + fwnode_for_each_child_node(fwnode, child) { + u32 nr; + + if (!fwnode_property_read_u32(fwnode, prop_name, &nr)) + continue; + + if (val == nr) + return child; + } + + return NULL; +} + + +/** + * acpi_graph_get_remote_enpoint - Parses and returns remote end of an endpoint + * @fwnode: Endpoint firmware node pointing to a remote device + * @parent: Firmware node of remote port parent is filled here if not %NULL + * @port: Firmware node of remote port is filled here if not %NULL + * @endpoint: Firmware node of remote endpoint is filled here if not %NULL + * + * Function parses remote end of ACPI firmware remote endpoint and fills in + * fields requested by the caller. Returns %0 in case of success and + * negative errno otherwise. 
+ */ +int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode, + struct fwnode_handle **parent, + struct fwnode_handle **port, + struct fwnode_handle **endpoint) +{ + unsigned int port_nr, endpoint_nr; + struct acpi_reference_args args; + int ret; + + memset(&args, 0, sizeof(args)); + ret = acpi_node_get_property_reference(fwnode, "remote-endpoint", 0, + &args); + if (ret) + return ret; + + /* + * Always require two arguments with the reference: port and + * endpoint indices. + */ + if (args.nargs != 2) + return -EPROTO; + + fwnode = acpi_fwnode_handle(args.adev); + port_nr = args.args[0]; + endpoint_nr = args.args[1]; + + if (parent) + *parent = fwnode; + + if (!port && !endpoint) + return 0; + + fwnode = acpi_graph_get_child_prop_value(fwnode, "port", port_nr); + if (!fwnode) + return -EPROTO; + + if (port) + *port = fwnode; + + if (!endpoint) + return 0; + + fwnode = acpi_graph_get_child_prop_value(fwnode, "endpoint", + endpoint_nr); + if (!fwnode) + return -EPROTO; + + *endpoint = fwnode; + + return 0; +} diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index c26931067415..e39ec7b7cb67 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1363,20 +1363,25 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) * @dev: The pointer to the device * @attr: device dma attributes */ -void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) +int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) { const struct iommu_ops *iommu; + u64 size; iort_set_dma_mask(dev); iommu = iort_iommu_configure(dev); + if (IS_ERR(iommu)) + return PTR_ERR(iommu); + size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); /* * Assume dma valid range starts at 0 and covers the whole * coherent_dma_mask. */ - arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu, - attr == DEV_DMA_COHERENT); + arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT); + + return 0; } EXPORT_SYMBOL_GPL(acpi_dma_configure); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 097d630ab886..a6574d626340 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -663,14 +663,40 @@ static int acpi_freeze_prepare(void) acpi_os_wait_events_complete(); if (acpi_sci_irq_valid()) enable_irq_wake(acpi_sci_irq); + return 0; } +static void acpi_freeze_wake(void) +{ + /* + * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means + * that the SCI has triggered while suspended, so cancel the wakeup in + * case it has not been a wakeup event (the GPEs will be checked later). + */ + if (acpi_sci_irq_valid() && + !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) + pm_system_cancel_wakeup(); +} + +static void acpi_freeze_sync(void) +{ + /* + * Process all pending events in case there are any wakeup ones. + * + * The EC driver uses the system workqueue, so that one needs to be + * flushed too. 
+ */ + acpi_os_wait_events_complete(); + flush_scheduled_work(); +} + static void acpi_freeze_restore(void) { acpi_disable_wakeup_devices(ACPI_STATE_S0); if (acpi_sci_irq_valid()) disable_irq_wake(acpi_sci_irq); + acpi_enable_all_runtime_gpes(); } @@ -682,6 +708,8 @@ static void acpi_freeze_end(void) static const struct platform_freeze_ops acpi_freeze_ops = { .begin = acpi_freeze_begin, .prepare = acpi_freeze_prepare, + .wake = acpi_freeze_wake, + .sync = acpi_freeze_sync, .restore = acpi_freeze_restore, .end = acpi_freeze_end, }; diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 0dae722ab2ec..ff425390bfa8 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -540,7 +540,7 @@ void __init acpi_table_upgrade(void) * But it's not enough on X86 because ioremap will * complain later (used by acpi_os_map_memory) that the pages * that should get mapped are not marked "reserved". - * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area) + * Both memblock_reserve and e820__range_add (via arch_reserve_mem_area) * works fine. */ memblock_reserve(acpi_tables_addr, all_tables_size); |
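The property.c return-value changes above align ACPI string reads with the rest of the device-property API: single-value string reads again report 0 on success to external callers, while array reads copy at most as many strings as the _DSD package holds and return the number copied instead of failing with -EOVERFLOW. A hedged consumer-side sketch, with a hypothetical property name and function:

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/property.h>

/* Hypothetical consumer: read an ACPI _DSD string list into a fixed array. */
static int demo_read_names(struct acpi_device *adev)
{
	const char *names[4];
	int count;

	/*
	 * After the change above, asking for more entries than the package
	 * provides is no longer an error for strings; the available entries
	 * are copied and the ACPI backend returns how many were copied.
	 */
	count = fwnode_property_read_string_array(acpi_fwnode_handle(adev),
						  "demo-names", names,
						  ARRAY_SIZE(names));
	if (count < 0)
		return count;

	return 0;
}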
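The new acpi_graph_*() helpers in property.c walk the _DSD hierarchical data extensions that mirror the OF graph binding: port subnodes carry a "port" property, and endpoint subnodes carry an "endpoint" property plus a "remote-endpoint" reference with port and endpoint indices. A minimal sketch of how a caller might chain the two helpers to find the device at the far end; demo_remote_device() is a hypothetical name and assumes the helpers are declared in a header visible to the caller:

#include <linux/acpi.h>
#include <linux/err.h>

static struct acpi_device *demo_remote_device(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *endpoint, *remote_parent;
	int ret;

	/* First endpoint below the first port of @fwnode. */
	endpoint = acpi_graph_get_next_endpoint(fwnode, NULL);
	if (IS_ERR_OR_NULL(endpoint))
		return NULL;

	/* Resolve the remote-endpoint reference; only the remote parent is needed here. */
	ret = acpi_graph_get_remote_endpoint(endpoint, &remote_parent, NULL, NULL);
	if (ret)
		return NULL;

	return to_acpi_device_node(remote_parent);
}

In practice such lookups are normally reached through generic fwnode graph wrappers rather than by calling the ACPI helpers directly; the direct calls here are only meant to illustrate the new code.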