Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_topology.c     |  45
-rw-r--r--  drivers/base/class.c             |   2
-rw-r--r--  drivers/base/core.c              |   2
-rw-r--r--  drivers/base/cpu.c               |   2
-rw-r--r--  drivers/base/devtmpfs.c          |   2
-rw-r--r--  drivers/base/init.c              |   1
-rw-r--r--  drivers/base/memory.c            | 147
-rw-r--r--  drivers/base/node.c              |  48
-rw-r--r--  drivers/base/power/domain.c      |  42
-rw-r--r--  drivers/base/power/main.c        |  16
-rw-r--r--  drivers/base/power/runtime.c     |   5
-rw-r--r--  drivers/base/power/wakeirq.c     |   2
-rw-r--r--  drivers/base/power/wakeup.c      |   4
-rw-r--r--  drivers/base/regmap/internal.h   |   2
-rw-r--r--  drivers/base/regmap/regmap-irq.c |   6
-rw-r--r--  drivers/base/regmap/regmap.c     |  11
-rw-r--r--  drivers/base/topology.c          |  20
17 files changed, 265 insertions(+), 92 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 976154140f0b..1d6636ebaac5 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -339,6 +339,46 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
return !ret;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+
+void topology_init_cpu_capacity_cppc(void)
+{
+ struct cppc_perf_caps perf_caps;
+ int cpu;
+
+ if (likely(acpi_disabled || !acpi_cpc_valid()))
+ return;
+
+ raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
+ GFP_KERNEL);
+ if (!raw_capacity)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ if (!cppc_get_perf_caps(cpu, &perf_caps) &&
+ (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
+ (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
+ raw_capacity[cpu] = perf_caps.highest_perf;
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
+ cpu, raw_capacity[cpu]);
+ continue;
+ }
+
+ pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
+ pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+ goto exit;
+ }
+
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ pr_debug("cpu_capacity: cpu_capacity initialization done\n");
+
+exit:
+ free_raw_capacity();
+}
+#endif
+
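(Illustrative sketch, not part of the patch: topology_normalize_cpu_scale(),
called above, rescales the raw CPPC highest_perf values so the largest one
maps to SCHED_CAPACITY_SCALE (1024). A minimal standalone sketch of that
arithmetic, with hypothetical names:)

	/* Rescale raw per-CPU performance so the fastest CPU reads 1024. */
	static void normalize_capacity_example(const u32 *raw,
					       unsigned long *cap, int nr_cpus)
	{
		u32 max_raw = 1;	/* avoid division by zero */
		int cpu;

		for (cpu = 0; cpu < nr_cpus; cpu++)
			max_raw = max(max_raw, raw[cpu]);

		for (cpu = 0; cpu < nr_cpus; cpu++)
			cap[cpu] = (1024UL * raw[cpu]) / max_raw;
	}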
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
@@ -387,9 +427,8 @@ static int __init register_cpufreq_notifier(void)
int ret;
/*
- * on ACPI-based systems we need to use the default cpu capacity
- * until we have the necessary code to parse the cpu capacity, so
- * skip registering cpufreq notifier.
+ * On ACPI-based systems skip registering cpufreq notifier as cpufreq
+ * information is not needed for cpu capacity initialization.
*/
if (!acpi_disabled || !raw_capacity)
return -EINVAL;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 7476f393df97..8feb85e186e3 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -16,7 +16,7 @@
#include <linux/kdev_t.h>
#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/mutex.h>
#include "base.h"
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7bb957b11861..3d6430eb0c6a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -21,7 +21,7 @@
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 5fc258073bc7..2ef23fce0860 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -275,7 +275,7 @@ static ssize_t print_cpus_isolated(struct device *dev,
return -ENOMEM;
cpumask_andnot(isolated, cpu_possible_mask,
- housekeeping_cpumask(HK_FLAG_DOMAIN));
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));
free_cpumask_var(isolated);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index f41063ac1aee..db5a03a0618e 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -17,7 +17,7 @@
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
diff --git a/drivers/base/init.c b/drivers/base/init.c
index a9f57c22fb9e..d8d0fe687111 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -35,5 +35,6 @@ void __init driver_init(void)
auxiliary_bus_init();
cpu_dev_init();
memory_dev_init();
+ node_dev_init();
container_dev_init();
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 365cd4a7f239..7222ff9b5e05 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -215,6 +215,7 @@ static int memory_block_online(struct memory_block *mem)
adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
nr_vmemmap_pages);
+ mem->zone = zone;
return ret;
}
@@ -225,6 +226,9 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
int ret;
+ if (!mem->zone)
+ return -EINVAL;
+
/*
* Unaccount before offlining, such that unpopulated zone and kthreads
* can properly be torn down in offline_pages().
@@ -234,7 +238,7 @@ static int memory_block_offline(struct memory_block *mem)
-nr_vmemmap_pages);
ret = offline_pages(start_pfn + nr_vmemmap_pages,
- nr_pages - nr_vmemmap_pages, mem->group);
+ nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
if (ret) {
/* offline_pages() failed. Account back. */
if (nr_vmemmap_pages)
@@ -246,6 +250,7 @@ static int memory_block_offline(struct memory_block *mem)
if (nr_vmemmap_pages)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+ mem->zone = NULL;
return ret;
}
@@ -411,11 +416,10 @@ static ssize_t valid_zones_show(struct device *dev,
*/
if (mem->state == MEM_ONLINE) {
/*
- * The block contains more than one zone can not be offlined.
- * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+ * If !mem->zone, the memory block spans multiple zones and
+ * cannot get offlined.
*/
- default_zone = test_pages_in_a_zone(start_pfn,
- start_pfn + nr_pages);
+ default_zone = mem->zone;
if (!default_zone)
return sysfs_emit(buf, "%s\n", "none");
len += sysfs_emit_at(buf, len, "%s", default_zone->name);
@@ -555,6 +559,8 @@ static ssize_t hard_offline_page_store(struct device *dev,
return -EINVAL;
pfn >>= PAGE_SHIFT;
ret = memory_failure(pfn, 0);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
return ret ? ret : count;
}
@@ -613,11 +619,7 @@ static const struct attribute_group *memory_memblk_attr_groups[] = {
NULL,
};
-/*
- * register_memory - Setup a sysfs device for a memory block
- */
-static
-int register_memory(struct memory_block *memory)
+static int __add_memory_block(struct memory_block *memory)
{
int ret;
@@ -641,9 +643,85 @@ int register_memory(struct memory_block *memory)
return ret;
}
-static int init_memory_block(unsigned long block_id, unsigned long state,
- unsigned long nr_vmemmap_pages,
- struct memory_group *group)
+static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
+ int nid)
+{
+ const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct zone *zone, *matching_zone = NULL;
+ pg_data_t *pgdat = NODE_DATA(nid);
+ int i;
+
+ /*
+ * This logic only works for early memory, when the applicable zones
+ * already span the memory block. We don't expect overlapping zones on
+ * a single node for early memory. So if we're told that some PFNs
+ * of a node fall into this memory block, we can assume that all node
+ * zones that intersect with the memory block are actually applicable.
+ * No need to look at the memmap.
+ */
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if (!populated_zone(zone))
+ continue;
+ if (!zone_intersects(zone, start_pfn, nr_pages))
+ continue;
+ if (!matching_zone) {
+ matching_zone = zone;
+ continue;
+ }
+ /* Spans multiple zones ... */
+ matching_zone = NULL;
+ break;
+ }
+ return matching_zone;
+}
+
+#ifdef CONFIG_NUMA
+/**
+ * memory_block_add_nid() - Indicate that system RAM falling into this memory
+ * block device (partially) belongs to the given node.
+ * @mem: The memory block device.
+ * @nid: The node id.
+ * @context: The memory initialization context.
+ *
+ * Indicate that system RAM falling into this memory block (partially) belongs
+ * to the given node. If the context indicates ("early") that we are adding the
+ * node during node device subsystem initialization, this will also properly
+ * set/adjust mem->zone based on the zone ranges of the given node.
+ */
+void memory_block_add_nid(struct memory_block *mem, int nid,
+ enum meminit_context context)
+{
+ if (context == MEMINIT_EARLY && mem->nid != nid) {
+ /*
+ * For early memory we have to determine the zone when setting
+ * the node id and handle multiple nodes spanning a single
+	 * memory block by indicating via zone == NULL that we're not
+ * dealing with a single zone. So if we're setting the node id
+ * the first time, determine if there is a single zone. If we're
+ * setting the node id a second time to a different node,
+ * invalidate the single detected zone.
+ */
+ if (mem->nid == NUMA_NO_NODE)
+ mem->zone = early_node_zone_for_memory_block(mem, nid);
+ else
+ mem->zone = NULL;
+ }
+
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node. If we span multiple nodes (not applicable
+ * to hotplugged memory), zone == NULL will prohibit memory offlining
+ * and consequently unplug.
+ */
+ mem->nid = nid;
+}
+#endif
+
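(Illustrative walkthrough, not part of the patch, of the early-boot case the
comment above describes:)

	/*
	 * Hypothetical sequence for an early memory block spanning two nodes:
	 *
	 *	memory_block_add_nid(mem, 0, MEMINIT_EARLY);
	 *		-> mem->nid == NUMA_NO_NODE, so mem->zone is set from
	 *		   node 0's zone spans
	 *	memory_block_add_nid(mem, 1, MEMINIT_EARLY);
	 *		-> mem->nid != 1, so mem->zone = NULL; the block can
	 *		   no longer be offlined (memory_block_offline()
	 *		   now returns -EINVAL for it)
	 */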
+static int add_memory_block(unsigned long block_id, unsigned long state,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
{
struct memory_block *mem;
int ret = 0;
@@ -663,17 +741,30 @@ static int init_memory_block(unsigned long block_id, unsigned long state,
mem->nr_vmemmap_pages = nr_vmemmap_pages;
INIT_LIST_HEAD(&mem->group_next);
+#ifndef CONFIG_NUMA
+ if (state == MEM_ONLINE)
+ /*
+ * MEM_ONLINE at this point implies early memory. With NUMA,
+ * we'll determine the zone when setting the node id via
+	 * memory_block_add_nid(). Memory hotplug updates the zone
+ * manually when memory onlining/offlining succeeds.
+ */
+ mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
+#endif /* CONFIG_NUMA */
+
+ ret = __add_memory_block(mem);
+ if (ret)
+ return ret;
+
if (group) {
mem->group = group;
list_add(&mem->group_next, &group->memory_blocks);
}
- ret = register_memory(mem);
-
- return ret;
+ return 0;
}
-static int add_memory_block(unsigned long base_section_nr)
+static int __init add_boot_memory_block(unsigned long base_section_nr)
{
int section_count = 0;
unsigned long nr;
@@ -685,11 +776,18 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
- return init_memory_block(memory_block_id(base_section_nr),
- MEM_ONLINE, 0, NULL);
+ return add_memory_block(memory_block_id(base_section_nr),
+ MEM_ONLINE, 0, NULL);
+}
+
+static int add_hotplug_memory_block(unsigned long block_id,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
+{
+ return add_memory_block(block_id, MEM_OFFLINE, nr_vmemmap_pages, group);
}
-static void unregister_memory(struct memory_block *memory)
+static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
return;
@@ -728,8 +826,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages,
- group);
+ ret = add_hotplug_memory_block(block_id, vmemmap_pages, group);
if (ret)
break;
}
@@ -740,7 +837,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- unregister_memory(mem);
+ remove_memory_block(mem);
}
}
return ret;
@@ -769,7 +866,7 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
if (WARN_ON_ONCE(!mem))
continue;
unregister_memory_block_under_nodes(mem);
- unregister_memory(mem);
+ remove_memory_block(mem);
}
}
@@ -829,7 +926,7 @@ void __init memory_dev_init(void)
*/
for (nr = 0; nr <= __highest_present_section_nr;
nr += sections_per_block) {
- ret = add_memory_block(nr);
+ ret = add_boot_memory_block(nr);
if (ret)
panic("%s() failed to add memory block: %d\n", __func__,
ret);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 87acc47e8951..ec8bb24a5a22 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -796,15 +796,12 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
static void do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk)
+ struct memory_block *mem_blk,
+ enum meminit_context context)
{
int ret;
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node.
- */
- mem_blk->nid = nid;
+ memory_block_add_nid(mem_blk, nid, context);
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
@@ -857,7 +854,7 @@ static int register_mem_block_under_node_early(struct memory_block *mem_blk,
if (page_nid != nid)
continue;
- do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
return 0;
}
/* mem section does not span the specified node */
@@ -873,7 +870,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- do_register_memory_block_under_node(nid, mem_blk);
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
return 0;
}
@@ -892,8 +889,9 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}
-void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
- enum meminit_context context)
+void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context)
{
walk_memory_blocks_func_t func;
@@ -1065,26 +1063,30 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
};
#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
-static int __init register_node_type(void)
+void __init node_dev_init(void)
{
- int ret;
+ static struct notifier_block node_memory_callback_nb = {
+ .notifier_call = node_memory_callback,
+ .priority = NODE_CALLBACK_PRI,
+ };
+ int ret, i;
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
- if (!ret) {
- static struct notifier_block node_memory_callback_nb = {
- .notifier_call = node_memory_callback,
- .priority = NODE_CALLBACK_PRI,
- };
- register_hotmemory_notifier(&node_memory_callback_nb);
- }
+ if (ret)
+ panic("%s() failed to register subsystem: %d\n", __func__, ret);
+
+ register_hotmemory_notifier(&node_memory_callback_nb);
/*
- * Note: we're not going to unregister the node class if we fail
- * to register the node state class attribute files.
+ * Create all node devices, which will properly link the node
+ * to applicable memory block devices and already created cpu devices.
*/
- return ret;
+ for_each_online_node(i) {
+ ret = register_one_node(i);
+ if (ret)
+ panic("%s() failed to add node: %d\n", __func__, ret);
+ }
}
-postcore_initcall(register_node_type);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5db704f02e71..1ee878d126fd 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -636,6 +636,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
+ /*
+ * The children must be in their deepest (powered-off) states to allow
+	 * the parent to be powered off. Note that there's no need for
+	 * additional locking, as powering on a child requires the parent's
+ * lock to be acquired first.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
+ return -EBUSY;
+ }
+
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
enum pm_qos_flags_status stat;
@@ -1073,6 +1085,13 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
|| atomic_read(&genpd->sd_count) > 0)
return;
+ /* Check that the children are in their deepest (powered-off) state. */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
+ return;
+ }
+
/* Choose the deepest state when suspending */
genpd->state_idx = genpd->state_count - 1;
if (_genpd_power_off(genpd, false))
@@ -2058,9 +2077,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
kfree(link);
}
- genpd_debug_remove(genpd);
list_del(&genpd->gpd_list_node);
genpd_unlock(genpd);
+ genpd_debug_remove(genpd);
cancel_work_sync(&genpd->power_off_work);
if (genpd_is_cpu_domain(genpd))
free_cpumask_var(genpd->cpus);
@@ -2248,12 +2267,8 @@ int of_genpd_add_provider_simple(struct device_node *np,
/* Parse genpd OPP table */
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table(&genpd->dev);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
/*
* Save table for faster processing while setting performance
@@ -2312,9 +2327,8 @@ int of_genpd_add_provider_onecell(struct device_node *np,
if (genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
- i, ret);
+ dev_err_probe(&genpd->dev, ret,
+ "Failed to add OPP table for index %d\n", i);
goto error;
}
@@ -2672,12 +2686,8 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
ret = genpd_add_device(pd, dev, base_dev);
mutex_unlock(&gpd_list_lock);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to add to PM domain %s: %d",
- pd->name, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
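
(For context, not part of the patch: dev_err_probe() folds in the
-EPROBE_DEFER special case the removed branches open-coded. It returns the
error it is given, logs at error level for real failures, and only at debug
level for -EPROBE_DEFER while recording the deferral reason. A minimal,
hypothetical usage sketch:)

	static int foo_probe(struct platform_device *pdev)	/* hypothetical */
	{
		struct clk *clk = devm_clk_get(&pdev->dev, NULL);

		if (IS_ERR(clk))
			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
					     "failed to get clock\n");
		return 0;
	}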
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 04ea92cbd9cf..c50139207794 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -485,7 +485,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1568,7 +1568,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
trace_device_pm_callback_start(dev, info, state.event);
error = cb(dev, state);
trace_device_pm_callback_end(dev, error);
- suspend_report_result(cb, error);
+ suspend_report_result(dev, cb, error);
initcall_debug_report(dev, calltime, cb, error);
@@ -1855,7 +1855,7 @@ unlock:
device_unlock(dev);
if (ret < 0) {
- suspend_report_result(callback, ret);
+ suspend_report_result(dev, callback, ret);
pm_runtime_put(dev);
return ret;
}
@@ -1960,10 +1960,10 @@ int dpm_suspend_start(pm_message_t state)
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
-void __suspend_report_result(const char *function, void *fn, int ret)
+void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- pr_err("%s(): %pS returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
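
(Illustrative aside, not part of the patch: callers reach this through the
suspend_report_result() macro in include/linux/pm.h, which with this change
passes the device through, roughly:)

	#define suspend_report_result(dev, fn, ret)			\
		do {							\
			__suspend_report_result(__func__, dev,		\
						(void *)(fn), (ret));	\
		} while (0)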
@@ -2018,7 +2018,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
void device_pm_check_callbacks(struct device *dev)
{
- spin_lock_irq(&dev->power.lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
@@ -2027,7 +2029,7 @@ void device_pm_check_callbacks(struct device *dev)
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
- spin_unlock_irq(&dev->power.lock);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
}
bool dev_pm_skip_suspend(struct device *dev)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2f3cce17219b..d4059e6ffeae 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1476,11 +1476,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_enable);
static void pm_runtime_disable_action(void *data)
{
+ pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
/**
* devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ *
* @dev: Device to handle.
*/
int devm_pm_runtime_enable(struct device *dev)
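
(Hypothetical usage sketch, not part of the patch: with this change, a driver
that uses autosuspend can rely on the single devres action for both teardown
steps:)

	static int foo_probe(struct device *dev)	/* hypothetical */
	{
		pm_runtime_set_autosuspend_delay(dev, 100);	/* ms */
		pm_runtime_use_autosuspend(dev);

		/* Disables runtime PM *and* autosuspend use at unbind. */
		return devm_pm_runtime_enable(dev);
	}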
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 0004db4a9d3b..d487a6bac630 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -289,7 +289,7 @@ EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
*
* Enables wakeirq conditionally. We need to enable wake-up interrupt
* lazily on the first rpm_suspend(). This is needed as the consumer device
- * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
* otherwise try to disable already disabled wakeirq. The wake-up interrupt
* starts disabled with IRQ_NOAUTOEN set.
*
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 8666590201c9..a57d469676ca 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -587,7 +587,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
* @ws: Wakeup source to handle.
*
* Update the @ws' statistics and, if @ws has just been activated, notify the PM
- * core of the event by incrementing the counter of of wakeup events being
+ * core of the event by incrementing the counter of the wakeup events being
* processed.
*/
static void wakeup_source_activate(struct wakeup_source *ws)
@@ -733,7 +733,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
/*
* Increment the counter of registered wakeup events and decrement the
- * couter of wakeup events in progress simultaneously.
+ * counter of wakeup events in progress simultaneously.
*/
cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
trace_wakeup_source_deactivate(ws->name, cec);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b1905916f7af..b4df36c7b17d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -31,6 +31,7 @@ struct regmap_format {
size_t buf_size;
size_t reg_bytes;
size_t pad_bytes;
+ size_t reg_downshift;
size_t val_bytes;
void (*format_write)(struct regmap *map,
unsigned int reg, unsigned int val);
@@ -62,6 +63,7 @@ struct regmap {
regmap_unlock unlock;
void *lock_arg; /* This is passed to lock/unlock functions */
gfp_t alloc_flags;
+ unsigned int reg_base;
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4a446259a184..400c7412a7dc 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -535,7 +535,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
/*
* Ignore masked IRQs and ack if we need to; we ack early so
- * there is no race between handling and acknowleding the
+ * there is no race between handling and acknowledging the
* interrupt. We assume that typically few of the interrupts
* will fire simultaneously so don't worry about overhead from
* doing a write per register.
@@ -1045,7 +1045,7 @@ int devm_regmap_add_irq_chip_fwnode(struct device *dev,
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
/**
- * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
+ * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
*
* @dev: The device pointer on which irq_chip belongs to.
* @map: The regmap for the device.
@@ -1074,7 +1074,7 @@ EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
/**
* devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
*
- * @dev: Device for which which resource was allocated.
+ * @dev: Device for which the resource was allocated.
* @irq: Primary IRQ for the device.
* @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
*
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 8f9fe5fd4707..5e12f7cb5147 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -821,8 +821,11 @@ struct regmap *__regmap_init(struct device *dev,
else
map->alloc_flags = GFP_KERNEL;
+ map->reg_base = config->reg_base;
+
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
+ map->format.reg_downshift = config->reg_downshift;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
config->val_bits + config->pad_bits, 8);
@@ -1735,6 +1738,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->write_flag_mask);
@@ -1905,6 +1910,8 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_write(map, reg, val);
trace_regmap_hw_write_start(map, reg, 1);
@@ -2346,6 +2353,8 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
unsigned int reg = regs[i].reg;
unsigned int val = regs[i].def;
trace_regmap_hw_write_start(map, reg, 1);
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
map->format.format_val(u8, val, 0);
@@ -2673,6 +2682,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
return ret;
}
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
map->format.format_reg(map->work_buf, reg, map->reg_shift);
regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
map->read_flag_mask);
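
(Hypothetical configuration sketch, not part of the patch: each transformed
path above computes bus_reg = (reg + map->reg_base) >> reg_downshift. A
device whose datasheet uses byte addresses on a word-addressed bus, with its
register window starting at 0x100, might describe that as:)

	static const struct regmap_config foo_regmap_config = {	/* hypothetical */
		.reg_bits	= 16,
		.val_bits	= 16,
		.reg_base	= 0x100,  /* added to every register address */
		.reg_downshift	= 1,	  /* bus uses 16-bit word addresses */
	};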
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index fc24e89f9592..e9d1efcda89b 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -14,11 +14,11 @@
#include <linux/hardirq.h>
#include <linux/topology.h>
-#define define_id_show_func(name) \
+#define define_id_show_func(name, fmt) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- return sysfs_emit(buf, "%d\n", topology_##name(dev->id)); \
+ return sysfs_emit(buf, fmt "\n", topology_##name(dev->id)); \
}
#define define_siblings_read_func(name, mask) \
@@ -42,22 +42,25 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
off, count); \
}
-define_id_show_func(physical_package_id);
+define_id_show_func(physical_package_id, "%d");
static DEVICE_ATTR_RO(physical_package_id);
#ifdef TOPOLOGY_DIE_SYSFS
-define_id_show_func(die_id);
+define_id_show_func(die_id, "%d");
static DEVICE_ATTR_RO(die_id);
#endif
#ifdef TOPOLOGY_CLUSTER_SYSFS
-define_id_show_func(cluster_id);
+define_id_show_func(cluster_id, "%d");
static DEVICE_ATTR_RO(cluster_id);
#endif
-define_id_show_func(core_id);
+define_id_show_func(core_id, "%d");
static DEVICE_ATTR_RO(core_id);
+define_id_show_func(ppin, "0x%llx");
+static DEVICE_ATTR_ADMIN_RO(ppin);
+
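(Illustrative expansion, not part of the patch: with the new format
parameter, define_id_show_func(ppin, "0x%llx") yields:)

	static ssize_t ppin_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "0x%llx\n", topology_ppin(dev->id));
	}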
define_siblings_read_func(thread_siblings, sibling_cpumask);
static BIN_ATTR_RO(thread_siblings, 0);
static BIN_ATTR_RO(thread_siblings_list, 0);
@@ -87,7 +90,7 @@ static BIN_ATTR_RO(package_cpus, 0);
static BIN_ATTR_RO(package_cpus_list, 0);
#ifdef TOPOLOGY_BOOK_SYSFS
-define_id_show_func(book_id);
+define_id_show_func(book_id, "%d");
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
static BIN_ATTR_RO(book_siblings, 0);
@@ -95,7 +98,7 @@ static BIN_ATTR_RO(book_siblings_list, 0);
#endif
#ifdef TOPOLOGY_DRAWER_SYSFS
-define_id_show_func(drawer_id);
+define_id_show_func(drawer_id, "%d");
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
static BIN_ATTR_RO(drawer_siblings, 0);
@@ -145,6 +148,7 @@ static struct attribute *default_attrs[] = {
#ifdef TOPOLOGY_DRAWER_SYSFS
&dev_attr_drawer_id.attr,
#endif
+ &dev_attr_ppin.attr,
NULL
};