Diffstat (limited to 'drivers/of/of_reserved_mem.c')
-rw-r--r--  drivers/of/of_reserved_mem.c | 242
1 file changed, 184 insertions(+), 58 deletions(-)
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 46e1c3fbc769..75e819f66a56 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -27,8 +27,9 @@
#include "of_private.h"
-#define MAX_RESERVED_REGIONS 64
-static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
+static struct reserved_mem reserved_mem_array[MAX_RESERVED_REGIONS] __initdata;
+static struct reserved_mem *reserved_mem __refdata = reserved_mem_array;
+static int total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
static int reserved_mem_count;
static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
@@ -51,12 +52,58 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
memblock_phys_free(base, size);
}
- kmemleak_ignore_phys(base);
+ if (!err)
+ kmemleak_ignore_phys(base);
return err;
}
/*
+ * alloc_reserved_mem_array() - allocate memory for the reserved_mem
+ * array using memblock
+ *
+ * This function is used to allocate memory for the reserved_mem
+ * array according to the total number of reserved memory regions
+ * defined in the DT.
+ * After the new array is allocated, the information stored in
+ * the initial static array is copied over to this new array and
+ * the new array is used from this point on.
+ */
+static void __init alloc_reserved_mem_array(void)
+{
+ struct reserved_mem *new_array;
+ size_t alloc_size, copy_size, memset_size;
+
+ alloc_size = array_size(total_reserved_mem_cnt, sizeof(*new_array));
+ if (alloc_size == SIZE_MAX) {
+ pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
+ return;
+ }
+
+ new_array = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ if (!new_array) {
+ pr_err("Failed to allocate memory for reserved_mem array with err: %d", -ENOMEM);
+ return;
+ }
+
+ copy_size = array_size(reserved_mem_count, sizeof(*new_array));
+ if (copy_size == SIZE_MAX) {
+ memblock_free(new_array, alloc_size);
+ total_reserved_mem_cnt = MAX_RESERVED_REGIONS;
+ pr_err("Failed to allocate memory for reserved_mem array with err: %d", -EOVERFLOW);
+ return;
+ }
+
+ memset_size = alloc_size - copy_size;
+
+ memcpy(new_array, reserved_mem, copy_size);
+ memset(new_array + reserved_mem_count, 0, memset_size);
+
+ reserved_mem = new_array;
+}
+
+static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem);
+/*
* fdt_reserved_mem_save_node() - save fdt node for second pass initialization
*/
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
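
The alloc_reserved_mem_array() hunk above follows a common boot-time grow-and-copy pattern: size the new array with array_size() so a multiplication overflow is caught, allocate it from memblock, copy the entries already recorded into the static seed array, and zero the tail. Below is a minimal standalone sketch of that pattern; the 'item' type, the counts, and grow_items() are made up for illustration and are not part of the patch.

/* Sketch only: a hypothetical 'item' array grown from a static seed. */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/limits.h>
#include <linux/memblock.h>
#include <linux/overflow.h>
#include <linux/string.h>

struct item { unsigned long val; };

static struct item seed[8] __initdata;
static struct item *items __refdata = seed;

static void __init grow_items(int used, int total)
{
        size_t alloc_size = array_size(total, sizeof(*items));
        size_t copy_size  = array_size(used, sizeof(*items));
        struct item *p;

        if (alloc_size == SIZE_MAX || copy_size == SIZE_MAX)
                return;                 /* multiplication overflowed */

        p = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
        if (!p)
                return;                 /* keep using the static seed */

        memcpy(p, items, copy_size);                    /* preserve recorded entries */
        memset(p + used, 0, alloc_size - copy_size);    /* clear the unused tail */
        items = p;                                      /* switch to the larger array */
}
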
@@ -64,7 +111,7 @@ static void __init fdt_reserved_mem_save_node(unsigned long node, const char *un
{
struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
- if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
+ if (reserved_mem_count == total_reserved_mem_cnt) {
pr_err("not enough space for all defined regions.\n");
return;
}
@@ -74,6 +121,9 @@ static void __init fdt_reserved_mem_save_node(unsigned long node, const char *un
rmem->base = base;
rmem->size = size;
+ /* Call the region specific initialization function */
+ fdt_init_reserved_mem_node(rmem);
+
reserved_mem_count++;
return;
}
@@ -106,7 +156,6 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
phys_addr_t base, size;
int len;
const __be32 *prop;
- int first = 1;
bool nomap;
prop = of_get_flat_dt_prop(node, "reg", &len);
@@ -134,10 +183,6 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
uname, &base, (unsigned long)(size / SZ_1M));
len -= t_len;
- if (first) {
- fdt_reserved_mem_save_node(node, uname, base, size);
- first = 0;
- }
}
return 0;
}
@@ -165,12 +210,85 @@ static int __init __reserved_mem_check_root(unsigned long node)
return 0;
}
+static void __init __rmem_check_for_overlap(void);
+
+/**
+ * fdt_scan_reserved_mem_reg_nodes() - Store info for the "reg" defined
+ * reserved memory regions.
+ *
+ * This function is used to scan through the DT and store the
+ * information for the reserved memory regions that are defined using
+ * the "reg" property. The region node number, name, base address, and
+ * size are all stored in the reserved_mem array by calling the
+ * fdt_reserved_mem_save_node() function.
+ */
+void __init fdt_scan_reserved_mem_reg_nodes(void)
+{
+ int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
+ const void *fdt = initial_boot_params;
+ phys_addr_t base, size;
+ const __be32 *prop;
+ int node, child;
+ int len;
+
+ if (!fdt)
+ return;
+
+ node = fdt_path_offset(fdt, "/reserved-memory");
+ if (node < 0) {
+ pr_info("Reserved memory: No reserved-memory node in the DT\n");
+ return;
+ }
+
+ /* Attempt dynamic allocation of a new reserved_mem array */
+ alloc_reserved_mem_array();
+
+ if (__reserved_mem_check_root(node)) {
+ pr_err("Reserved memory: unsupported node format, ignoring\n");
+ return;
+ }
+
+ fdt_for_each_subnode(child, fdt, node) {
+ const char *uname;
+
+ prop = of_get_flat_dt_prop(child, "reg", &len);
+ if (!prop)
+ continue;
+ if (!of_fdt_device_is_available(fdt, child))
+ continue;
+
+ uname = fdt_get_name(fdt, child, NULL);
+ if (len && len % t_len != 0) {
+ pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
+ uname);
+ continue;
+ }
+
+ if (len > t_len)
+ pr_warn("%s() ignores %d regions in node '%s'\n",
+ __func__, len / t_len - 1, uname);
+
+ base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ size = dt_mem_next_cell(dt_root_size_cells, &prop);
+
+ if (size)
+ fdt_reserved_mem_save_node(child, uname, base, size);
+ }
+
+ /* check for overlapping reserved regions */
+ __rmem_check_for_overlap();
+}
+
+static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname);
+
/*
* fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
*/
int __init fdt_scan_reserved_mem(void)
{
int node, child;
+ int dynamic_nodes_cnt = 0, count = 0;
+ int dynamic_nodes[MAX_RESERVED_REGIONS];
const void *fdt = initial_boot_params;
node = fdt_path_offset(fdt, "/reserved-memory");
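
fdt_scan_reserved_mem_reg_nodes() above records one (base, size) pair per statically-placed node by decoding the first "reg" entry with dt_mem_next_cell(); in the real code the cell counts come from dt_root_addr_cells and dt_root_size_cells. As an illustration only, assuming one address cell and one size cell, and a made-up node and values:

/*
 * Illustration only: for a node such as
 *
 *      frame-buffer@78000000 {
 *              reg = <0x78000000 0x800000>;
 *      };
 *
 * the loop above would record base 0x78000000 and size 0x800000.
 */
const __be32 raw_reg[] = { cpu_to_be32(0x78000000), cpu_to_be32(0x800000) };
const __be32 *p = raw_reg;
phys_addr_t base = dt_mem_next_cell(1, &p);     /* address cells = 1 */
phys_addr_t size = dt_mem_next_cell(1, &p);     /* size cells = 1 */
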
@@ -192,9 +310,31 @@ int __init fdt_scan_reserved_mem(void)
uname = fdt_get_name(fdt, child, NULL);
err = __reserved_mem_reserve_reg(child, uname);
- if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
- fdt_reserved_mem_save_node(child, uname, 0, 0);
+ if (!err)
+ count++;
+ /*
+ * Save the nodes for the dynamically-placed regions
+ * into an array which will be used for allocation right
+ * after all the statically-placed regions are reserved
+ * or marked as no-map. This is done to avoid dynamically
+ * allocating from one of the statically-placed regions.
+ */
+ if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) {
+ dynamic_nodes[dynamic_nodes_cnt] = child;
+ dynamic_nodes_cnt++;
+ }
+ }
+ for (int i = 0; i < dynamic_nodes_cnt; i++) {
+ const char *uname;
+ int err;
+
+ child = dynamic_nodes[i];
+ uname = fdt_get_name(fdt, child, NULL);
+ err = __reserved_mem_alloc_size(child, uname);
+ if (!err)
+ count++;
}
+ total_reserved_mem_cnt = count;
return 0;
}
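
The fdt_scan_reserved_mem() hunk above reorders initialization so that every statically-placed ("reg") region is reserved or marked no-map before any dynamically-placed ("size"-only) region is allocated, preventing the dynamic allocations from landing inside a static range. A bare-bones sketch of that two-pass ordering follows; reserve_static_region() and allocate_dynamic_region() are hypothetical helpers standing in for the real ones.

/* Sketch of the two-pass ordering; the helpers below are hypothetical. */
static int __init scan_reserved_sketch(const void *fdt, int parent)
{
        int deferred[MAX_RESERVED_REGIONS];
        int n_deferred = 0, child;

        /* Pass 1: pin down regions with a fixed "reg" range. */
        fdt_for_each_subnode(child, fdt, parent) {
                if (reserve_static_region(fdt, child) == -ENOENT &&
                    n_deferred < MAX_RESERVED_REGIONS)
                        deferred[n_deferred++] = child; /* dynamic: "size" only */
        }

        /* Pass 2: place the dynamic regions in whatever space remains. */
        for (int i = 0; i < n_deferred; i++)
                allocate_dynamic_region(fdt, deferred[i]);

        return 0;
}
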
@@ -253,8 +393,7 @@ static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
* __reserved_mem_alloc_size() - allocate reserved memory described by
* 'size', 'alignment' and 'alloc-ranges' properties.
*/
-static int __init __reserved_mem_alloc_size(unsigned long node,
- const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
+static int __init __reserved_mem_alloc_size(unsigned long node, const char *uname)
{
int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
phys_addr_t start = 0, end = 0;
@@ -276,12 +415,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
- if (len != dt_root_addr_cells * sizeof(__be32)) {
+ if (len != dt_root_size_cells * sizeof(__be32)) {
pr_err("invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
- align = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ align = dt_mem_next_cell(dt_root_size_cells, &prop);
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
@@ -302,13 +441,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
return -EINVAL;
}
- base = 0;
-
while (len > 0) {
start = dt_mem_next_cell(dt_root_addr_cells, &prop);
end = start + dt_mem_next_cell(dt_root_size_cells,
&prop);
+ base = 0;
ret = __reserved_mem_alloc_in_range(size, align,
start, end, nomap, &base);
if (ret == 0) {
@@ -334,9 +472,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
return -ENOMEM;
}
- *res_base = base;
- *res_size = size;
-
+ /* Save region in the reserved_mem array */
+ fdt_reserved_mem_save_node(node, uname, base, size);
return 0;
}
@@ -425,48 +562,37 @@ static void __init __rmem_check_for_overlap(void)
}
/**
- * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
+ * fdt_init_reserved_mem_node() - Initialize a reserved memory region
+ * @rmem: reserved_mem struct of the memory region to be initialized.
+ *
+ * This function is used to call the region specific initialization
+ * function for a reserved memory region.
*/
-void __init fdt_init_reserved_mem(void)
+static void __init fdt_init_reserved_mem_node(struct reserved_mem *rmem)
{
- int i;
-
- /* check for overlapping reserved regions */
- __rmem_check_for_overlap();
-
- for (i = 0; i < reserved_mem_count; i++) {
- struct reserved_mem *rmem = &reserved_mem[i];
- unsigned long node = rmem->fdt_node;
- int err = 0;
- bool nomap;
+ unsigned long node = rmem->fdt_node;
+ int err = 0;
+ bool nomap;
- nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+ nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
- if (rmem->size == 0)
- err = __reserved_mem_alloc_size(node, rmem->name,
- &rmem->base, &rmem->size);
- if (err == 0) {
- err = __reserved_mem_init_node(rmem);
- if (err != 0 && err != -ENOENT) {
- pr_info("node %s compatible matching fail\n",
- rmem->name);
- if (nomap)
- memblock_clear_nomap(rmem->base, rmem->size);
- else
- memblock_phys_free(rmem->base,
- rmem->size);
- } else {
- phys_addr_t end = rmem->base + rmem->size - 1;
- bool reusable =
- (of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
-
- pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
- &rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
- nomap ? "nomap" : "map",
- reusable ? "reusable" : "non-reusable",
- rmem->name ? rmem->name : "unknown");
- }
- }
+ err = __reserved_mem_init_node(rmem);
+ if (err != 0 && err != -ENOENT) {
+ pr_info("node %s compatible matching fail\n", rmem->name);
+ if (nomap)
+ memblock_clear_nomap(rmem->base, rmem->size);
+ else
+ memblock_phys_free(rmem->base, rmem->size);
+ } else {
+ phys_addr_t end = rmem->base + rmem->size - 1;
+ bool reusable =
+ (of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
+
+ pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
+ &rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
+ nomap ? "nomap" : "map",
+ reusable ? "reusable" : "non-reusable",
+ rmem->name ? rmem->name : "unknown");
}
}
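
fdt_init_reserved_mem_node() hands each saved region to __reserved_mem_init_node(), which matches the node's "compatible" string against handlers registered with RESERVEDMEM_OF_DECLARE(). A hedged sketch of such a handler is shown below; the "vendor,example-pool" compatible string and the message are invented for illustration.

#include <linux/init.h>
#include <linux/of_reserved_mem.h>
#include <linux/printk.h>

/* Invoked during early boot for nodes matching "vendor,example-pool". */
static int __init example_pool_setup(struct reserved_mem *rmem)
{
        pr_info("example pool: base %pa, size %pa\n", &rmem->base, &rmem->size);
        return 0;
}
RESERVEDMEM_OF_DECLARE(example_pool, "vendor,example-pool", example_pool_setup);
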