path: root/drivers/dax/hmem/hmem.c
blob: e5fe8b39fb94beea985cfb0702ec3a6405977385
// SPDX-License-Identifier: GPL-2.0
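/*
 * "hmem" device-DAX driver: surface Soft Reserved, performance
 * differentiated memory ranges (e.g. EFI special purpose memory) as
 * device-DAX instances.
 */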
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/dax.h>
#include "../bus.h"

static bool region_idle;
module_param_named(region_idle, region_idle, bool, 0644);

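/*
 * Probe one "hmem" platform device: allocate a dax_region spanning the
 * memregion handed over as platform_data, then create a single dev_dax
 * instance covering the full range (or a zero-sized one when
 * @region_idle, leaving partitioning to an administrative agent).
 */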
static int dax_hmem_probe(struct platform_device *pdev)
{
	unsigned long flags = IORESOURCE_DAX_KMEM;
	struct device *dev = &pdev->dev;
	struct dax_region *dax_region;
	struct memregion_info *mri;
	struct dev_dax_data data;
	struct dev_dax *dev_dax;

	/*
	 * @region_idle == true indicates that an administrative agent
	 * wants to manipulate the range partitioning before the devices
	 * are created, so do not send them to the dax_kmem driver by
	 * default.
	 */
	if (region_idle)
		flags = 0;

	mri = dev->platform_data;
	dax_region = alloc_dax_region(dev, pdev->id, &mri->range,
				      mri->target_node, PMD_SIZE, flags);
	if (!dax_region)
		return -ENOMEM;

	data = (struct dev_dax_data) {
		.dax_region = dax_region,
		.id = -1,
		.size = region_idle ? 0 : range_len(&mri->range),
	};
	dev_dax = devm_create_dev_dax(&data);
	if (IS_ERR(dev_dax))
		return PTR_ERR(dev_dax);

	/* child dev_dax instances now own the lifetime of the dax_region */
	dax_region_put(dax_region);
	return 0;
}

static struct platform_driver dax_hmem_driver = {
	.probe = dax_hmem_probe,
	.driver = {
		.name = "hmem",
	},
};

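/* devm release actions to unwind memregion_alloc() / platform_device_add() */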
static void release_memregion(void *data)
{
	memregion_free((long) data);
}

static void release_hmem(void *pdev)
{
	platform_device_unregister(pdev);
}

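/*
 * Register an "hmem" platform device for @res, unless the range is
 * claimed by a CXL region or does not intersect a Soft Reserved range.
 * The device carries a memregion_info as platform_data for
 * dax_hmem_probe() to consume.
 */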
static int hmem_register_device(struct device *host, int target_nid,
				const struct resource *res)
{
	struct platform_device *pdev;
	struct memregion_info info;
	long id;
	int rc;

	if (IS_ENABLED(CONFIG_CXL_REGION) &&
	    region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
			      IORES_DESC_CXL) != REGION_DISJOINT) {
		dev_dbg(host, "deferring range to CXL: %pr\n", res);
		return 0;
	}

	rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
			       IORES_DESC_SOFT_RESERVED);
	if (rc != REGION_INTERSECTS)
		return 0;

	id = memregion_alloc(GFP_KERNEL);
	if (id < 0) {
		dev_err(host, "memregion allocation failure for %pr\n", res);
		return -ENOMEM;
	}
	rc = devm_add_action_or_reset(host, release_memregion, (void *) id);
	if (rc)
		return rc;

	pdev = platform_device_alloc("hmem", id);
	if (!pdev) {
		dev_err(host, "device allocation failure for %pr\n", res);
		return -ENOMEM;
	}

	pdev->dev.numa_node = numa_map_to_online_node(target_nid);
	info = (struct memregion_info) {
		.target_node = target_nid,
		.range = {
			.start = res->start,
			.end = res->end,
		},
	};
	rc = platform_device_add_data(pdev, &info, sizeof(info));
	if (rc < 0) {
		dev_err(host, "memregion_info allocation failure for %pr\n",
		       res);
		goto out_put;
	}

	rc = platform_device_add(pdev);
	if (rc < 0) {
		dev_err(host, "%s add failed for %pr\n", dev_name(&pdev->dev),
			res);
		goto out_put;
	}

	return devm_add_action_or_reset(host, release_hmem, pdev);

out_put:
	platform_device_put(pdev);
	return rc;
}

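/* Enumerate all platform-reported hmem ranges and register a device for each */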
static int dax_hmem_platform_probe(struct platform_device *pdev)
{
	return walk_hmem_resources(&pdev->dev, hmem_register_device);
}

static struct platform_driver dax_hmem_platform_driver = {
	.probe = dax_hmem_platform_probe,
	.driver = {
		.name = "hmem_platform",
	},
};

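/*
 * Register the "hmem_platform" driver (range enumeration) before the
 * "hmem" driver (per-range device-DAX), unwinding on failure.
 */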
static __init int dax_hmem_init(void)
{
	int rc;

	rc = platform_driver_register(&dax_hmem_platform_driver);
	if (rc)
		return rc;

	rc = platform_driver_register(&dax_hmem_driver);
	if (rc)
		platform_driver_unregister(&dax_hmem_platform_driver);

	return rc;
}

static __exit void dax_hmem_exit(void)
{
	platform_driver_unregister(&dax_hmem_driver);
	platform_driver_unregister(&dax_hmem_platform_driver);
}

module_init(dax_hmem_init);
module_exit(dax_hmem_exit);

/* Allow for CXL to define its own dax regions */
#if IS_ENABLED(CONFIG_CXL_REGION)
#if IS_MODULE(CONFIG_CXL_ACPI)
MODULE_SOFTDEP("pre: cxl_acpi");
#endif
#endif

MODULE_ALIAS("platform:hmem*");
MODULE_ALIAS("platform:hmem_platform*");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");