1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 #include <linux/device.h>
4 #include <linux/slab.h>
5 #include <linux/idr.h>
6 #include <cxlmem.h>
7 #include <cxl.h>
8 #include "core.h"
9 
10 /**
11  * DOC: cxl pmem
12  *
13  * The core CXL PMEM infrastructure supports persistent memory
14  * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
15  * 'bridge' device is added at the root of a CXL device topology if
16  * platform firmware advertises at least one persistent memory capable
17  * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
18  * device. Then for each cxl_memdev in the CXL device topology a bridge
19  * device is added to host a LIBNVDIMM dimm object. When these bridges
20  * are registered native LIBNVDIMM uapis are translated to CXL
21  * operations, for example, namespace label access commands.
22  */
23 
24 static DEFINE_IDA(cxl_nvdimm_bridge_ida);
25 
cxl_nvdimm_bridge_release(struct device * dev)26 static void cxl_nvdimm_bridge_release(struct device *dev)
27 {
28 	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
29 
30 	ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
31 	kfree(cxl_nvb);
32 }
33 
/* Sysfs attribute groups shared by all devices on the cxl_bus */
static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
38 
/* Device type for the root-level bridge to a LIBNVDIMM 'bus' object */
const struct device_type cxl_nvdimm_bridge_type = {
	.name = "cxl_nvdimm_bridge",
	.release = cxl_nvdimm_bridge_release,
	.groups = cxl_nvdimm_bridge_attribute_groups,
};
44 
to_cxl_nvdimm_bridge(struct device * dev)45 struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
46 {
47 	if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
48 			  "not a cxl_nvdimm_bridge device\n"))
49 		return NULL;
50 	return container_of(dev, struct cxl_nvdimm_bridge, dev);
51 }
52 EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
53 
match_nvdimm_bridge(struct device * dev,const void * data)54 __mock int match_nvdimm_bridge(struct device *dev, const void *data)
55 {
56 	return dev->type == &cxl_nvdimm_bridge_type;
57 }
58 
cxl_find_nvdimm_bridge(struct cxl_nvdimm * cxl_nvd)59 struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
60 {
61 	struct device *dev;
62 
63 	dev = bus_find_device(&cxl_bus_type, NULL, cxl_nvd, match_nvdimm_bridge);
64 	if (!dev)
65 		return NULL;
66 	return to_cxl_nvdimm_bridge(dev);
67 }
68 EXPORT_SYMBOL_GPL(cxl_find_nvdimm_bridge);
69 
70 static struct cxl_nvdimm_bridge *
cxl_nvdimm_bridge_alloc(struct cxl_port * port)71 cxl_nvdimm_bridge_alloc(struct cxl_port *port)
72 {
73 	struct cxl_nvdimm_bridge *cxl_nvb;
74 	struct device *dev;
75 	int rc;
76 
77 	cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
78 	if (!cxl_nvb)
79 		return ERR_PTR(-ENOMEM);
80 
81 	rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
82 	if (rc < 0)
83 		goto err;
84 	cxl_nvb->id = rc;
85 
86 	dev = &cxl_nvb->dev;
87 	cxl_nvb->port = port;
88 	cxl_nvb->state = CXL_NVB_NEW;
89 	device_initialize(dev);
90 	device_set_pm_not_required(dev);
91 	dev->parent = &port->dev;
92 	dev->bus = &cxl_bus_type;
93 	dev->type = &cxl_nvdimm_bridge_type;
94 
95 	return cxl_nvb;
96 
97 err:
98 	kfree(cxl_nvb);
99 	return ERR_PTR(rc);
100 }
101 
/*
 * devm action registered by devm_cxl_add_nvdimm_bridge(): mark the bridge
 * dead, flush any deferred driver work, then unregister the device.
 */
static void unregister_nvb(void *_cxl_nvb)
{
	struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
	bool flush;

	/*
	 * If the bridge was ever activated then there might be in-flight state
	 * work to flush. Once the state has been changed to 'dead' then no new
	 * work can be queued by user-triggered bind.
	 */
	device_lock(&cxl_nvb->dev);
	flush = cxl_nvb->state != CXL_NVB_NEW;
	cxl_nvb->state = CXL_NVB_DEAD;
	device_unlock(&cxl_nvb->dev);

	/*
	 * Even though the device core will trigger device_release_driver()
	 * before the unregister, it does not know about the fact that
	 * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
	 * release now and flush it before tearing down the nvdimm device
	 * hierarchy.
	 */
	device_release_driver(&cxl_nvb->dev);
	if (flush)
		flush_work(&cxl_nvb->state_work);
	device_unregister(&cxl_nvb->dev);
}
129 
130 /**
131  * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
132  * @host: platform firmware root device
133  * @port: CXL port at the root of a CXL topology
134  *
135  * Return: bridge device that can host cxl_nvdimm objects
136  */
devm_cxl_add_nvdimm_bridge(struct device * host,struct cxl_port * port)137 struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
138 						     struct cxl_port *port)
139 {
140 	struct cxl_nvdimm_bridge *cxl_nvb;
141 	struct device *dev;
142 	int rc;
143 
144 	if (!IS_ENABLED(CONFIG_CXL_PMEM))
145 		return ERR_PTR(-ENXIO);
146 
147 	cxl_nvb = cxl_nvdimm_bridge_alloc(port);
148 	if (IS_ERR(cxl_nvb))
149 		return cxl_nvb;
150 
151 	dev = &cxl_nvb->dev;
152 	rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
153 	if (rc)
154 		goto err;
155 
156 	rc = device_add(dev);
157 	if (rc)
158 		goto err;
159 
160 	rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
161 	if (rc)
162 		return ERR_PTR(rc);
163 
164 	return cxl_nvb;
165 
166 err:
167 	put_device(dev);
168 	return ERR_PTR(rc);
169 }
170 EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
171 
/* Final put_device() callback: free the cxl_nvdimm object */
static void cxl_nvdimm_release(struct device *dev)
{
	kfree(to_cxl_nvdimm(dev));
}
178 
/* Sysfs attribute groups shared by all devices on the cxl_bus */
static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
183 
/* Device type for per-memdev bridges that host a LIBNVDIMM dimm object */
const struct device_type cxl_nvdimm_type = {
	.name = "cxl_nvdimm",
	.release = cxl_nvdimm_release,
	.groups = cxl_nvdimm_attribute_groups,
};
189 
is_cxl_nvdimm(struct device * dev)190 bool is_cxl_nvdimm(struct device *dev)
191 {
192 	return dev->type == &cxl_nvdimm_type;
193 }
194 EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
195 
to_cxl_nvdimm(struct device * dev)196 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
197 {
198 	if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
199 			  "not a cxl_nvdimm device\n"))
200 		return NULL;
201 	return container_of(dev, struct cxl_nvdimm, dev);
202 }
203 EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
204 
cxl_nvdimm_alloc(struct cxl_memdev * cxlmd)205 static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
206 {
207 	struct cxl_nvdimm *cxl_nvd;
208 	struct device *dev;
209 
210 	cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
211 	if (!cxl_nvd)
212 		return ERR_PTR(-ENOMEM);
213 
214 	dev = &cxl_nvd->dev;
215 	cxl_nvd->cxlmd = cxlmd;
216 	device_initialize(dev);
217 	device_set_pm_not_required(dev);
218 	dev->parent = &cxlmd->dev;
219 	dev->bus = &cxl_bus_type;
220 	dev->type = &cxl_nvdimm_type;
221 
222 	return cxl_nvd;
223 }
224 
/* devm action registered by devm_cxl_add_nvdimm(): drop the cxl_nvdimm */
static void cxl_nvd_unregister(void *dev)
{
	device_unregister(dev);
}
229 
230 /**
231  * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
232  * @host: same host as @cxlmd
233  * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
234  *
235  * Return: 0 on success negative error code on failure.
236  */
devm_cxl_add_nvdimm(struct device * host,struct cxl_memdev * cxlmd)237 int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
238 {
239 	struct cxl_nvdimm *cxl_nvd;
240 	struct device *dev;
241 	int rc;
242 
243 	cxl_nvd = cxl_nvdimm_alloc(cxlmd);
244 	if (IS_ERR(cxl_nvd))
245 		return PTR_ERR(cxl_nvd);
246 
247 	dev = &cxl_nvd->dev;
248 	rc = dev_set_name(dev, "pmem%d", cxlmd->id);
249 	if (rc)
250 		goto err;
251 
252 	rc = device_add(dev);
253 	if (rc)
254 		goto err;
255 
256 	dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
257 		dev_name(dev));
258 
259 	return devm_add_action_or_reset(host, cxl_nvd_unregister, dev);
260 
261 err:
262 	put_device(dev);
263 	return rc;
264 }
265 EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
266