// SPDX-License-Identifier: GPL-2.0
/*
 * Management Component Transport Protocol (MCTP) - device implementation.
 *
 * Copyright (c) 2021 Code Construct
 * Copyright (c) 2021 Google
 */

#include <linux/if_link.h>
#include <linux/mctp.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

#include <net/addrconf.h>
#include <net/netlink.h>
#include <net/mctp.h>
#include <net/mctpdevice.h>
#include <net/sock.h>

struct mctp_dump_cb {
	int h;
	int idx;
	size_t a_idx;
};

/* unlocked: caller must hold rcu_read_lock */
struct mctp_dev *__mctp_dev_get(const struct net_device *dev)
{
	return rcu_dereference(dev->mctp_ptr);
}

struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->mctp_ptr);
}

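/* Appends a single RTM_NEWADDR message for @eid to the dump skb. Returns
 * -EMSGSIZE if the skb is full; the caller stops and the dump is resumed
 * in a later callback.
 */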
static int mctp_fill_addrinfo(struct sk_buff *skb, struct netlink_callback *cb,
			      struct mctp_dev *mdev, mctp_eid_t eid)
{
	struct ifaddrmsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWADDR, sizeof(*hdr), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifa_family = AF_MCTP;
	hdr->ifa_prefixlen = 0;
	hdr->ifa_flags = 0;
	hdr->ifa_scope = 0;
	hdr->ifa_index = mdev->dev->ifindex;

	if (nla_put_u8(skb, IFA_LOCAL, eid))
		goto cancel;

	if (nla_put_u8(skb, IFA_ADDRESS, eid))
		goto cancel;

	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

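/* Dumps all local EIDs of one device, resuming from mcb->a_idx so a
 * partially-filled dump can continue where it left off.
 */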
static int mctp_dump_dev_addrinfo(struct mctp_dev *mdev, struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct mctp_dump_cb *mcb = (void *)cb->ctx;
	int rc = 0;

	for (; mcb->a_idx < mdev->num_addrs; mcb->a_idx++) {
		rc = mctp_fill_addrinfo(skb, cb, mdev, mdev->addrs[mcb->a_idx]);
		if (rc < 0)
			break;
	}

	return rc;
}

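/* RTM_GETADDR dump handler: walks the per-netns device index hash table
 * under RCU, using struct mctp_dump_cb (in cb->ctx) to record the hash
 * bucket, device index and address position between callbacks.
 */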
static int mctp_dump_addrinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct mctp_dump_cb *mcb = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	struct net_device *dev;
	struct ifaddrmsg *hdr;
	struct mctp_dev *mdev;
	int ifindex;
	int idx, rc;

	hdr = nlmsg_data(cb->nlh);
	// filter by ifindex if requested
	ifindex = hdr->ifa_index;

	rcu_read_lock();
	for (; mcb->h < NETDEV_HASHENTRIES; mcb->h++, mcb->idx = 0) {
		idx = 0;
		head = &net->dev_index_head[mcb->h];
		hlist_for_each_entry_rcu(dev, head, index_hlist) {
			if (idx >= mcb->idx &&
			    (ifindex == 0 || ifindex == dev->ifindex)) {
				mdev = __mctp_dev_get(dev);
				if (mdev) {
					rc = mctp_dump_dev_addrinfo(mdev,
								    skb, cb);
					// Error indicates full buffer, this
					// callback will get retried.
					if (rc < 0)
						goto out;
				}
			}
			idx++;
			// reset for next iteration
			mcb->a_idx = 0;
		}
	}
out:
	rcu_read_unlock();
	mcb->idx = idx;

	return skb->len;
}

static const struct nla_policy ifa_mctp_policy[IFA_MAX + 1] = {
	[IFA_ADDRESS]		= { .type = NLA_U8 },
	[IFA_LOCAL]		= { .type = NLA_U8 },
};

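/* RTM_NEWADDR handler: adds a local EID to the device's address array.
 * Runs under RTNL; a new array is built and swapped in under addrs_lock so
 * readers always see a consistent view.
 */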
static int mctp_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX + 1];
	struct net_device *dev;
	struct mctp_addr *addr;
	struct mctp_dev *mdev;
	struct ifaddrmsg *ifm;
	unsigned long flags;
	u8 *tmp_addrs;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
			 extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);

	if (tb[IFA_LOCAL])
		addr = nla_data(tb[IFA_LOCAL]);
	else if (tb[IFA_ADDRESS])
		addr = nla_data(tb[IFA_ADDRESS]);
	else
		return -EINVAL;

	/* find device */
	dev = __dev_get_by_index(net, ifm->ifa_index);
	if (!dev)
		return -ENODEV;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return -ENODEV;

	if (!mctp_address_ok(addr->s_addr))
		return -EINVAL;

	/* Prevent duplicates. Under RTNL so don't need to lock for reading */
	if (memchr(mdev->addrs, addr->s_addr, mdev->num_addrs))
		return -EEXIST;

	tmp_addrs = kmalloc(mdev->num_addrs + 1, GFP_KERNEL);
	if (!tmp_addrs)
		return -ENOMEM;
	memcpy(tmp_addrs, mdev->addrs, mdev->num_addrs);
	tmp_addrs[mdev->num_addrs] = addr->s_addr;

	/* Lock to write */
	spin_lock_irqsave(&mdev->addrs_lock, flags);
	mdev->num_addrs++;
	swap(mdev->addrs, tmp_addrs);
	spin_unlock_irqrestore(&mdev->addrs_lock, flags);

	kfree(tmp_addrs);

	mctp_route_add_local(mdev, addr->s_addr);

	return 0;
}

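/* RTM_DELADDR handler: removes a local EID and its local route. The address
 * array is compacted in place under addrs_lock.
 */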
static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX + 1];
	struct net_device *dev;
	struct mctp_addr *addr;
	struct mctp_dev *mdev;
	struct ifaddrmsg *ifm;
	unsigned long flags;
	u8 *pos;
	int rc;

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_mctp_policy,
			 extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);

	if (tb[IFA_LOCAL])
		addr = nla_data(tb[IFA_LOCAL]);
	else if (tb[IFA_ADDRESS])
		addr = nla_data(tb[IFA_ADDRESS]);
	else
		return -EINVAL;

	/* find device */
	dev = __dev_get_by_index(net, ifm->ifa_index);
	if (!dev)
		return -ENODEV;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return -ENODEV;

	pos = memchr(mdev->addrs, addr->s_addr, mdev->num_addrs);
	if (!pos)
		return -ENOENT;

	rc = mctp_route_remove_local(mdev, addr->s_addr);
	// we can ignore -ENOENT in the case a route was already removed
	if (rc < 0 && rc != -ENOENT)
		return rc;

	spin_lock_irqsave(&mdev->addrs_lock, flags);
	memmove(pos, pos + 1, mdev->num_addrs - 1 - (pos - mdev->addrs));
	mdev->num_addrs--;
	spin_unlock_irqrestore(&mdev->addrs_lock, flags);

	return 0;
}

void mctp_dev_hold(struct mctp_dev *mdev)
{
	refcount_inc(&mdev->refs);
}

void mctp_dev_put(struct mctp_dev *mdev)
{
	if (refcount_dec_and_test(&mdev->refs)) {
		dev_put(mdev->dev);
		kfree_rcu(mdev, rcu);
	}
}

void mctp_dev_release_key(struct mctp_dev *dev, struct mctp_sk_key *key)
	__must_hold(&key->lock)
{
	if (!dev)
		return;
	if (dev->ops && dev->ops->release_flow)
		dev->ops->release_flow(dev, key);
	key->dev = NULL;
	mctp_dev_put(dev);
}

void mctp_dev_set_key(struct mctp_dev *dev, struct mctp_sk_key *key)
	__must_hold(&key->lock)
{
	mctp_dev_hold(dev);
	key->dev = dev;
}

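/* Allocates a struct mctp_dev and attaches it to @dev via dev->mctp_ptr,
 * taking a reference on the netdev. Called under RTNL.
 */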
static struct mctp_dev *mctp_add_dev(struct net_device *dev)
{
	struct mctp_dev *mdev;

	ASSERT_RTNL();

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mdev->addrs_lock);

	mdev->net = mctp_default_net(dev_net(dev));

	/* associate to net_device */
	refcount_set(&mdev->refs, 1);
	rcu_assign_pointer(dev->mctp_ptr, mdev);

	dev_hold(dev);
	mdev->dev = dev;

	return mdev;
}

static int mctp_fill_link_af(struct sk_buff *skb,
			     const struct net_device *dev, u32 ext_filter_mask)
{
	struct mctp_dev *mdev;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return -ENODATA;
	if (nla_put_u32(skb, IFLA_MCTP_NET, mdev->net))
		return -EMSGSIZE;
	return 0;
}

static size_t mctp_get_link_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct mctp_dev *mdev;
	unsigned int ret;

	/* caller holds RCU */
	mdev = __mctp_dev_get(dev);
	if (!mdev)
		return 0;
	ret = nla_total_size(4); /* IFLA_MCTP_NET */
	return ret;
}

static const struct nla_policy ifla_af_mctp_policy[IFLA_MCTP_MAX + 1] = {
	[IFLA_MCTP_NET]		= { .type = NLA_U32 },
};

static int mctp_set_link_af(struct net_device *dev, const struct nlattr *attr,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_MCTP_MAX + 1];
	struct mctp_dev *mdev;
	int rc;

	rc = nla_parse_nested(tb, IFLA_MCTP_MAX, attr, ifla_af_mctp_policy,
			      NULL);
	if (rc)
		return rc;

	mdev = mctp_dev_get_rtnl(dev);
	if (!mdev)
		return 0;

	if (tb[IFLA_MCTP_NET])
		WRITE_ONCE(mdev->net, nla_get_u32(tb[IFLA_MCTP_NET]));

	return 0;
}

/* Matches netdev types that should have MCTP handling */
static bool mctp_known(struct net_device *dev)
{
	/* only register specific types (inc. NONE for TUN devices) */
	return dev->type == ARPHRD_MCTP ||
		   dev->type == ARPHRD_LOOPBACK ||
		   dev->type == ARPHRD_NONE;
}

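/* Detaches and frees the mctp_dev on NETDEV_UNREGISTER, tearing down any
 * routes and neighbour entries that reference it.
 */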
static void mctp_unregister(struct net_device *dev)
{
	struct mctp_dev *mdev;

	mdev = mctp_dev_get_rtnl(dev);
	if (mctp_known(dev) != (bool)mdev) {
		// Sanity check, should match what was set in mctp_register
		netdev_warn(dev, "%s: mdev pointer %d but type (%d) match is %d",
			    __func__, (bool)mdev, mctp_known(dev), dev->type);
		return;
	}
	if (!mdev)
		return;

	RCU_INIT_POINTER(mdev->dev->mctp_ptr, NULL);

	mctp_route_remove_dev(mdev);
	mctp_neigh_remove_dev(mdev);
	kfree(mdev->addrs);

	mctp_dev_put(mdev);
}

static int mctp_register(struct net_device *dev)
{
	struct mctp_dev *mdev;

	/* Already registered? */
	mdev = rtnl_dereference(dev->mctp_ptr);

	if (mdev) {
		if (!mctp_known(dev))
			netdev_warn(dev, "%s: mctp_dev set for unknown type %d",
				    __func__, dev->type);
		return 0;
	}

	/* only register specific types */
	if (!mctp_known(dev))
		return 0;

	mdev = mctp_add_dev(dev);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	return 0;
}

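/* netdevice notifier: creates the mctp_dev on NETDEV_REGISTER for known
 * interface types, and tears it down on NETDEV_UNREGISTER.
 */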
static int mctp_dev_notify(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int rc;

	switch (event) {
	case NETDEV_REGISTER:
		rc = mctp_register(dev);
		if (rc)
			return notifier_from_errno(rc);
		break;
	case NETDEV_UNREGISTER:
		mctp_unregister(dev);
		break;
	}

	return NOTIFY_OK;
}

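/* Driver-facing registration: attaches a mctp_dev with the given ops before
 * registering the netdev, so the ops are in place by the time the register
 * notifier runs. mctp_register_netdev() is the exported wrapper that takes
 * RTNL.
 */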
static int mctp_register_netdevice(struct net_device *dev,
				   const struct mctp_netdev_ops *ops)
{
	struct mctp_dev *mdev;

	mdev = mctp_add_dev(dev);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	mdev->ops = ops;

	return register_netdevice(dev);
}

int mctp_register_netdev(struct net_device *dev,
			 const struct mctp_netdev_ops *ops)
{
	int rc;

	rtnl_lock();
	rc = mctp_register_netdevice(dev, ops);
	rtnl_unlock();

	return rc;
}
EXPORT_SYMBOL_GPL(mctp_register_netdev);

void mctp_unregister_netdev(struct net_device *dev)
{
	unregister_netdev(dev);
}
EXPORT_SYMBOL_GPL(mctp_unregister_netdev);

static struct rtnl_af_ops mctp_af_ops = {
	.family = AF_MCTP,
	.fill_link_af = mctp_fill_link_af,
	.get_link_af_size = mctp_get_link_af_size,
	.set_link_af = mctp_set_link_af,
};

static struct notifier_block mctp_dev_nb = {
	.notifier_call = mctp_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY,
};

void __init mctp_device_init(void)
{
	register_netdevice_notifier(&mctp_dev_nb);

	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_GETADDR,
			     NULL, mctp_dump_addrinfo, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_NEWADDR,
			     mctp_rtm_newaddr, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_MCTP, RTM_DELADDR,
			     mctp_rtm_deladdr, NULL, 0);
	rtnl_af_register(&mctp_af_ops);
}

void __exit mctp_device_exit(void)
{
	rtnl_af_unregister(&mctp_af_ops);
	rtnl_unregister(PF_MCTP, RTM_DELADDR);
	rtnl_unregister(PF_MCTP, RTM_NEWADDR);
	rtnl_unregister(PF_MCTP, RTM_GETADDR);

	unregister_netdevice_notifier(&mctp_dev_nb);
}