// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>

#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

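/*
 * The seq_file position for the device listing encodes both the hash
 * bucket and a 1-based offset within that bucket:
 *
 *	pos = (bucket << BUCKET_SPACE) | offset
 *
 * Offset 0 is never assigned to a device, so *pos == 0 stays reserved
 * for SEQ_START_TOKEN in dev_seq_start() below.
 */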
static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_head *h;
	unsigned int count = 0, offset = get_offset(*pos);

	h = &net->dev_index_head[get_bucket(*pos)];
	hlist_for_each_entry_rcu(dev, h, index_hlist) {
		if (++count == offset)
			return dev;
	}

	return NULL;
}

static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net_device *dev;
	unsigned int bucket;

	do {
		dev = dev_from_same_bucket(seq, pos);
		if (dev)
			return dev;

		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}

/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
		return NULL;

	return dev_from_bucket(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_from_bucket(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

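/*
 * Print one line of /proc/net/dev statistics.  Several columns are
 * aggregates of rtnl_link_stats64 counters: the RX "drop" column also
 * includes rx_missed_errors, the RX "frame" column sums length, over,
 * CRC and frame errors, and the TX "carrier" column sums carrier,
 * aborted, window and heartbeat errors.
 */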
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev.
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static u32 softnet_backlog_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue) +
	       skb_queue_len_lockless(&sd->process_queue);
}

static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

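/*
 * Emit one /proc/net/softnet_stat line per online CPU, 13 hex fields:
 *
 *	processed dropped time_squeeze 0 0 0 0 0 0 received_rps
 *	flow_limit_count softnet_backlog_len cpu_id
 *
 * The zero fields are placeholders for counters (fastroute, cpu_collision)
 * that no longer exist, presumably kept so existing parsers keep seeing a
 * stable column layout.
 */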
static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl)
		flow_limit_count = fl->count;
	rcu_read_unlock();
#endif

	/* The index is the CPU id owning this sd. Since offline CPUs are not
	 * displayed, it would otherwise not be trivial for user space to map
	 * the data to a specific CPU.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0,	/* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   softnet_backlog_len(sd), (int)seq->index);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

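/*
 * /proc/net/ptype walks the registered protocol handlers in a fixed
 * order: the wildcard handlers on ptype_all first, then each
 * ptype_base[] hash bucket in turn.  The seq_file position is a flat
 * index into that ordering; ptype_seq_start() reserves position 0 for
 * SEQ_START_TOKEN, hence the "*pos - 1" below.
 */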
static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

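/*
 * Create this namespace's entries under /proc/net: "dev", "softnet_stat"
 * and "ptype", plus the wireless extensions entry.  On failure the entries
 * created so far are removed again in reverse order.
 */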
static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
			sizeof(struct seq_net_private)))
		goto out;
	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
			 &softnet_seq_ops))
		goto out_dev;
	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
			sizeof(struct seq_net_private)))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	remove_proc_entry("ptype", net->proc_net);
out_softnet:
	remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
	remove_proc_entry("dev", net->proc_net);
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

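/*
 * Emit one /proc/net/dev_mcast line per multicast address: interface
 * index, device name, reference count, global use flag and the hardware
 * address in hex.  An illustrative line might read
 * "2    eth0            1     0     01005e000001".
 */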
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
	return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};

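/*
 * Register both pernet_operations so the files above show up in every
 * network namespace's /proc/net.
 */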
int __init dev_proc_init(void)
{
	int ret = register_pernet_subsys(&dev_proc_ops);
	if (!ret)
		return register_pernet_subsys(&dev_mc_net_ops);
	return ret;
}