// SPDX-License-Identifier: GPL-2.0
/*
 * SVC Greybus driver.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 */

#include <linux/debugfs.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

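/*
 * Operation timeouts, in milliseconds, passed to gb_operation_sync_timeout().
 * These are longer than the default operation timeout, presumably because
 * eject, activate and resume involve physical and link-level work on the SVC
 * side (see e.g. the pulse-width note in gb_svc_intf_eject() below).
 */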
#define SVC_INTF_EJECT_TIMEOUT		9000
#define SVC_INTF_ACTIVATE_TIMEOUT	6000
#define SVC_INTF_RESUME_TIMEOUT		3000

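/*
 * Incoming SVC requests that need to do more work than the request handler
 * can safely do in-line (e.g. module creation on hotplug) are wrapped in a
 * deferred request and handed off to svc->wq; see
 * gb_svc_queue_deferred_request() and gb_svc_process_deferred_request().
 */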
struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;
};

static int gb_svc_queue_deferred_request(struct gb_operation *operation);

static ssize_t endo_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "0x%04x\n", svc->endo_id);
}
static DEVICE_ATTR_RO(endo_id);

static ssize_t ap_intf_id_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%u\n", svc->ap_intf_id);
}
static DEVICE_ATTR_RO(ap_intf_id);

/*
 * FIXME: this is a hack. We need to do this "right" and clean the interface
 * up properly, not just forcibly yank the thing out of the system and hope
 * for the best. But for now, people want their modules to come out without
 * having to throw the thing to the ground or get out a screwdriver.
 */
static ssize_t intf_eject_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	unsigned short intf_id;
	int ret;

	ret = kstrtou16(buf, 10, &intf_id);
	if (ret < 0)
		return ret;

	dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);

	ret = gb_svc_intf_eject(svc, intf_id);
	if (ret < 0)
		return ret;

	return len;
}
static DEVICE_ATTR_WO(intf_eject);
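/*
 * Example usage (the path is illustrative; the actual device name depends on
 * the bus id assigned to the SVC, e.g. "1-svc"):
 *
 *	echo 2 > /sys/bus/greybus/devices/1-svc/intf_eject
 */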

static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	return sprintf(buf, "%s\n",
		       gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
}

static ssize_t watchdog_store(struct device *dev,
			      struct device_attribute *attr, const char *buf,
			      size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);
	int retval;
	bool user_request;

	retval = strtobool(buf, &user_request);
	if (retval)
		return retval;

	if (user_request)
		retval = gb_svc_watchdog_enable(svc);
	else
		retval = gb_svc_watchdog_disable(svc);
	if (retval)
		return retval;
	return len;
}
static DEVICE_ATTR_RW(watchdog);

static ssize_t watchdog_action_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
		return sprintf(buf, "panic\n");
	else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
		return sprintf(buf, "reset\n");

	return -EINVAL;
}

static ssize_t watchdog_action_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (sysfs_streq(buf, "panic"))
		svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
	else if (sysfs_streq(buf, "reset"))
		svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
	else
		return -EINVAL;

	return len;
}
static DEVICE_ATTR_RW(watchdog_action);
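/*
 * Example usage (illustrative path). Note that "watchdog" reads back as
 * "enabled"/"disabled" but is written as a boolean string accepted by
 * strtobool(), e.g. "1"/"0" or "y"/"n":
 *
 *	echo 1 > /sys/bus/greybus/devices/1-svc/watchdog
 *	echo panic > /sys/bus/greybus/devices/1-svc/watchdog_action
 */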

static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
{
	struct gb_svc_pwrmon_rail_count_get_response response;
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
		return ret;
	}

	*value = response.rail_count;

	return 0;
}

static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
		struct gb_svc_pwrmon_rail_names_get_response *response,
		size_t bufsize)
{
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
				response, bufsize);
	if (ret) {
		dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
		return ret;
	}

	if (response->status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev,
			"SVC error while getting rail names: %u\n",
			response->status);
		return -EREMOTEIO;
	}

	return 0;
}

static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}

static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	&dev_attr_watchdog_action.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
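/*
 * ATTRIBUTE_GROUPS() generates the svc_groups table that gb_svc_create()
 * assigns to svc->dev.groups, so the attributes above show up in sysfs as
 * soon as the device is added.
 */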

int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
	struct gb_svc_intf_device_id_request request;

	request.intf_id = intf_id;
	request.device_id = device_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
				 &request, sizeof(request), NULL, 0);
}

int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in the SVC is long, so we need
	 * to increase the timeout so that the operation will not return too
	 * soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}

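/*
 * The next three helpers share the same enable/disable pattern: vsys gates
 * module power, refclk the reference clock and unipro the UniPro link of the
 * given interface. Each maps the boolean onto a pair of request types and
 * treats any result code other than OK as a remote I/O error.
 */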
int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_vsys_request request;
	struct gb_svc_intf_vsys_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_VSYS_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_VSYS_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_refclk_request request;
	struct gb_svc_intf_refclk_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_REFCLK_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
{
	struct gb_svc_intf_unipro_request request;
	struct gb_svc_intf_unipro_response response;
	int type, ret;

	request.intf_id = intf_id;

	if (enable)
		type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
	else
		type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;

	ret = gb_operation_sync(svc->connection, type,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;
	if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
		return -EREMOTEIO;
	return 0;
}

int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
{
	struct gb_svc_intf_activate_request request;
	struct gb_svc_intf_activate_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_ACTIVATE,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_ACTIVATE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	*intf_type = response.intf_type;

	return 0;
}

int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_resume_request request;
	struct gb_svc_intf_resume_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_RESUME,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_RESUME_TIMEOUT);
	if (ret < 0) {
		dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
			intf_id, ret);
		return ret;
	}

	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to resume interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	return 0;
}

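/*
 * DME (UniPro Device Management Entity) peer accessors: read or write a DME
 * attribute, identified by (attr, selector), on the interface at the far end
 * of the link, with the SVC acting as the intermediary.
 */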
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
			intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}

int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
			intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}

int gb_svc_connection_create(struct gb_svc *svc,
			     u8 intf1_id, u16 cport1_id,
			     u8 intf2_id, u16 cport2_id,
			     u8 cport_flags)
{
	struct gb_svc_conn_create_request request;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);
	request.tc = 0;		/* TC0 */
	request.flags = cport_flags;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
				 &request, sizeof(request), NULL, 0);
}

void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}

/* Creates bi-directional routes between the devices */
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
			u8 intf2_id, u8 dev2_id)
{
	struct gb_svc_route_create_request request;

	request.intf1_id = intf1_id;
	request.dev1_id = dev1_id;
	request.intf2_id = intf2_id;
	request.dev2_id = dev2_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
				 &request, sizeof(request), NULL, 0);
}

/* Destroys bi-directional routes between the devices */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
			intf1_id, intf2_id, ret);
	}
}

int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 tx_amplitude, u8 tx_hs_equalizer,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks,
			       struct gb_svc_l2_timer_cfg *local,
			       struct gb_svc_l2_timer_cfg *remote)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.tx_amplitude = tx_amplitude;
	request.tx_hs_equalizer = tx_hs_equalizer;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);
	if (local)
		request.local_l2timerdata = *local;
	if (remote)
		request.remote_l2timerdata = *remote;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
		dev_err(&svc->dev, "set power mode = %d\n", result_code);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);

int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
	request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
	request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0) {
		dev_err(&svc->dev,
			"failed to send set power mode operation to interface %u: %d\n",
			intf_id, ret);
		return ret;
	}

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_OK) {
		dev_err(&svc->dev,
			"failed to hibernate the link for interface %u: %u\n",
			intf_id, result_code);
		return -EIO;
	}

	return 0;
}

int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}

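/*
 * Version negotiation: the SVC initiates with its (major, minor) version.
 * A major version newer than ours is rejected; otherwise we adopt the
 * requested version and echo it back in the response.
 */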
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}

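/*
 * The three pwrmon debugfs read handlers below are identical except for the
 * measurement type requested (voltage, current or power); each read issues
 * a fresh synchronous sample request to the SVC.
 */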
static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_VOL, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get voltage sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_CURR, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get current sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails =
		file_inode(file)->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_PWR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}

static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};

static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", 0444, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", 0444, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", 0444, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}

static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}

static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}

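/*
 * The SVC_HELLO request carries the endo id and the id of the interface the
 * AP is attached to; only once these are known is the svc device registered
 * with the driver core. The power-mode tweak that follows is queued as a
 * deferred request since it blocks on further SVC operations.
 */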
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		goto err_unregister_device;
	}

	gb_svc_debugfs_init(svc);

	return gb_svc_queue_deferred_request(op);

err_unregister_device:
	gb_svc_watchdog_destroy(svc);
	device_del(&svc->dev);
	return ret;
}

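/*
 * Interfaces of a module are numbered consecutively starting at the module
 * id (the id of its primary interface), which is what makes the simple range
 * check below sufficient for the lookup.
 */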
static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
						    u8 intf_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;

	list_for_each_entry(module, &hd->modules, hd_node) {
		module_id = module->module_id;
		num_interfaces = module->num_interfaces;

		if (intf_id >= module_id &&
		    intf_id < module_id + num_interfaces) {
			return module->interfaces[intf_id - module_id];
		}
	}

	return NULL;
}

static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;

	list_for_each_entry(module, &hd->modules, hd_node) {
		if (module->module_id == module_id)
			return module;
	}

	return NULL;
}

static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes, is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE,
					 GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);

	if (ret)
		dev_warn(&svc->dev,
			 "power mode change failed on AP to switch link: %d\n",
			 ret);
}

static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
		__func__, module_id, num_interfaces, flags);

	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
			 module_id);
	}

	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
			 module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		gb_module_put(module);
		return;
	}

	list_add(&module->hd_node, &hd->modules);
}

static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
			 module_id);
		return;
	}

	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);
}

static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
		__func__, intf_id, result_code, mailbox);

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}

static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	case GB_SVC_TYPE_INTF_OOPS:
		gb_svc_process_intf_oops(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	gb_operation_put(operation);
	kfree(dr);
}

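/*
 * Queueing takes an extra reference on the operation so that it stays valid
 * until the worker runs; gb_svc_process_deferred_request() drops it again
 * once the request has been handled.
 */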
static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
	struct gb_svc *svc = gb_connection_get_data(operation->connection);
	struct gb_svc_deferred_request *dr;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	gb_operation_get(operation);

	dr->operation = operation;
	INIT_WORK(&dr->work, gb_svc_process_deferred_request);

	queue_work(svc->wq, &dr->work);

	return 0;
}

static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	/* FIXME Reset the interface here */

	return 0;
}

static int gb_svc_module_inserted_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_inserted_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_module_removed_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_removed_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_oops_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_oops_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_mailbox_event_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}

static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially),
	 * and the code below enforces it. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but only after the two above.
	 *
	 * Incoming requests are guaranteed to be serialized, so we don't
	 * need to protect 'state' against races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}

static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}

struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};

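/*
 * Lifecycle: gb_svc_create() allocates the svc and its static connection,
 * gb_svc_add() enables the connection (the device itself is only added from
 * the SVC_HELLO handler), gb_svc_del() tears everything down again, and the
 * final gb_svc_put() releases the structure via gb_svc_release().
 */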
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
			PTR_ERR(svc->connection));
		/* Clear the error pointer so gb_svc_release() won't destroy it. */
		svc->connection = NULL;
		goto err_put_device;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_put_device:
	put_device(&svc->dev);
	return NULL;
}

int gb_svc_add(struct gb_svc *svc)
{
	int ret;

	/*
	 * The SVC protocol is currently driven by the SVC, so the SVC device
	 * is added from the connection request handler when enough
	 * information has been received.
	 */
	ret = gb_connection_enable(svc->connection);
	if (ret)
		return ret;

	return 0;
}

static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}

void gb_svc_del(struct gb_svc *svc)
{
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}

void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}