1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-21 Intel Corporation.
4  */
5 
6 #include <linux/delay.h>
7 
8 #include "iosm_ipc_chnl_cfg.h"
9 #include "iosm_ipc_devlink.h"
10 #include "iosm_ipc_imem.h"
11 #include "iosm_ipc_imem_ops.h"
12 #include "iosm_ipc_port.h"
13 #include "iosm_ipc_task_queue.h"
14 
15 /* Open a packet data online channel between the network layer and CP. */
ipc_imem_sys_wwan_open(struct iosm_imem * ipc_imem,int if_id)16 int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
17 {
18 	dev_dbg(ipc_imem->dev, "%s if id: %d",
19 		ipc_imem_phase_get_string(ipc_imem->phase), if_id);
20 
21 	/* The network interface is only supported in the runtime phase. */
22 	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
23 		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
24 			ipc_imem_phase_get_string(ipc_imem->phase));
25 		return -EIO;
26 	}
27 
28 	return ipc_mux_open_session(ipc_imem->mux, if_id);
29 }
30 
31 /* Release a net link to CP. */
ipc_imem_sys_wwan_close(struct iosm_imem * ipc_imem,int if_id,int channel_id)32 void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
33 			     int channel_id)
34 {
35 	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
36 	    if_id <= IP_MUX_SESSION_END)
37 		ipc_mux_close_session(ipc_imem->mux, if_id);
38 }
39 
40 /* Tasklet call to do uplink transfer. */
ipc_imem_tq_cdev_write(struct iosm_imem * ipc_imem,int arg,void * msg,size_t size)41 static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
42 				  void *msg, size_t size)
43 {
44 	ipc_imem_ul_send(ipc_imem);
45 
46 	return 0;
47 }
48 
/* Through tasklet to do sio write. */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
	/* Schedule ipc_imem_tq_cdev_write() on the IPC task queue; no
	 * argument payload is needed (arg 0, msg NULL, size 0, no wait).
	 */
	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
					NULL, 0, false);
}
55 
56 /* Function for transfer UL data */
ipc_imem_sys_wwan_transmit(struct iosm_imem * ipc_imem,int if_id,int channel_id,struct sk_buff * skb)57 int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
58 			       int if_id, int channel_id, struct sk_buff *skb)
59 {
60 	int ret = -EINVAL;
61 
62 	if (!ipc_imem || channel_id < 0)
63 		goto out;
64 
65 	/* Is CP Running? */
66 	if (ipc_imem->phase != IPC_P_RUN) {
67 		dev_dbg(ipc_imem->dev, "phase %s transmit",
68 			ipc_imem_phase_get_string(ipc_imem->phase));
69 		ret = -EIO;
70 		goto out;
71 	}
72 
73 	/* Route the UL packet through IP MUX Layer */
74 	ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
75 out:
76 	return ret;
77 }
78 
/* Initialize wwan channel */
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
				enum ipc_mux_protocol mux_type)
{
	struct ipc_chnl_cfg chnl_cfg = { 0 };

	/* Cache the modem (CP) version read from MMIO. */
	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);

	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
	if (ipc_imem->cp_version == -1) {
		dev_err(ipc_imem->dev, "invalid CP version");
		return;
	}

	/* Fetch the channel configuration for the next free channel index and
	 * create the WWAN channel with interrupt moderation switched off.
	 * NOTE(review): mux_type is not used in this function — confirm
	 * whether the channel configuration should depend on it.
	 */
	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
			      IRQ_MOD_OFF);

	/* WWAN registration. */
	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
	if (!ipc_imem->wwan)
		dev_err(ipc_imem->dev,
			"failed to register the ipc_wwan interfaces");
}
103 
104 /* Map SKB to DMA for transfer */
ipc_imem_map_skb_to_dma(struct iosm_imem * ipc_imem,struct sk_buff * skb)105 static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
106 				   struct sk_buff *skb)
107 {
108 	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
109 	char *buf = skb->data;
110 	int len = skb->len;
111 	dma_addr_t mapping;
112 	int ret;
113 
114 	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
115 
116 	if (ret)
117 		goto err;
118 
119 	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
120 
121 	IPC_CB(skb)->mapping = mapping;
122 	IPC_CB(skb)->direction = DMA_TO_DEVICE;
123 	IPC_CB(skb)->len = len;
124 	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
125 
126 err:
127 	return ret;
128 }
129 
130 /* return true if channel is ready for use */
ipc_imem_is_channel_active(struct iosm_imem * ipc_imem,struct ipc_mem_channel * channel)131 static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
132 				       struct ipc_mem_channel *channel)
133 {
134 	enum ipc_phase phase;
135 
136 	/* Update the current operation phase. */
137 	phase = ipc_imem->phase;
138 
139 	/* Select the operation depending on the execution stage. */
140 	switch (phase) {
141 	case IPC_P_RUN:
142 	case IPC_P_PSI:
143 	case IPC_P_EBL:
144 		break;
145 
146 	case IPC_P_ROM:
147 		/* Prepare the PSI image for the CP ROM driver and
148 		 * suspend the flash app.
149 		 */
150 		if (channel->state != IMEM_CHANNEL_RESERVED) {
151 			dev_err(ipc_imem->dev,
152 				"ch[%d]:invalid channel state %d,expected %d",
153 				channel->channel_id, channel->state,
154 				IMEM_CHANNEL_RESERVED);
155 			goto channel_unavailable;
156 		}
157 		goto channel_available;
158 
159 	default:
160 		/* Ignore uplink actions in all other phases. */
161 		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
162 			channel->channel_id, phase);
163 		goto channel_unavailable;
164 	}
165 	/* Check the full availability of the channel. */
166 	if (channel->state != IMEM_CHANNEL_ACTIVE) {
167 		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
168 			channel->channel_id, channel->state);
169 		goto channel_unavailable;
170 	}
171 
172 channel_available:
173 	return true;
174 
175 channel_unavailable:
176 	return false;
177 }
178 
/* Release a sio link to CP. */
void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev)
{
	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
	struct ipc_mem_channel *channel = ipc_cdev->channel;
	enum ipc_phase curr_phase;
	int status = 0;
	u32 tail = 0;

	curr_phase = ipc_imem->phase;

	/* If current phase is IPC_P_OFF or SIO ID is -ve then
	 * channel is already freed. Nothing to do.
	 */
	if (curr_phase == IPC_P_OFF) {
		dev_err(ipc_imem->dev,
			"nothing to do. Current Phase: %s",
			ipc_imem_phase_get_string(curr_phase));
		return;
	}

	/* A channel already in FREE state must not be closed twice. */
	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel->channel_id, channel->state);
		return;
	}

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		ipc_imem->app_notify_ul_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * UL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			/* Zero means the wait timed out with data pending. */
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}

		ipc_imem->app_notify_ul_pend = 0;
	}

	/* Query the live DL tail index from the protocol layer; a mismatch
	 * with the cached tail means DL data is still pending, so wait for
	 * Timeout/Completion before closing the pipe.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		ipc_imem->app_notify_dl_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * DL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}

		ipc_imem->app_notify_dl_pend = 0;
	}

	/* Due to wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel is closed. In this
	 * small window there could be HP update from Host Driver. Hence update
	 * the channel state as CLOSING to avoid unnecessary interrupt
	 * towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

	ipc_imem_channel_free(channel);
}
268 
269 /* Open a PORT link to CP and return the channel */
ipc_imem_sys_port_open(struct iosm_imem * ipc_imem,int chl_id,int hp_id)270 struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
271 					       int chl_id, int hp_id)
272 {
273 	struct ipc_mem_channel *channel;
274 	int ch_id;
275 
276 	/* The PORT interface is only supported in the runtime phase. */
277 	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
278 		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
279 			ipc_imem_phase_get_string(ipc_imem->phase));
280 		return NULL;
281 	}
282 
283 	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
284 
285 	if (ch_id < 0) {
286 		dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
287 		return NULL;
288 	}
289 
290 	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
291 
292 	if (!channel) {
293 		dev_err(ipc_imem->dev, "PORT channel id open failed");
294 		return NULL;
295 	}
296 
297 	return channel;
298 }
299 
300 /* transfer skb to modem */
ipc_imem_sys_cdev_write(struct iosm_cdev * ipc_cdev,struct sk_buff * skb)301 int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
302 {
303 	struct ipc_mem_channel *channel = ipc_cdev->channel;
304 	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
305 	int ret = -EIO;
306 
307 	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
308 	    ipc_imem->phase == IPC_P_OFF_REQ)
309 		goto out;
310 
311 	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
312 
313 	if (ret)
314 		goto out;
315 
316 	/* Add skb to the uplink skbuf accumulator. */
317 	skb_queue_tail(&channel->ul_list, skb);
318 
319 	ret = ipc_imem_call_cdev_write(ipc_imem);
320 
321 	if (ret) {
322 		skb_dequeue_tail(&channel->ul_list);
323 		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
324 			ipc_cdev->channel->channel_id);
325 	}
326 out:
327 	return ret;
328 }
329 
/* Open a SIO link to CP and return the channel instance */
struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	enum ipc_phase phase;
	int channel_id;

	phase = ipc_imem_phase_update(ipc_imem);
	switch (phase) {
	case IPC_P_OFF:
	case IPC_P_ROM:
		/* Get a channel id as flash id and reserve it. */
		channel_id = ipc_imem_channel_alloc(ipc_imem,
						    IPC_MEM_CTRL_CHL_ID_7,
						    IPC_CTYPE_CTRL);

		if (channel_id < 0) {
			dev_err(ipc_imem->dev,
				"reservation of a flash channel id failed");
			goto error;
		}

		/* Remember the flash channel id for the later PSI/EBL open. */
		ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
		channel = &ipc_imem->channels[channel_id];

		/* Enqueue chip info data to be read */
		if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
			dev_err(ipc_imem->dev, "Enqueue of chip info failed");
			/* Release the reservation again on failure. */
			channel->state = IMEM_CHANNEL_FREE;
			goto error;
		}

		return channel;

	case IPC_P_PSI:
	case IPC_P_EBL:
		/* The flash channel id was reserved in the ROM phase; verify
		 * the CP version and open the already reserved channel.
		 */
		ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
		if (ipc_imem->cp_version == -1) {
			dev_err(ipc_imem->dev, "invalid CP version");
			goto error;
		}

		channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
		return ipc_imem_channel_open(ipc_imem, channel_id,
					     IPC_HP_CDEV_OPEN);

	default:
		/* CP is in the wrong state (e.g. CRASH or CD_READY) */
		dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
	}
error:
	return NULL;
}
383 
/* Release a SIO channel link to CP. */
void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;
	struct ipc_mem_channel *channel;
	int status = 0;
	u32 tail = 0;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	/* Poll the CP execution stage every 20 ms until it reaches RUN or
	 * PSI, for at most boot_check_timeout milliseconds in total.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
		    exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;
		msleep(20);
		boot_check_timeout -= 20;
	} while (boot_check_timeout > 0);

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->ul_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			/* Zero means the wait timed out with data pending. */
			dev_dbg(ipc_imem->dev,
				"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}
	}

	/* Query the live DL tail index; a mismatch with the cached tail means
	 * DL data is still pending, so wait before closing the pipe.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->dl_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}
	}

	/* Due to wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel is closed. In this
	 * small window there could be HP update from Host Driver. Hence update
	 * the channel state as CLOSING to avoid unnecessary interrupt
	 * towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;
	/* Release the pipe resources */
	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
	ipc_imem->nr_of_channels--;
}
449 
/* Hand a received downlink skb to the devlink reader side. */
void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
				    struct sk_buff *skb)
{
	/* Queue first, then signal, so a reader woken via read_sem always
	 * finds the skb already on rx_list.
	 */
	skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
	complete(&ipc_devlink->devlink_sio.read_sem);
}
456 
457 /* PSI transfer */
ipc_imem_sys_psi_transfer(struct iosm_imem * ipc_imem,struct ipc_mem_channel * channel,unsigned char * buf,int count)458 static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
459 				     struct ipc_mem_channel *channel,
460 				     unsigned char *buf, int count)
461 {
462 	int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
463 	enum ipc_mem_exec_stage exec_stage;
464 
465 	dma_addr_t mapping = 0;
466 	int ret;
467 
468 	ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
469 				DMA_TO_DEVICE);
470 	if (ret)
471 		goto pcie_addr_map_fail;
472 
473 	/* Save the PSI information for the CP ROM driver on the doorbell
474 	 * scratchpad.
475 	 */
476 	ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
477 	ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
478 
479 	ret = wait_for_completion_interruptible_timeout
480 		(&channel->ul_sem,
481 		 msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
482 
483 	if (ret <= 0) {
484 		dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
485 			ret);
486 		goto psi_transfer_fail;
487 	}
488 	/* If the PSI download fails, return the CP boot ROM exit code */
489 	if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
490 	    ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
491 		ret = (-1) * ((int)ipc_imem->rom_exit_code);
492 		goto psi_transfer_fail;
493 	}
494 
495 	dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");
496 
497 	/* Wait psi_start_timeout milliseconds until the CP PSI image is
498 	 * running and updates the execution_stage field with
499 	 * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
500 	 */
501 	do {
502 		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
503 
504 		if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
505 			break;
506 
507 		msleep(20);
508 		psi_start_timeout -= 20;
509 	} while (psi_start_timeout > 0);
510 
511 	if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
512 		goto psi_transfer_fail; /* Unknown status of CP PSI process. */
513 
514 	ipc_imem->phase = IPC_P_PSI;
515 
516 	/* Enter the PSI phase. */
517 	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);
518 
519 	/* Request the RUNNING state from CP and wait until it was reached
520 	 * or timeout.
521 	 */
522 	ipc_imem_ipc_init_check(ipc_imem);
523 
524 	ret = wait_for_completion_interruptible_timeout
525 		(&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
526 	if (ret <= 0) {
527 		dev_err(ipc_imem->dev,
528 			"Failed PSI RUNNING state on CP, Error-%d", ret);
529 		goto psi_transfer_fail;
530 	}
531 
532 	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
533 			IPC_MEM_DEVICE_IPC_RUNNING) {
534 		dev_err(ipc_imem->dev,
535 			"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
536 			channel->channel_id,
537 			ipc_imem_phase_get_string(ipc_imem->phase),
538 			ipc_mmio_get_ipc_state(ipc_imem->mmio));
539 
540 		goto psi_transfer_fail;
541 	}
542 
543 	/* Create the flash channel for the transfer of the images. */
544 	if (!ipc_imem_sys_devlink_open(ipc_imem)) {
545 		dev_err(ipc_imem->dev, "can't open flash_channel");
546 		goto psi_transfer_fail;
547 	}
548 
549 	ret = 0;
550 psi_transfer_fail:
551 	ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
552 pcie_addr_map_fail:
553 	return ret;
554 }
555 
/* Write a devlink image chunk to CP; blocks until CP confirms reception. */
int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
			       unsigned char *buf, int count)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	struct ipc_mem_channel *channel;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;

	/* In the ROM phase the PSI image is passed to CP via a specific
	 * shared memory area and doorbell scratchpad directly.
	 */
	if (ipc_imem->phase == IPC_P_ROM) {
		ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
		/* If the PSI transfer fails then send crash
		 * Signature. A positive return from the PSI transfer marks a
		 * failure that occurred after CP confirmed the transfer.
		 */
		if (ret > 0)
			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      false);
		goto out;
	}

	/* Allocate skb memory for the uplink buffer. */
	skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
				 DMA_TO_DEVICE, 0);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(skb_put(skb, count), buf, count);

	/* Mark the skb as a blocked user operation — presumably the sender
	 * completes ul_sem for this op type; confirm against the UL path.
	 */
	IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	/* Inform the IPC tasklet to pass uplink IP packets to CP. */
	if (!ipc_imem_call_cdev_write(ipc_imem)) {
		/* Block until CP confirms the chunk (or a signal arrives). */
		ret = wait_for_completion_interruptible(&channel->ul_sem);

		if (ret < 0) {
			dev_err(ipc_imem->dev,
				"ch[%d] no CP confirmation, status = %d",
				channel->channel_id, ret);
			ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
			goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
613 
/* Blocking read of one devlink skb; copies its payload into @data. */
int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
			      u32 bytes_to_read, u32 *bytes_read)
{
	struct sk_buff *skb = NULL;
	int rc = 0;

	/* check skb is available in rx_list or wait for skb */
	devlink->devlink_sio.devlink_read_pend = 1;
	for (;;) {
		skb = skb_dequeue(&devlink->devlink_sio.rx_list);
		if (skb)
			break;
		if (!wait_for_completion_interruptible_timeout
				(&devlink->devlink_sio.read_sem,
				 msecs_to_jiffies(IPC_READ_TIMEOUT))) {
			dev_err(devlink->dev, "Read timedout");
			rc = -ETIMEDOUT;
			goto devlink_read_fail;
		}
	}
	devlink->devlink_sio.devlink_read_pend = 0;

	/* The caller's buffer must hold the whole payload. */
	if (bytes_to_read < skb->len) {
		dev_err(devlink->dev, "Invalid size,expected len %d", skb->len);
		rc = -EINVAL;
		goto devlink_read_fail;
	}

	*bytes_read = skb->len;
	memcpy(data, skb->data, skb->len);

devlink_read_fail:
	/* NOTE: skb is NULL on the timeout path — presumably tolerated by
	 * ipc_pcie_kfree_skb(); confirm.
	 */
	ipc_pcie_kfree_skb(devlink->pcie, skb);
	return rc;
}
644