1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2018-2021 Linaro Ltd.
5  */
6 #ifndef _GSI_H_
7 #define _GSI_H_
8 
9 #include <linux/types.h>
10 #include <linux/spinlock.h>
11 #include <linux/mutex.h>
12 #include <linux/completion.h>
13 #include <linux/platform_device.h>
14 #include <linux/netdevice.h>
15 
16 #include "ipa_version.h"
17 
18 /* Maximum number of channels and event rings supported by the driver */
19 #define GSI_CHANNEL_COUNT_MAX	23
20 #define GSI_EVT_RING_COUNT_MAX	24
21 
22 /* Maximum TLV FIFO size for a channel; 64 here is arbitrary (and high) */
23 #define GSI_TLV_MAX		64
24 
25 struct device;
26 struct scatterlist;
27 struct platform_device;
28 
29 struct gsi;
30 struct gsi_trans;
31 struct gsi_channel_data;
32 struct ipa_gsi_endpoint_data;
33 
/* Execution environment IDs.  An EE is an independent processor that
 * interacts with the GSI hardware; the AP is the one running this driver.
 */
enum gsi_ee_id {
	GSI_EE_AP				= 0x0,	/* Application processor */
	GSI_EE_MODEM				= 0x1,	/* Modem processor */
	GSI_EE_UC				= 0x2,	/* GSI microcontroller */
	GSI_EE_TZ				= 0x3,	/* TrustZone */
};
41 
/* A ring of elements exchanged with the GSI hardware (used for both
 * channel TRE rings and event rings; see the comment on @index below).
 */
struct gsi_ring {
	void *virt;			/* ring array base address (virtual) */
	dma_addr_t addr;		/* DMA address; primarily low 32 bits used */
	u32 count;			/* number of elements in ring */

	/* The ring index value indicates the next "open" entry in the ring.
	 *
	 * A channel ring consists of TRE entries filled by the AP and passed
	 * to the hardware for processing.  For a channel ring, the ring index
	 * identifies the next unused entry to be filled by the AP.
	 *
	 * An event ring consists of event structures filled by the hardware
	 * and passed to the AP.  For event rings, the ring index identifies
	 * the next ring entry that is not known to have been filled by the
	 * hardware.
	 */
	u32 index;
};
60 
/* Transactions use several resources that can be allocated dynamically
 * but taken from a fixed-size pool.  The number of elements required for
 * the pool is limited by the total number of TREs that can be outstanding.
 *
 * If sufficient TREs are available to reserve for a transaction,
 * allocation from these pools is guaranteed to succeed.  Furthermore,
 * these resources are implicitly freed whenever the TREs in the
 * transaction they're associated with are released.
 *
 * The result of a pool allocation of multiple elements is always
 * contiguous.
 */
struct gsi_trans_pool {
	void *base;			/* base address of element pool */
	u32 count;			/* # elements in the pool */
	u32 free;			/* next free element in pool (modulo count) */
	u32 size;			/* size (bytes) of an element */
	u32 max_alloc;			/* max elements per allocation request */
	dma_addr_t addr;		/* DMA address if DMA pool (or 0) */
};
81 
/* Transaction bookkeeping for a channel: resource pools, the TRE->
 * transaction map, and the lists a transaction moves through over its
 * lifetime (alloc -> pending -> complete -> polled).
 */
struct gsi_trans_info {
	atomic_t tre_avail;		/* TREs available for allocation */
	struct gsi_trans_pool pool;	/* transaction pool */
	struct gsi_trans_pool sg_pool;	/* scatterlist pool */
	struct gsi_trans_pool cmd_pool;	/* command payload DMA pool */
	struct gsi_trans_pool info_pool;/* command information pool */
	struct gsi_trans **map;		/* TRE -> transaction map */

	spinlock_t spinlock;		/* protects updates to the lists */
	struct list_head alloc;		/* allocated, not committed */
	struct list_head pending;	/* committed, awaiting completion */
	struct list_head complete;	/* completed, awaiting poll */
	struct list_head polled;	/* returned by gsi_channel_poll_one() */
};
96 
/* Hardware values signifying the state of a channel.  These values are
 * defined by the GSI hardware; do not renumber.
 */
enum gsi_channel_state {
	GSI_CHANNEL_STATE_NOT_ALLOCATED		= 0x0,
	GSI_CHANNEL_STATE_ALLOCATED		= 0x1,
	GSI_CHANNEL_STATE_STARTED		= 0x2,
	GSI_CHANNEL_STATE_STOPPED		= 0x3,
	GSI_CHANNEL_STATE_STOP_IN_PROC		= 0x4,	/* stop in progress */
	GSI_CHANNEL_STATE_ERROR			= 0xf,
};
106 
/* We only care about channels between IPA and AP */
struct gsi_channel {
	struct gsi *gsi;		/* GSI instance this channel belongs to */
	bool toward_ipa;		/* true for TX (AP->IPA) channels */
	bool command;			/* AP command TX channel or not */

	u8 tlv_count;			/* # entries in TLV FIFO */
	u16 tre_count;			/* # elements in the TRE ring */
	u16 event_count;		/* # elements in the associated event ring */

	struct completion completion;	/* signals channel command completion */

	struct gsi_ring tre_ring;	/* TRE ring filled by the AP */
	u32 evt_ring_id;		/* event ring used by this channel */

	u64 byte_count;			/* total # bytes transferred */
	u64 trans_count;		/* total # transactions */
	/* The following counts are used only for TX endpoints */
	u64 queued_byte_count;		/* last reported queued byte count */
	u64 queued_trans_count;		/* ...and queued trans count */
	u64 compl_byte_count;		/* last reported completed byte count */
	u64 compl_trans_count;		/* ...and completed trans count */

	struct gsi_trans_info trans_info;	/* transaction state (see above) */

	struct napi_struct napi;	/* NAPI context for completion polling */
};
134 
/* Hardware values signifying the state of an event ring.  These values
 * are defined by the GSI hardware; do not renumber.
 */
enum gsi_evt_ring_state {
	GSI_EVT_RING_STATE_NOT_ALLOCATED	= 0x0,
	GSI_EVT_RING_STATE_ALLOCATED		= 0x1,
	GSI_EVT_RING_STATE_ERROR		= 0xf,
};
141 
/* An event ring and the channel it serves */
struct gsi_evt_ring {
	struct gsi_channel *channel;	/* channel using this event ring */
	struct completion completion;	/* signals event ring state changes */
	struct gsi_ring ring;		/* ring of event structures */
};
147 
/* Top-level GSI state, embedded in the IPA structure */
struct gsi {
	struct device *dev;		/* Same as IPA device */
	enum ipa_version version;	/* IPA hardware version */
	struct net_device dummy_dev;	/* needed for NAPI */
	void __iomem *virt_raw;		/* I/O mapped address range */
	void __iomem *virt;		/* Adjusted for most registers */
	u32 irq;			/* GSI IRQ number */
	u32 channel_count;		/* # channels (<= GSI_CHANNEL_COUNT_MAX) */
	u32 evt_ring_count;		/* # event rings (<= GSI_EVT_RING_COUNT_MAX) */
	struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
	struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
	u32 event_bitmap;		/* allocated event rings */
	u32 modem_channel_bitmap;	/* modem channels to allocate */
	u32 type_enabled_bitmap;	/* GSI IRQ types enabled */
	u32 ieob_enabled_bitmap;	/* IEOB IRQ enabled (event rings) */
	struct completion completion;	/* for global EE commands */
	int result;			/* Negative errno (generic commands) */
	struct mutex mutex;		/* protects commands, programming */
};
167 
/**
 * gsi_setup() - Set up the GSI subsystem
 * @gsi:	Address of GSI structure embedded in an IPA structure
 *
 * Return:	0 if successful, or a negative error code
 *
 * Performs initialization that must wait until the GSI hardware is
 * ready (including firmware loaded).  Must follow a successful call
 * to gsi_init().
 */
int gsi_setup(struct gsi *gsi);
178 
/**
 * gsi_teardown() - Tear down GSI subsystem
 * @gsi:	GSI address previously passed to a successful gsi_setup() call
 *
 * Inverse of gsi_setup().
 */
void gsi_teardown(struct gsi *gsi);
184 
/**
 * gsi_channel_tre_max() - Channel maximum number of in-flight TREs
 * @gsi:	GSI pointer
 * @channel_id:	Channel whose limit is to be returned
 *
 * Return:	The maximum number of TREs outstanding on the channel
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
193 
/**
 * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
 * @gsi:	GSI pointer
 * @channel_id:	Channel whose limit is to be returned
 *
 * Return:	The maximum TRE count per transaction on the channel
 */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
202 
/**
 * gsi_channel_start() - Start an allocated GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to start
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_start(struct gsi *gsi, u32 channel_id);
211 
/**
 * gsi_channel_stop() - Stop a started GSI channel
 * @gsi:	GSI pointer returned by gsi_setup()
 * @channel_id:	Channel to stop
 *
 * Return:	0 if successful, or a negative error code
 */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
220 
/**
 * gsi_channel_reset() - Reset an allocated GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to be reset
 * @doorbell:	Whether to (possibly) enable the doorbell engine
 *
 * Reset a channel and reconfigure it.  The @doorbell flag indicates
 * that the doorbell engine should be enabled if needed.
 *
 * GSI hardware relinquishes ownership of all pending receive buffer
 * transactions and they will complete with their cancelled flag set.
 */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);
234 
/**
 * gsi_suspend() - Prepare the GSI subsystem for suspend
 * @gsi:	GSI pointer
 *
 * Counterpart of gsi_resume().
 */
void gsi_suspend(struct gsi *gsi);
240 
/**
 * gsi_resume() - Resume the GSI subsystem following suspend
 * @gsi:	GSI pointer
 *
 * Counterpart of gsi_suspend().
 */
void gsi_resume(struct gsi *gsi);
246 
/**
 * gsi_channel_suspend() - Suspend a GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to suspend
 *
 * Return:	0 if successful, or a negative error code
 *
 * For IPA v4.0+, suspend is implemented by stopping the channel.
 */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id);
255 
/**
 * gsi_channel_resume() - Resume a suspended GSI channel
 * @gsi:	GSI pointer
 * @channel_id:	Channel to resume
 *
 * Return:	0 if successful, or a negative error code
 *
 * For IPA v4.0+, the stopped channel is started again.
 */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id);
264 
/**
 * gsi_init() - Initialize the GSI subsystem
 * @gsi:	Address of GSI structure embedded in an IPA structure
 * @pdev:	IPA platform device
 * @version:	IPA hardware version (implies GSI version)
 * @count:	Number of entries in the configuration data array
 * @data:	Endpoint and channel configuration data
 *
 * Return:	0 if successful, or a negative error code
 *
 * Early stage initialization of the GSI subsystem, performing tasks
 * that can be done before the GSI hardware is ready to use.  Paired
 * with gsi_exit().
 */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data);
281 
/**
 * gsi_exit() - Exit the GSI subsystem
 * @gsi:	GSI address previously passed to a successful gsi_init() call
 *
 * Inverse of gsi_init().
 */
void gsi_exit(struct gsi *gsi);
287 
288 #endif /* _GSI_H_ */
289