1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (c) 2015-2021, Linaro Limited
4 */
5
6 #include <compiler.h>
7 #include <kernel/notif.h>
8 #include <kernel/spinlock.h>
9 #include <kernel/thread.h>
10 #include <kernel/wait_queue.h>
11 #include <optee_rpc_cmd.h>
12 #include <string.h>
13 #include <tee_api_defines.h>
14 #include <trace.h>
15 #include <types_ext.h>
16
/* Single global spinlock serializing access to every wait queue in the system */
static unsigned wq_spin_lock;
18
19
wq_init(struct wait_queue * wq)20 void wq_init(struct wait_queue *wq)
21 {
22 *wq = (struct wait_queue)WAIT_QUEUE_INITIALIZER;
23 }
24
/*
 * do_notif() - trace a sleep/wake event and invoke the notification
 * primitive @fn (notif_wait or notif_send_sync) for thread @id.
 * @cmd_str names the operation for the trace, @sync_obj/@fname/@lineno
 * identify the caller's synchronization object and source location.
 * A failure from @fn is only logged; there is no recovery path here.
 */
static void do_notif(TEE_Result (*fn)(uint32_t), int id,
		     const char *cmd_str __maybe_unused,
		     const void *sync_obj __maybe_unused,
		     const char *fname, int lineno __maybe_unused)
{
	TEE_Result res = TEE_SUCCESS;

	/* Include the source location only when the caller supplied one */
	if (!fname)
		DMSG("%s thread %d %p", cmd_str, id, sync_obj);
	else
		DMSG("%s thread %d %p %s:%d", cmd_str, id,
		     sync_obj, fname, lineno);

	res = fn(id + NOTIF_SYNC_VALUE_BASE);
	if (res)
		DMSG("%s thread %d res %#"PRIx32, cmd_str, id, res);
}
42
slist_add_tail(struct wait_queue * wq,struct wait_queue_elem * wqe)43 static void slist_add_tail(struct wait_queue *wq, struct wait_queue_elem *wqe)
44 {
45 struct wait_queue_elem *wqe_iter;
46
47 /* Add elem to end of wait queue */
48 wqe_iter = SLIST_FIRST(wq);
49 if (wqe_iter) {
50 while (SLIST_NEXT(wqe_iter, link))
51 wqe_iter = SLIST_NEXT(wqe_iter, link);
52 SLIST_INSERT_AFTER(wqe_iter, wqe, link);
53 } else
54 SLIST_INSERT_HEAD(wq, wqe, link);
55 }
56
wq_wait_init_condvar(struct wait_queue * wq,struct wait_queue_elem * wqe,struct condvar * cv,bool wait_read)57 void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
58 struct condvar *cv, bool wait_read)
59 {
60 uint32_t old_itr_status;
61
62 wqe->handle = thread_get_id();
63 wqe->done = false;
64 wqe->wait_read = wait_read;
65 wqe->cv = cv;
66
67 old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
68
69 slist_add_tail(wq, wqe);
70
71 cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
72 }
73
wq_wait_final(struct wait_queue * wq,struct wait_queue_elem * wqe,const void * sync_obj,const char * fname,int lineno)74 void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
75 const void *sync_obj, const char *fname, int lineno)
76 {
77 uint32_t old_itr_status;
78 unsigned done;
79
80 do {
81 do_notif(notif_wait, wqe->handle,
82 "sleep", sync_obj, fname, lineno);
83
84 old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
85
86 done = wqe->done;
87 if (done)
88 SLIST_REMOVE(wq, wqe, wait_queue_elem, link);
89
90 cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
91 } while (!done);
92 }
93
/*
 * wq_wake_next() - wake the next waiter(s) on @wq.
 *
 * The first not-done, non-condvar element determines the wake type:
 * if it is a reader (wait_read), all queued readers are woken, one per
 * loop iteration; otherwise only that single waiter is woken.
 * Condvar waiters (wqe->cv set) are skipped — they are activated via
 * wq_promote_condvar() instead. @sync_obj/@fname/@lineno are trace-only.
 */
void wq_wake_next(struct wait_queue *wq, const void *sync_obj,
		  const char *fname, int lineno)
{
	uint32_t old_itr_status;
	struct wait_queue_elem *wqe;
	int handle = -1;
	bool do_wakeup = false;
	bool wake_type_assigned = false;
	bool wake_read = false; /* avoid gcc warning */

	/*
	 * If next type is wait_read wakeup all wqe with wait_read true.
	 * If next type isn't wait_read wakeup only the first wqe which isn't
	 * done.
	 */

	while (true) {
		old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);

		/* Find one candidate per pass; marking done happens under the lock */
		SLIST_FOREACH(wqe, wq, link) {
			if (wqe->cv)
				continue;
			if (wqe->done)
				continue;
			/* Latch the wake type from the first eligible waiter */
			if (!wake_type_assigned) {
				wake_read = wqe->wait_read;
				wake_type_assigned = true;
			}

			if (wqe->wait_read != wake_read)
				continue;

			wqe->done = true;
			handle = wqe->handle;
			do_wakeup = true;
			break;
		}

		cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);

		/* Notify outside the spinlock: the RPC/notif call may block */
		if (do_wakeup)
			do_notif(notif_send_sync, handle,
				 "wake ", sync_obj, fname, lineno);

		/* Stop when nothing was found, or after a single writer wake */
		if (!do_wakeup || !wake_read)
			break;
		do_wakeup = false;
	}
}
143
/*
 * wq_promote_condvar() - turn condvar waiter(s) on @cv into active waiters.
 * Clearing wqe->cv makes the element eligible for wq_wake_next(). With
 * @only_one set (cv_signal) a single waiter is promoted, otherwise all
 * of them are (cv_broadcast). A NULL @cv is a no-op.
 */
void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
			bool only_one, const void *sync_obj __unused,
			const char *fname, int lineno __maybe_unused)
{
	struct wait_queue_elem *wqe = NULL;
	uint32_t exceptions = 0;

	if (!cv)
		return;

	exceptions = cpu_spin_lock_xsave(&wq_spin_lock);

	/*
	 * Find condvar waiter(s) and promote each to an active waiter.
	 * This is a bit unfair to eventual other active waiters as a
	 * condvar waiter is added to the queue when waiting for the
	 * condvar.
	 */
	SLIST_FOREACH(wqe, wq, link) {
		if (wqe->cv != cv)
			continue;

		if (fname)
			FMSG("promote thread %u %p %s:%d",
			     wqe->handle, (void *)cv->m, fname, lineno);
		else
			FMSG("promote thread %u %p",
			     wqe->handle, (void *)cv->m);

		wqe->cv = NULL;
		if (only_one)
			break;
	}

	cpu_spin_unlock_xrestore(&wq_spin_lock, exceptions);
}
179
wq_have_condvar(struct wait_queue * wq,struct condvar * cv)180 bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv)
181 {
182 uint32_t old_itr_status;
183 struct wait_queue_elem *wqe;
184 bool rc = false;
185
186 old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
187
188 SLIST_FOREACH(wqe, wq, link) {
189 if (wqe->cv == cv) {
190 rc = true;
191 break;
192 }
193 }
194
195 cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
196
197 return rc;
198 }
199
wq_is_empty(struct wait_queue * wq)200 bool wq_is_empty(struct wait_queue *wq)
201 {
202 uint32_t old_itr_status;
203 bool ret;
204
205 old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
206
207 ret = SLIST_EMPTY(wq);
208
209 cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
210
211 return ret;
212 }
213