/******************************************************************************
 * tasklet.c
 *
 * Tasklets are dynamically-allocatable tasks run in either VCPU context
 * (specifically, the idle VCPU's context) or in softirq context, on at most
 * one CPU at a time. Softirq versus VCPU context execution is specified
 * during per-tasklet initialisation.
 *
 * Copyright (c) 2010, Citrix Systems, Inc.
 * Copyright (c) 1992, Linus Torvalds
 *
 * Authors:
 *    Keir Fraser <keir@xen.org>
 */

#include <xen/init.h>
#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/tasklet.h>
#include <xen/cpu.h>
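
/*
 * Illustrative usage sketch (not part of this file; the handler, variable
 * and CPU number below are hypothetical callers of this API):
 *
 *     static void my_work_fn(void *data)
 *     {
 *         // Runs in the idle vCPU's context, on at most one CPU at a time.
 *     }
 *
 *     static struct tasklet my_tasklet;
 *
 *     tasklet_init(&my_tasklet, my_work_fn, NULL);
 *     tasklet_schedule(&my_tasklet);           // run on the current CPU
 *     tasklet_schedule_on_cpu(&my_tasklet, 1); // ... or on a specific CPU
 *     tasklet_kill(&my_tasklet);               // dequeue, mark dead, wait
 *
 * Use softirq_tasklet_init() instead of tasklet_init() to have the handler
 * invoked from TASKLET_SOFTIRQ context rather than from the idle vCPU.
 */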

/* Some subsystems call into us before we are initialised. We ignore them. */
static bool_t tasklets_initialised;

DEFINE_PER_CPU(unsigned long, tasklet_work_to_do);

static DEFINE_PER_CPU(struct list_head, tasklet_list);
static DEFINE_PER_CPU(struct list_head, softirq_tasklet_list);

/* Protects all lists and tasklet structures. */
static DEFINE_SPINLOCK(tasklet_lock);

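/*
 * Queue a tasklet on the lists of the CPU recorded in t->scheduled_on and
 * kick that CPU: TASKLET_SOFTIRQ for softirq tasklets, SCHEDULE_SOFTIRQ for
 * ordinary ones (the scheduler then runs them from the idle vCPU).
 * Caller must hold tasklet_lock.
 */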
static void tasklet_enqueue(struct tasklet *t)
{
    unsigned int cpu = t->scheduled_on;

    if ( t->is_softirq )
    {
        struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);
        bool_t was_empty = list_empty(list);
        list_add_tail(&t->list, list);
        if ( was_empty )
            cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
    }
    else
    {
        unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
        list_add_tail(&t->list, &per_cpu(tasklet_list, cpu));
        if ( !test_and_set_bit(_TASKLET_enqueued, work_to_do) )
            cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
    }
}

void tasklet_schedule_on_cpu(struct tasklet *t, unsigned int cpu)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    if ( tasklets_initialised && !t->is_dead )
    {
        t->scheduled_on = cpu;
        if ( !t->is_running )
        {
            list_del(&t->list);
            tasklet_enqueue(t);
        }
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

void tasklet_schedule(struct tasklet *t)
{
    tasklet_schedule_on_cpu(t, smp_processor_id());
}

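/*
 * Run at most one pending tasklet from @list. The handler is invoked with
 * tasklet_lock dropped and interrupts enabled; the lock is re-taken before
 * returning. If the tasklet was rescheduled while it ran, requeue it.
 * Caller must hold tasklet_lock with interrupts disabled.
 */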
static void do_tasklet_work(unsigned int cpu, struct list_head *list)
{
    struct tasklet *t;

    if ( unlikely(list_empty(list) || cpu_is_offline(cpu)) )
        return;

    t = list_entry(list->next, struct tasklet, list);
    list_del_init(&t->list);

    BUG_ON(t->is_dead || t->is_running || (t->scheduled_on != cpu));
    t->scheduled_on = -1;
    t->is_running = 1;

    spin_unlock_irq(&tasklet_lock);
    sync_local_execstate();
    t->func(t->data);
    spin_lock_irq(&tasklet_lock);

    t->is_running = 0;

    if ( t->scheduled_on >= 0 )
    {
        BUG_ON(t->is_dead || !list_empty(&t->list));
        tasklet_enqueue(t);
    }
}

/* VCPU context work */
void do_tasklet(void)
{
    unsigned int cpu = smp_processor_id();
    unsigned long *work_to_do = &per_cpu(tasklet_work_to_do, cpu);
    struct list_head *list = &per_cpu(tasklet_list, cpu);

    /*
     * We want to be sure any caller has checked that a tasklet is both
     * enqueued and scheduled before calling this. And, if the caller has
     * actually checked, it's not an issue that we are outside of the
     * critical region, because:
     *  - TASKLET_enqueued is cleared only here,
     *  - TASKLET_scheduled is only cleared when schedule() finds it set
     *    without TASKLET_enqueued being set as well.
     */
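    /*
     * Illustrative caller sketch (assumption: the actual call site lives in
     * the per-arch idle loop, outside this file):
     *
     *     if ( tasklet_work_to_do(cpu) )
     *         do_tasklet();
     */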
    ASSERT(tasklet_work_to_do(cpu));

    spin_lock_irq(&tasklet_lock);

    do_tasklet_work(cpu, list);

    if ( list_empty(list) )
    {
        clear_bit(_TASKLET_enqueued, work_to_do);
        raise_softirq(SCHEDULE_SOFTIRQ);
    }

    spin_unlock_irq(&tasklet_lock);
}

/* Softirq context work */
static void tasklet_softirq_action(void)
{
    unsigned int cpu = smp_processor_id();
    struct list_head *list = &per_cpu(softirq_tasklet_list, cpu);

    spin_lock_irq(&tasklet_lock);

    do_tasklet_work(cpu, list);

    if ( !list_empty(list) && !cpu_is_offline(cpu) )
        raise_softirq(TASKLET_SOFTIRQ);

    spin_unlock_irq(&tasklet_lock);
}

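/*
 * Remove a tasklet from its list (if queued), mark it dead so it cannot be
 * rescheduled, and spin until any in-progress handler has finished.
 */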
void tasklet_kill(struct tasklet *t)
{
    unsigned long flags;

    spin_lock_irqsave(&tasklet_lock, flags);

    /* Cope with uninitialised tasklets. */
    if ( list_head_is_null(&t->list) )
        goto unlock;

    if ( !list_empty(&t->list) )
    {
        BUG_ON(t->is_dead || t->is_running || (t->scheduled_on < 0));
        list_del_init(&t->list);
    }

    t->scheduled_on = -1;
    t->is_dead = 1;

    while ( t->is_running )
    {
        spin_unlock_irqrestore(&tasklet_lock, flags);
        cpu_relax();
        spin_lock_irqsave(&tasklet_lock, flags);
    }

 unlock:
    spin_unlock_irqrestore(&tasklet_lock, flags);
}

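/*
 * Move all tasklets still pending on @list (belonging to @cpu, which is
 * going or has gone offline) onto the current CPU's queues and kick them
 * there.
 */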
static void migrate_tasklets_from_cpu(unsigned int cpu, struct list_head *list)
{
    unsigned long flags;
    struct tasklet *t;

    spin_lock_irqsave(&tasklet_lock, flags);

    while ( !list_empty(list) )
    {
        t = list_entry(list->next, struct tasklet, list);
        BUG_ON(t->scheduled_on != cpu);
        t->scheduled_on = smp_processor_id();
        list_del(&t->list);
        tasklet_enqueue(t);
    }

    spin_unlock_irqrestore(&tasklet_lock, flags);
}

void tasklet_init(struct tasklet *t, void (*func)(void *), void *data)
{
    memset(t, 0, sizeof(*t));
    INIT_LIST_HEAD(&t->list);
    t->scheduled_on = -1;
    t->func = func;
    t->data = data;
}

void softirq_tasklet_init(struct tasklet *t, void (*func)(void *), void *data)
{
    tasklet_init(t, func, data);
    t->is_softirq = 1;
}

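/*
 * CPU hotplug notifier: initialise the per-CPU lists when a CPU comes up,
 * and pull any stranded tasklets back to the current CPU when one goes down.
 */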
static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
        INIT_LIST_HEAD(&per_cpu(softirq_tasklet_list, cpu));
        break;
    case CPU_UP_CANCELED:
    case CPU_DEAD:
        migrate_tasklets_from_cpu(cpu, &per_cpu(tasklet_list, cpu));
        migrate_tasklets_from_cpu(cpu, &per_cpu(softirq_tasklet_list, cpu));
        break;
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback,
    .priority = 99
};

void __init tasklet_subsys_init(void)
{
    void *hcpu = (void *)(long)smp_processor_id();
    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
    register_cpu_notifier(&cpu_nfb);
    open_softirq(TASKLET_SOFTIRQ, tasklet_softirq_action);
    tasklets_initialised = 1;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */