/******************************************************************************
 * common/softirq.c
 *
 * Softirqs in Xen are only executed in an outermost activation (e.g., never
 * within an interrupt activation). This simplifies some things and generally
 * seems a good thing.
 *
 * Copyright (c) 2003, K A Fraser
 * Copyright (c) 1992, Linus Torvalds
 */

#include <xen/init.h>
#include <xen/mm.h>
#include <xen/preempt.h>
#include <xen/sched.h>
#include <xen/rcupdate.h>
#include <xen/softirq.h>

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS];
#endif

static softirq_handler softirq_handlers[NR_SOFTIRQS];

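/*
 * Per-CPU state for batching softirq IPIs: while @batching is non-zero,
 * remote CPUs that need notifying are collected in @batch_mask instead of
 * being sent an event-check IPI immediately.
 */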
static DEFINE_PER_CPU(cpumask_t, batch_mask);
static DEFINE_PER_CPU(unsigned int, batching);

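/*
 * Run pending softirqs on the current CPU, except those whose bits are set
 * in @ignore_mask.  Loops until no (non-ignored) softirq remains pending or
 * the CPU has gone offline.
 */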
static void __do_softirq(unsigned long ignore_mask)
{
    unsigned int i, cpu;
    unsigned long pending;
    bool rcu_allowed = !(ignore_mask & (1ul << RCU_SOFTIRQ));

    ASSERT(!rcu_allowed || rcu_quiesce_allowed());

    for ( ; ; )
    {
        /*
         * Initialise @cpu on every iteration: SCHEDULE_SOFTIRQ or
         * SCHED_SLAVE_SOFTIRQ may move us to another processor.
         */
        cpu = smp_processor_id();

        if ( rcu_allowed && rcu_pending(cpu) )
            rcu_check_callbacks(cpu);

        if ( ((pending = (softirq_pending(cpu) & ~ignore_mask)) == 0)
             || cpu_is_offline(cpu) )
            break;

        i = find_first_set_bit(pending);
        clear_bit(i, &softirq_pending(cpu));
        (*softirq_handlers[i])();
    }
}

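/*
 * Process pending softirqs without entering the scheduler, so the calling
 * context cannot be preempted.  Must be called from non-IRQ context with
 * interrupts enabled.
 */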
void process_pending_softirqs(void)
{
    /* Do not enter the scheduler, as it can preempt the calling context. */
    unsigned long ignore_mask = (1ul << SCHEDULE_SOFTIRQ) |
                                (1ul << SCHED_SLAVE_SOFTIRQ);

    /* Block RCU processing in case rcu_read_lock() is held. */
    if ( !rcu_quiesce_allowed() )
        ignore_mask |= 1ul << RCU_SOFTIRQ;

    ASSERT(!in_irq() && local_irq_is_enabled());
    __do_softirq(ignore_mask);
}

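/* Process all pending softirqs.  Must not be called from atomic context. */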
void do_softirq(void)
{
    ASSERT_NOT_IN_ATOMIC();
    __do_softirq(0);
}

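/* Register @handler as the handler for softirq number @nr. */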
void open_softirq(int nr, softirq_handler handler)
{
    ASSERT(nr < NR_SOFTIRQS);
    softirq_handlers[nr] = handler;
}

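/*
 * Raise softirq @nr on every CPU in @mask.  No IPI is sent to CPUs whose
 * pending bit was already set, to the local CPU, or to CPUs for which the
 * architecture reports that no event check is needed.
 */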
void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
{
    unsigned int cpu, this_cpu = smp_processor_id();
    cpumask_t send_mask, *raise_mask;

    if ( !per_cpu(batching, this_cpu) || in_irq() )
    {
        cpumask_clear(&send_mask);
        raise_mask = &send_mask;
    }
    else
        raise_mask = &per_cpu(batch_mask, this_cpu);

    for_each_cpu(cpu, mask)
        if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
             cpu != this_cpu &&
             !arch_skip_send_event_check(cpu) )
            __cpumask_set_cpu(cpu, raise_mask);

    if ( raise_mask == &send_mask )
        smp_send_event_check_mask(raise_mask);
}

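/* Raise softirq @nr on @cpu, sending an event-check IPI if necessary. */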
void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
    unsigned int this_cpu = smp_processor_id();

    if ( test_and_set_bit(nr, &softirq_pending(cpu))
         || (cpu == this_cpu)
         || arch_skip_send_event_check(cpu) )
        return;

    if ( !per_cpu(batching, this_cpu) || in_irq() )
        smp_send_event_check_cpu(cpu);
    else
        __cpumask_set_cpu(cpu, &per_cpu(batch_mask, this_cpu));
}

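/*
 * Begin a softirq batch on the local CPU: until the matching
 * cpu_raise_softirq_batch_finish(), event-check IPIs requested via
 * cpu_raise_softirq() / cpumask_raise_softirq() (outside IRQ context) are
 * collected in the per-CPU batch_mask rather than sent immediately.
 *
 * Illustrative usage (hypothetical caller; @some_mask is an example only):
 *
 *     cpu_raise_softirq_batch_begin();
 *     for_each_cpu ( cpu, &some_mask )
 *         cpu_raise_softirq(cpu, TIMER_SOFTIRQ);
 *     cpu_raise_softirq_batch_finish(); /@ one IPI pass for all CPUs @/
 */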
void cpu_raise_softirq_batch_begin(void)
{
    ++this_cpu(batching);
}

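/*
 * Finish a softirq batch: send a single event-check IPI to every CPU
 * collected in batch_mask that still has softirqs pending, then clear the
 * mask.
 */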
void cpu_raise_softirq_batch_finish(void)
{
    unsigned int cpu, this_cpu = smp_processor_id();
    cpumask_t *mask = &per_cpu(batch_mask, this_cpu);

    ASSERT(per_cpu(batching, this_cpu));
    for_each_cpu ( cpu, mask )
        if ( !softirq_pending(cpu) )
            __cpumask_clear_cpu(cpu, mask);
    smp_send_event_check_mask(mask);
    cpumask_clear(mask);
    --per_cpu(batching, this_cpu);
}

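/* Raise softirq @nr on the local CPU; no IPI is needed. */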
void raise_softirq(unsigned int nr)
{
    set_bit(nr, &softirq_pending(smp_processor_id()));
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */