/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 */

#ifndef __XEN_RCUPDATE_H
#define __XEN_RCUPDATE_H

#include <xen/cache.h>
#include <xen/compiler.h>
#include <xen/spinlock.h>
#include <xen/cpumask.h>
#include <xen/percpu.h>
#include <xen/preempt.h>

#define __rcu

/* Per-CPU nesting count of sections in which quiescence is disallowed. */
DECLARE_PER_CPU(unsigned int, rcu_lock_cnt);
/*
 * rcu_quiesce_disable() marks the start (and rcu_quiesce_enable() the end)
 * of a region in which this CPU must not be treated as quiescent; such
 * regions may nest.  rcu_quiesce_allowed() reports whether no such region
 * is currently active on this CPU.
 */
static inline void rcu_quiesce_disable(void)
{
    preempt_disable();
    this_cpu(rcu_lock_cnt)++;
    barrier();
}

static inline void rcu_quiesce_enable(void)
{
    barrier();
    this_cpu(rcu_lock_cnt)--;
    preempt_enable();
}

static inline bool rcu_quiesce_allowed(void)
{
    return !this_cpu(rcu_lock_cnt);
}

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
    struct rcu_head *next;
    void (*func)(struct rcu_head *head);
};

#define RCU_HEAD_INIT   { .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
       (ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
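
/*
 * Typical usage embeds an rcu_head in the structure to be protected, so a
 * callback can later recover the enclosing object.  A minimal sketch
 * ("struct foo" is hypothetical, not part of this interface):
 *
 *     struct foo {
 *         int data;
 *         struct rcu_head rcu;
 *     };
 *
 * A statically allocated head can be declared with RCU_HEAD(), and an
 * embedded one reset with INIT_RCU_HEAD(&f->rcu).
 */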

int rcu_pending(int cpu);
int rcu_needs_cpu(int cpu);

/*
 * Dummy lock type for passing to rcu_read_{lock,unlock}. Currently exists
 * only to document the reason for rcu_read_lock() critical sections.
 */
struct _rcu_read_lock {};
typedef struct _rcu_read_lock rcu_read_lock_t;
#define DEFINE_RCU_READ_LOCK(x) rcu_read_lock_t x
#define RCU_READ_LOCK_INIT(x)
/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When call_rcu() is invoked on one CPU while other CPUs are within RCU
 * read-side critical sections, invocation of the corresponding RCU
 * callback is deferred until after all the other CPUs exit their
 * critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to process softirqs or block while in an RCU read-side
 * critical section.
 */
static inline void rcu_read_lock(rcu_read_lock_t *lock)
{
    rcu_quiesce_disable();
}

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(rcu_read_lock_t *lock)
{
    ASSERT(!rcu_quiesce_allowed());
    rcu_quiesce_enable();
}
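
/*
 * Example of a read-side critical section (a minimal sketch; "foo_lock" is
 * a hypothetical documentation-only lock, not part of this interface):
 *
 *     DEFINE_RCU_READ_LOCK(foo_lock);
 *
 *     rcu_read_lock(&foo_lock);
 *     ... access RCU-protected data; do not block or process softirqs ...
 *     rcu_read_unlock(&foo_lock);
 */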

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p)     (p)
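
/*
 * Example (a minimal sketch; "foo_ptr", "struct foo" and "foo_lock" are
 * hypothetical):
 *
 *     struct foo *p;
 *
 *     rcu_read_lock(&foo_lock);
 *     p = rcu_dereference(foo_ptr);
 *     if ( p )
 *         do_something(p->data);
 *     rcu_read_unlock(&foo_lock);
 */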

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) ({ smp_wmb(); (p) = (v); })
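
/*
 * Example of publishing an updated structure (a minimal sketch; all names
 * are hypothetical, and "foo_spinlock" serialises writers):
 *
 *     new = xmalloc(struct foo);
 *     ... fully initialise *new ...
 *     spin_lock(&foo_spinlock);
 *     old = foo_ptr;
 *     rcu_assign_pointer(foo_ptr, new);
 *     spin_unlock(&foo_spinlock);
 *     ... free "old" only after a grace period, e.g. via call_rcu() ...
 */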

void rcu_init(void);
void rcu_check_callbacks(int cpu);

/* Exported interfaces */
void call_rcu(struct rcu_head *head,
              void (*func)(struct rcu_head *head));
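
/*
 * Example of deferred freeing via call_rcu() (a minimal sketch; "struct
 * foo" with an embedded rcu_head and free_foo_rcu() are hypothetical):
 *
 *     static void free_foo_rcu(struct rcu_head *head)
 *     {
 *         struct foo *f = container_of(head, struct foo, rcu);
 *
 *         xfree(f);
 *     }
 *
 *     call_rcu(&old->rcu, free_foo_rcu);
 */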

void rcu_barrier(void);

void rcu_idle_enter(unsigned int cpu);
void rcu_idle_exit(unsigned int cpu);

#endif /* __XEN_RCUPDATE_H */