/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists
 *
 * This seemingly RCU-private file must be available to SRCU users
 * because the size of the TREE SRCU srcu_struct structure depends
 * on these definitions.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.net.ibm.com>
 */

#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
#define __INCLUDE_LINUX_RCU_SEGCBLIST_H

#include <linux/types.h>
#include <linux/atomic.h>

/* Simple unsegmented callback lists. */
struct rcu_cblist {
	struct rcu_head *head;	/* First callback, or NULL if the list is empty. */
	struct rcu_head **tail;	/* Points at the last callback's ->next pointer,
				 * or at ->head when the list is empty (see
				 * RCU_CBLIST_INITIALIZER below). */
	long len;		/* Number of callbacks on this list. */
};

/*
 * Static initializer: empty list, with ->tail pointing back at ->head.
 * ->len is left to static zero-initialization.
 */
#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }

/* Complicated segmented callback lists.  ;-) */

/*
 * Index values for segments in rcu_segcblist structure.
 *
 * The segments are as follows:
 *
 * [head, *tails[RCU_DONE_TAIL]):
 *	Callbacks whose grace period has elapsed, and thus can be invoked.
 * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]):
 *	Callbacks waiting for the current GP from the current CPU's viewpoint.
 * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]):
 *	Callbacks that arrived before the next GP started, again from
 *	the current CPU's viewpoint.  These can be handled by the next GP.
 * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]):
 *	Callbacks that might have arrived after the next GP started.
 *	There is some uncertainty as to when a given GP starts and
 *	ends, but a CPU knows the exact times if it is the one starting
 *	or ending the GP.  Other CPUs know that the previous GP ends
 *	before the next one starts.
 *
 * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also
 * empty.
 *
 * The ->gp_seq[] array contains the grace-period number at which the
 * corresponding segment of callbacks will be ready to invoke.  A given
 * element of this array is meaningful only when the corresponding segment
 * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks
 * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have
 * not yet been assigned a grace-period number).
 */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_CBLIST_NSEGS	4


/*
 *                     ==NOCB Offloading state machine==
 *
 *
 *  ----------------------------------------------------------------------------
 *  |                         SEGCBLIST_SOFTIRQ_ONLY                           |
 *  |                                                                          |
 *  |  Callbacks processed by rcu_core() from softirqs or local                |
 *  |  rcuc kthread, without holding nocb_lock.                                |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *  ----------------------------------------------------------------------------
 *  |                        SEGCBLIST_OFFLOADED                               |
 *  |                                                                          |
 *  | Callbacks processed by rcu_core() from softirqs or local                 |
 *  | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads,     |
 *  | allowing nocb_timer to be armed.                                         |
 *  ----------------------------------------------------------------------------
 *                                         |
 *                                         v
 *                        -----------------------------------
 *                        |                                 |
 *                        v                                 v
 *  ---------------------------------------  ----------------------------------|
 *  |        SEGCBLIST_OFFLOADED |        |  |     SEGCBLIST_OFFLOADED |       |
 *  |        SEGCBLIST_KTHREAD_CB         |  |     SEGCBLIST_KTHREAD_GP        |
 *  |                                     |  |                                 |
 *  |                                     |  |                                 |
 *  | CB kthread woke up and              |  | GP kthread woke up and          |
 *  | acknowledged SEGCBLIST_OFFLOADED.   |  | acknowledged SEGCBLIST_OFFLOADED|
 *  | Processes callbacks concurrently    |  |                                 |
 *  | with rcu_core(), holding            |  |                                 |
 *  | nocb_lock.                          |  |                                 |
 *  ---------------------------------------  -----------------------------------
 *                        |                                 |
 *                        -----------------------------------
 *                                         |
 *                                         v
 *  |--------------------------------------------------------------------------|
 *  |                           SEGCBLIST_OFFLOADED |                          |
 *  |                           SEGCBLIST_KTHREAD_CB |                         |
 *  |                           SEGCBLIST_KTHREAD_GP                           |
 *  |                                                                          |
 *  |   Kthreads handle callbacks holding nocb_lock, local rcu_core() stops    |
 *  |   handling callbacks. Enable bypass queueing.                            |
 *  ----------------------------------------------------------------------------
 */



/*
 *                     ==NOCB De-Offloading state machine==
 *
 *
 *  |--------------------------------------------------------------------------|
 *  |                           SEGCBLIST_OFFLOADED |                          |
 *  |                           SEGCBLIST_KTHREAD_CB |                         |
 *  |                           SEGCBLIST_KTHREAD_GP                           |
 *  |                                                                          |
 *  |   CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core()    |
 *  |   ignores callbacks. Bypass enqueue is enabled.                          |
 *  ----------------------------------------------------------------------------
 *                                      |
 *                                      v
 *  |--------------------------------------------------------------------------|
 *  |                           SEGCBLIST_KTHREAD_CB |                         |
 *  |                           SEGCBLIST_KTHREAD_GP                           |
 *  |                                                                          |
 *  |   CB/GP kthreads and local rcu_core() handle callbacks concurrently      |
 *  |   holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable    |
 *  |   bypass enqueue.                                                        |
 *  ----------------------------------------------------------------------------
 *                                      |
 *                                      v
 *                     -----------------------------------
 *                     |                                 |
 *                     v                                 v
 *  ---------------------------------------------------------------------------|
 *  |                                                                          |
 *  |        SEGCBLIST_KTHREAD_CB          |       SEGCBLIST_KTHREAD_GP        |
 *  |                                      |                                   |
 *  | GP kthread woke up and               |   CB kthread woke up and          |
 *  | acknowledged the fact that           |   acknowledged the fact that      |
 *  | SEGCBLIST_OFFLOADED got cleared.     |   SEGCBLIST_OFFLOADED got cleared.|
 *  |                                      |   The CB kthread goes to sleep    |
 *  | The callbacks from the target CPU    |   until it ever gets re-offloaded.|
 *  | will be ignored from the GP kthread  |                                   |
 *  | loop.                                |                                   |
 *  ----------------------------------------------------------------------------
 *                                      |                                  |
 *                                      -----------------------------------
 *                                      |
 *                                      v
 *  ----------------------------------------------------------------------------
 *  |                                   0                                      |
 *  |                                                                          |
 *  | Callbacks processed by rcu_core() from softirqs or local                 |
 *  | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed.    |
 *  | Flush pending nocb_timer. Flush nocb bypass callbacks.                   |
 *  ----------------------------------------------------------------------------
 *                                      |
 *                                      v
 *  ----------------------------------------------------------------------------
 *  |                         SEGCBLIST_SOFTIRQ_ONLY                           |
 *  |                                                                          |
 *  |  Callbacks processed by rcu_core() from softirqs or local                |
 *  |  rcuc kthread, without holding nocb_lock.                                |
 *  ----------------------------------------------------------------------------
 */

/*
 * Flag bits for rcu_segcblist ->flags; see the state-machine diagrams above
 * for how they combine during NOCB (de-)offloading.
 * NOTE(review): BIT() is not defined by the two includes above; upstream
 * picks it up transitively via <linux/types.h> — confirm before reuse.
 */
#define SEGCBLIST_ENABLED	BIT(0)
#define SEGCBLIST_SOFTIRQ_ONLY	BIT(1)
#define SEGCBLIST_KTHREAD_CB	BIT(2)
#define SEGCBLIST_KTHREAD_GP	BIT(3)
#define SEGCBLIST_OFFLOADED	BIT(4)

struct rcu_segcblist {
	struct rcu_head *head;			/* First callback, or NULL if empty. */
	struct rcu_head **tails[RCU_CBLIST_NSEGS]; /* Segment boundaries; see the
						    * segment-index comment above. */
	unsigned long gp_seq[RCU_CBLIST_NSEGS];	/* GP number at which each segment
						 * becomes ready to invoke; see the
						 * ->gp_seq[] comment above for
						 * validity rules. */
#ifdef CONFIG_RCU_NOCB_CPU
	atomic_long_t len;			/* Atomic: updated concurrently when
						 * callbacks are offloaded to nocb
						 * kthreads. */
#else
	long len;
#endif
	long seglen[RCU_CBLIST_NSEGS];		/* Per-segment callback counts. */
	u8 flags;				/* SEGCBLIST_* state bits above. */
};

/*
 * Static initializer: all four segments empty, every tail pointing back at
 * ->head.  ->gp_seq[], ->len, ->seglen[] and ->flags rely on static
 * zero-initialization.
 */
#define RCU_SEGCBLIST_INITIALIZER(n) \
{ \
	.head = NULL, \
	.tails[RCU_DONE_TAIL] = &n.head, \
	.tails[RCU_WAIT_TAIL] = &n.head, \
	.tails[RCU_NEXT_READY_TAIL] = &n.head, \
	.tails[RCU_NEXT_TAIL] = &n.head, \
}

#endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */