1 #ifndef __SPINLOCK_H__
2 #define __SPINLOCK_H__
3 
4 #include <xen/time.h>
5 #include <asm/system.h>
6 #include <asm/spinlock.h>
7 #include <asm/types.h>
8 
9 #define SPINLOCK_CPU_BITS  12
10 
#ifdef CONFIG_DEBUG_LOCKS
/*
 * Per-lock debug state, present only with CONFIG_DEBUG_LOCKS.
 * Packs the owning CPU and two status flags into 16 bits; 'val' allows
 * all debug bits to be read or written as one unit.
 */
union lock_debug {
    uint16_t val;                        /* whole-word access to all bits */
#define LOCK_DEBUG_INITVAL 0xffff
    struct {
        uint16_t cpu:SPINLOCK_CPU_BITS;  /* CPU currently holding the lock */
#define LOCK_DEBUG_PAD_BITS (14 - SPINLOCK_CPU_BITS)
        uint16_t :LOCK_DEBUG_PAD_BITS;   /* unused padding up to bit 13 */
        bool irq_safe:1;  /* presumably: lock is taken with IRQs disabled — confirm in spinlock.c */
        bool unseen:1;    /* presumably: lock not yet observed by debug code — confirm in spinlock.c */
    };
};
/* Initializer: all bits set (LOCK_DEBUG_INITVAL), i.e. no owner recorded. */
#define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
void spin_debug_enable(void);
void spin_debug_disable(void);
#else
/* Debugging disabled: zero-size state and no-op toggles. */
union lock_debug { };
#define _LOCK_DEBUG { }
#define spin_debug_enable() ((void)0)
#define spin_debug_disable() ((void)0)
#endif
32 
33 #ifdef CONFIG_DEBUG_LOCK_PROFILE
34 
35 #include <public/sysctl.h>
36 
37 /*
38     lock profiling on:
39 
40     Global locks which should be subject to profiling must be declared via
41     DEFINE_SPINLOCK.
42 
43     For locks in structures further measures are necessary:
44     - the structure definition must include a profile_head with exactly this
45       name:
46 
47       struct lock_profile_qhead   profile_head;
48 
49     - the single locks which are subject to profiling have to be initialized
50       via
51 
52       spin_lock_init_prof(ptr, lock);
53 
54       with ptr being the main structure pointer and lock the spinlock field
55 
56     - each structure has to be added to profiling with
57 
58       lock_profile_register_struct(type, ptr, idx, print);
59 
60       with:
61         type:  something like LOCKPROF_TYPE_PERDOM
62         ptr:   pointer to the structure
63         idx:   index of that structure, e.g. domid
64         print: descriptive string like "domain"
65 
66     - removing of a structure is done via
67 
68       lock_profile_deregister_struct(type, ptr);
69 */
70 
struct spinlock;

/*
 * Per-lock profiling record, allocated one per profiled lock and linked
 * into the owning structure's lock_profile_qhead via 'next'.
 */
struct lock_profile {
    struct lock_profile *next;       /* forward link */
    char                *name;       /* lock name */
    struct spinlock     *lock;       /* the lock itself */
    u64                 lock_cnt;    /* # of complete locking ops */
    u64                 block_cnt;   /* # of complete wait for lock */
    s_time_t            time_hold;   /* cumulated lock time */
    s_time_t            time_block;  /* cumulated wait time */
    s_time_t            time_locked; /* system time of last locking */
};
83 
/*
 * Anchor for all profiled locks belonging to one structure instance.
 * The structure being profiled must embed one of these named exactly
 * 'profile_head' (see the wrapper macros below).
 */
struct lock_profile_qhead {
    struct lock_profile_qhead *head_q; /* next head of this type */
    struct lock_profile       *elem_q; /* first element in q */
    int32_t                   idx;     /* index for printout */
};
89 
/* Static initializer for a struct lock_profile: name string + lock pointer,
 * all counters zeroed.  Initializer order matches the field order above. */
#define _LOCK_PROFILE(name) { 0, #name, &name, 0, 0, 0, 0, 0 }
/* Emit a pointer to the profile record into the .lockprofile.data section,
 * where the profiling code discovers all statically-defined locks. */
#define _LOCK_PROFILE_PTR(name)                                               \
    static struct lock_profile * const __lock_profile_##name                  \
    __used_section(".lockprofile.data") =                                     \
    &__lock_profile_data_##name
/* Unlocked-lock initializer; 'x' is the struct lock_profile * (or NULL). */
#define _SPIN_LOCK_UNLOCKED(x) { { 0 }, SPINLOCK_NO_CPU, 0, _LOCK_DEBUG, x }
#define SPIN_LOCK_UNLOCKED _SPIN_LOCK_UNLOCKED(NULL)
/* Define a global spinlock and statically register it for profiling. */
#define DEFINE_SPINLOCK(l)                                                    \
    spinlock_t l = _SPIN_LOCK_UNLOCKED(NULL);                                 \
    static struct lock_profile __lock_profile_data_##l = _LOCK_PROFILE(l);    \
    _LOCK_PROFILE_PTR(l)
101 
/*
 * Initialize the spinlock (s)->l and register it for profiling.
 *
 * The lock itself is ALWAYS initialized; attaching the profiling record
 * is best-effort.  If xzalloc() fails, the lock is still put into the
 * unlocked state, merely without a profile entry (profile == NULL).
 *
 * (The previous version bailed out via 'break' on allocation failure,
 * leaving (s)->l completely uninitialized — undefined behavior on the
 * first locking attempt.)
 */
#define spin_lock_init_prof(s, l)                                             \
    do {                                                                      \
        struct lock_profile *prof;                                            \
        prof = xzalloc(struct lock_profile);                                  \
        if ( prof )                                                           \
        {                                                                     \
            prof->name = #l;                                                  \
            prof->lock = &(s)->l;                                             \
            prof->next = (s)->profile_head.elem_q;                            \
            (s)->profile_head.elem_q = prof;                                  \
        }                                                                     \
        (s)->l = (spinlock_t)_SPIN_LOCK_UNLOCKED(prof);                       \
    } while(0)
113 
114 void _lock_profile_register_struct(
115     int32_t, struct lock_profile_qhead *, int32_t, char *);
116 void _lock_profile_deregister_struct(int32_t, struct lock_profile_qhead *);
117 
118 #define lock_profile_register_struct(type, ptr, idx, print)                   \
119     _lock_profile_register_struct(type, &((ptr)->profile_head), idx, print)
120 #define lock_profile_deregister_struct(type, ptr)                             \
121     _lock_profile_deregister_struct(type, &((ptr)->profile_head))
122 
123 extern int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc);
124 extern void spinlock_profile_printall(unsigned char key);
125 extern void spinlock_profile_reset(unsigned char key);
126 
#else

/* Profiling disabled: empty queue head and no-op stand-ins, so callers
 * need no #ifdefs of their own. */
struct lock_profile_qhead { };

/* Same layout as the profiling variant minus the trailing profile pointer. */
#define SPIN_LOCK_UNLOCKED { { 0 }, SPINLOCK_NO_CPU, 0, _LOCK_DEBUG }
#define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED

/* Without profiling this degenerates to a plain spin_lock_init(). */
#define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
#define lock_profile_register_struct(type, ptr, idx, print)
#define lock_profile_deregister_struct(type, ptr)
#define spinlock_profile_printall(key)

#endif
140 
/*
 * Ticket-lock state.  NOTE(review): semantics inferred from the field
 * names and SPINLOCK_TICKET_INC; confirm against spinlock.c — a waiter
 * presumably takes a ticket from 'tail' and spins until 'head' reaches it.
 */
typedef union {
    u32 head_tail;    /* both halves, for single atomic access */
    struct {
        u16 head;     /* ticket currently being served */
        u16 tail;     /* next ticket to hand out */
    };
} spinlock_tickets_t;

/* Adding this to head_tail increments 'tail' by one (tail occupies the
 * high 16 bits on little-endian layouts). */
#define SPINLOCK_TICKET_INC { .head_tail = 0x10000, }
150 
/*
 * The lock itself.  Field order must match the _SPIN_LOCK_UNLOCKED /
 * SPIN_LOCK_UNLOCKED initializer macros above.
 */
typedef struct spinlock {
    spinlock_tickets_t tickets;             /* ticket head/tail state */
    u16 recurse_cpu:SPINLOCK_CPU_BITS;      /* CPU holding the lock recursively */
/* Sentinel meaning "no CPU": all SPINLOCK_CPU_BITS bits set. */
#define SPINLOCK_NO_CPU        ((1u << SPINLOCK_CPU_BITS) - 1)
#define SPINLOCK_RECURSE_BITS  (16 - SPINLOCK_CPU_BITS)
    u16 recurse_cnt:SPINLOCK_RECURSE_BITS;  /* recursion depth */
#define SPINLOCK_MAX_RECURSE   ((1u << SPINLOCK_RECURSE_BITS) - 1)
    union lock_debug debug;                 /* empty unless CONFIG_DEBUG_LOCKS */
#ifdef CONFIG_DEBUG_LOCK_PROFILE
    struct lock_profile *profile;           /* NULL if lock not profiled */
#endif
} spinlock_t;
163 
164 
165 #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
166 
/* Implementation entry points (spinlock.c); use the macros below. */

/* Acquire; _spin_lock_cb() invokes cond(data) on each spin iteration. */
void _spin_lock(spinlock_t *lock);
void _spin_lock_cb(spinlock_t *lock, void (*cond)(void *), void *data);
void _spin_lock_irq(spinlock_t *lock);
unsigned long _spin_lock_irqsave(spinlock_t *lock);

/* Release counterparts of the above. */
void _spin_unlock(spinlock_t *lock);
void _spin_unlock_irq(spinlock_t *lock);
void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags);

/* Queries and quiescence barrier. */
int _spin_is_locked(spinlock_t *lock);
int _spin_trylock(spinlock_t *lock);
void _spin_barrier(spinlock_t *lock);

/* Recursive (same-CPU re-entrant) variants; see comment further down. */
int _spin_trylock_recursive(spinlock_t *lock);
void _spin_lock_recursive(spinlock_t *lock);
void _spin_unlock_recursive(spinlock_t *lock);
183 
#define spin_lock(l)                  _spin_lock(l)
#define spin_lock_cb(l, c, d)         _spin_lock_cb(l, c, d)
#define spin_lock_irq(l)              _spin_lock_irq(l)
/* 'f' receives the pre-disable IRQ state; the BUILD_BUG_ON rejects flag
 * variables narrower than unsigned long, which would silently truncate. */
#define spin_lock_irqsave(l, f)                                 \
    ({                                                          \
        BUILD_BUG_ON(sizeof(f) != sizeof(unsigned long));       \
        ((f) = _spin_lock_irqsave(l));                          \
    })

#define spin_unlock(l)                _spin_unlock(l)
#define spin_unlock_irq(l)            _spin_unlock_irq(l)
#define spin_unlock_irqrestore(l, f)  _spin_unlock_irqrestore(l, f)

#define spin_is_locked(l)             _spin_is_locked(l)
#define spin_trylock(l)               _spin_trylock(l)

/* Evaluates to 1 on success (IRQs left disabled), 0 on failure (IRQ
 * state restored).  IRQs are disabled before the trylock attempt. */
#define spin_trylock_irqsave(lock, flags)       \
({                                              \
    local_irq_save(flags);                      \
    spin_trylock(lock) ?                        \
    1 : ({ local_irq_restore(flags); 0; });     \
})

#define spin_lock_kick(l)             arch_lock_signal_wmb()

/* Ensure a lock is quiescent between two critical operations. */
#define spin_barrier(l)               _spin_barrier(l)

/*
 * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be
 * reentered recursively on the same CPU. All critical regions that may form
 * part of a recursively-nested set must be protected by these forms. If there
 * are any critical regions that cannot form part of such a set, they can use
 * standard spin_[un]lock().
 */
#define spin_trylock_recursive(l)     _spin_trylock_recursive(l)
#define spin_lock_recursive(l)        _spin_lock_recursive(l)
#define spin_unlock_recursive(l)      _spin_unlock_recursive(l)
222 
223 #endif /* __SPINLOCK_H__ */
224