#include <assert.h>
#include "kernel_sched.h"
#include "kernel_proc.h"
#include "kernel_cc.h"
/**
  @file kernel_cc.c
  @brief The implementation of concurrency control.

  Locks for the scheduler and device drivers. Because we support
  multiple cores, we need to avoid race conditions with an interrupt
  handler on the same core, as well as race conditions between cores.
*/
/*
  Pre-emption aware mutex.
  -------------------------

  This mutex will act as a spinlock if preemption is off, and as a
  yielding mutex if preemption is on. Therefore, we can call the same
  function from both the preemptive and the non-preemptive domains of
  the kernel.

  The implementation is based on GCC atomics, as the standard C11
  primitives are not yet supported by all recent compilers. Eventually,
  this will change.
*/
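/*
  Usage example (an illustrative sketch, not part of the kernel API):
  because Mutex_Lock() spins while preemption is off and yields while it
  is on, the same critical-section pattern is safe in both domains. The
  names below are hypothetical:

  @code
  static Mutex my_lock = MUTEX_INIT;     // hypothetical lock

  void touch_shared_state()              // hypothetical caller
  {
    Mutex_Lock(&my_lock);
    // ... access state shared between cores and/or interrupt context ...
    Mutex_Unlock(&my_lock);
  }
  @endcode
*/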
void Mutex_Lock(Mutex* lock)
{
#define MUTEX_SPINS (cpu_cores() > 1 ? 1000 : 10000)

  while(__atomic_test_and_set(lock, __ATOMIC_ACQUIRE)) {
    int spin = MUTEX_SPINS;
    while(__atomic_load_n(lock, __ATOMIC_RELAXED)) {
      /* On x86, issue a pause hint to reduce spin-wait cost.
         Note: GCC predefines __i386__/__x86_64__ (not __x86__). */
#if defined(__i386__) || defined(__x86_64__)
      __builtin_ia32_pause();
#endif
      if(spin > 0)
        spin--;
      else {
        spin = MUTEX_SPINS;
        /* Yield only in the preemptive domain; else keep spinning */
        if(cpu_interrupts_enabled())
          yield(SCHED_MUTEX);
      }
    }
  }

#undef MUTEX_SPINS
}
void Mutex_Unlock(Mutex* lock)
{
  __atomic_clear(lock, __ATOMIC_RELEASE);
}
/*
  Condition variables.
*/
/** \cond HELPER Helper structure for condition variables. */
typedef struct __cv_waiter {
  rlnode node;             /* become part of the waiters' ring */
  TCB* thread;             /* the waiting thread */
  sig_atomic_t signalled;  /* set if the thread was signalled */
  sig_atomic_t removed;    /* set if the waiter was removed from the ring */
} __cv_waiter;
/** \endcond */
/**
  @internal
  A helper routine to remove a condition waiter from the CondVar ring.
*/
static inline void remove_from_ring(CondVar* cv, __cv_waiter* w)
{
  if(cv->waitset == w) {
    /* Make cv->waitset safe: advance it past w, or clear it if w
       is the only waiter in the ring */
    __cv_waiter* nextw = w->node.next->obj;
    cv->waitset = (nextw == w) ? NULL : nextw;
  }
  rlist_remove(& w->node);
}
/**
  @internal
  @brief Wait on a condition variable, specifying the cause.

  This function is the basic implementation of the 'wait' operation on
  condition variables. It is used to implement the @c Cond_Wait and
  @c Cond_TimedWait system calls, as well as internal kernel 'wait'
  functionality.

  The function must be called only while we hold the mutex that is
  associated with this call. It will put the calling thread to sleep,
  unlocking the mutex. These operations happen atomically.

  When the thread is woken up later (by another thread that calls @c
  Cond_Signal or @c Cond_Broadcast, because the timeout has expired, or
  because the thread was awoken by another kernel routine), it first
  re-locks the mutex and then returns.

  @param mutex The mutex to be unlocked as the thread sleeps.
  @param cv The condition variable to sleep on.
  @param cause A cause provided to the kernel scheduler.
  @param timeout The time to sleep, or @c NO_TIMEOUT to sleep forever.
  @returns 1 if this thread was woken up by signal/broadcast, 0 otherwise.
  @see Cond_Signal
  @see Cond_Broadcast
*/
static int cv_wait(Mutex* mutex, CondVar* cv,
  enum SCHED_CAUSE cause, TimerDuration timeout)
{
  __cv_waiter waiter = { .thread = cur_thread(), .signalled = 0, .removed = 0 };
  rlnode_init(& waiter.node, &waiter);

  Mutex_Lock(&(cv->waitset_lock));

  /* We just push the current thread to the back of the list */
  if(cv->waitset) {
    __cv_waiter* wset = cv->waitset;
    rlist_push_back(& wset->node, & waiter.node);
  } else {
    cv->waitset = &waiter;
  }

  /* Now atomically release the mutex and sleep */
  Mutex_Unlock(mutex);
  sleep_releasing(STOPPED, &(cv->waitset_lock), cause, timeout);

  /* Woke up; we must check whether we were signalled, and tidy up */
  Mutex_Lock(&(cv->waitset_lock));
  if(! waiter.removed) {
    /* If we had been signalled, the signaller would have removed us */
    assert(! waiter.signalled);

    /* We must remove ourselves from the ring! */
    remove_from_ring(cv, &waiter);
  }
  Mutex_Unlock(&(cv->waitset_lock));

  Mutex_Lock(mutex);
  return waiter.signalled;
}
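/*
  Usage example (an illustrative sketch): cv_wait() implements the
  standard monitor discipline, so callers of Cond_Wait() should re-check
  their predicate in a loop (a wakeup may be due to a timeout or another
  kernel routine), and signallers should signal under the same mutex.
  All names below are hypothetical:

  @code
  static Mutex m = MUTEX_INIT;
  static CondVar nonempty = COND_INIT;
  static int items = 0;

  void consume()
  {
    Mutex_Lock(&m);
    while(items == 0)            // predicate may still be false on wakeup
      Cond_Wait(&m, &nonempty);
    items--;
    Mutex_Unlock(&m);
  }

  void produce()
  {
    Mutex_Lock(&m);
    items++;
    Cond_Signal(&nonempty);      // wake one waiter, if any
    Mutex_Unlock(&m);
  }
  @endcode
*/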
/**
  @internal
  Helper for Cond_Signal and Cond_Broadcast. This routine finds a waiter
  to signal, if one exists; otherwise, it leaves cv->waitset == NULL.
*/
static inline void cv_signal(CondVar* cv)
{
  /* Wake up the first thread in the waiters' ring, if one exists. */
  while(cv->waitset) {
    __cv_waiter* waiter = cv->waitset;
    remove_from_ring(cv, waiter);
    waiter->removed = 1;

    /* wakeup() may fail if the waiter has already woken up (e.g.,
       because its timeout expired); in that case, try the next one. */
    if(wakeup(waiter->thread)) {
      waiter->signalled = 1;
      return;
    }
  }
}
int Cond_Wait(Mutex* mutex, CondVar* cv)
{
  return cv_wait(mutex, cv, SCHED_USER, NO_TIMEOUT);
}
int Cond_TimedWait(Mutex* mutex, CondVar* cv, timeout_t timeout)
{
  /* We have to translate the timeout from msec to usec */
  return cv_wait(mutex, cv, SCHED_USER, timeout*1000ul);
}
void Cond_Signal(CondVar* cv)
{
  Mutex_Lock(&(cv->waitset_lock));
  cv_signal(cv);
  Mutex_Unlock(&(cv->waitset_lock));
}
void Cond_Broadcast(CondVar* cv)
{
  Mutex_Lock(&(cv->waitset_lock));
  while(cv->waitset)
    cv_signal(cv);
  Mutex_Unlock(&(cv->waitset_lock));
}
/*
*
* The kernel locks
*
*/
/**
 * @brief The kernel lock.
 *
 * Kernel locking is provided by a semaphore, implemented as a monitor.
 * A semaphore for kernel locking has advantages over a simple mutex.
 * The main advantage is that @c kernel_mutex is held for a very short
 * time regardless of contention. Thus, on multicore machines, it allows
 * cores to be passed on to other threads instead of spinning.
 */
/* This mutex is used to implement the kernel semaphore as a monitor. */
static Mutex kernel_mutex = MUTEX_INIT;
/* Semaphore counter */
static int kernel_sem = 1;
/* Semaphore condition */
static CondVar kernel_sem_cv = COND_INIT;
void kernel_lock()
{
  Mutex_Lock(& kernel_mutex);
  while(kernel_sem <= 0) {
    Cond_Wait(& kernel_mutex, &kernel_sem_cv);
  }
  kernel_sem--;
  Mutex_Unlock(& kernel_mutex);
}
void kernel_unlock()
{
  Mutex_Lock(& kernel_mutex);
  kernel_sem++;
  Cond_Signal(&kernel_sem_cv);
  Mutex_Unlock(& kernel_mutex);
}
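/*
  Usage example (an illustrative sketch; the routine name is
  hypothetical): kernel code serializes access to kernel data structures
  by bracketing the critical section with the kernel lock.

  @code
  void some_kernel_op()          // hypothetical routine
  {
    kernel_lock();
    // ... manipulate kernel data structures ...
    kernel_unlock();
  }
  @endcode
*/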
int kernel_wait_wchan(CondVar* cv, enum SCHED_CAUSE cause,
  const char* wchan_name, TimerDuration timeout)
{
  /* Note: wchan_name is currently unused */

  /* Atomically release the kernel semaphore */
  Mutex_Lock(& kernel_mutex);
  kernel_sem++;
  Cond_Signal(&kernel_sem_cv);

  int ret = cv_wait(&kernel_mutex, cv, cause, timeout);

  /* Reacquire the kernel semaphore */
  while(kernel_sem <= 0)
    Cond_Wait(& kernel_mutex, &kernel_sem_cv);
  kernel_sem--;
  Mutex_Unlock(& kernel_mutex);

  return ret;
}
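/*
  Usage example (an illustrative sketch; all names are hypothetical):
  a kernel routine that must block while holding the kernel lock calls
  kernel_wait_wchan(), which releases the kernel semaphore, sleeps, and
  reacquires the semaphore before returning. A peer routine would wake
  it via kernel_signal() or kernel_broadcast().

  @code
  static CondVar data_ready = COND_INIT;   // hypothetical condition

  void wait_for_data()                     // hypothetical routine
  {
    kernel_lock();
    while(! data_is_ready())               // hypothetical predicate
      kernel_wait_wchan(&data_ready, SCHED_USER, "data_ready", NO_TIMEOUT);
    // ... consume the data ...
    kernel_unlock();
  }
  @endcode
*/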
void kernel_signal(CondVar* cv)
{
  Cond_Signal(cv);
}

void kernel_broadcast(CondVar* cv)
{
  Cond_Broadcast(cv);
}
void kernel_sleep(Thread_state newstate, enum SCHED_CAUSE cause)
{
  /* Release the kernel semaphore and go to sleep in one atomic step */
  Mutex_Lock(& kernel_mutex);
  kernel_sem++;
  Cond_Signal(&kernel_sem_cv);
  sleep_releasing(newstate, &kernel_mutex, cause, NO_TIMEOUT);
}
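/*
  Usage note (an illustrative sketch; the scenario is hypothetical):
  a thread holding the kernel lock can block itself without a condition
  variable by calling kernel_sleep(). Observe that, unlike
  kernel_wait_wchan(), this function does not reacquire the kernel
  semaphore before control returns to the caller.

  @code
  void block_current_thread()      // hypothetical routine
  {
    // called with the kernel lock held; another thread may later
    // resume us via wakeup()
    kernel_sleep(STOPPED, SCHED_USER);
  }
  @endcode
*/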