@@ -90,11 +90,7 @@ module Fiber::ExecutionContext

      @global_queue = GlobalQueue.new(@mutex)
      @schedulers = Array(Scheduler).new(capacity)
-
-      # FIXME: invalid since schedulers will be transfered to other threads,
-      # keep a mere `@size : Int32` counter instead!
-      @threads = Array(Thread).new(capacity)
-
+      @started = @size.begin
      @rng = Random::PCG32.new

      start_schedulers
@@ -105,7 +101,7 @@ module Fiber::ExecutionContext

    # The number of threads that have been started.
    def size : Int32
-      @threads.size
+      @started
    end

    # The maximum number of threads that can be started.
@@ -120,16 +116,15 @@ module Fiber::ExecutionContext

    # Starts all schedulers at once.
    #
-    # We could lazily initialize them as needed, like we do for threads, which
-    # would be safe as long as we only mutate when the mutex is locked... but
-    # unlike @threads, we do iterate the schedulers in #steal without locking
-    # the mutex (for obvious reasons) and there are no guarantees that the new
-    # schedulers.@size will be written after the scheduler has been written to
-    # the array's buffer.
+    # We could lazily initialize them as needed, would be safe as long as we
+    # only mutate when the mutex is locked, but we iterate the schedulers in
+    # #steal without locking the mutex (for obvious reasons) and there are no
+    # guarantees that the new schedulers.@size will be written after the
+    # scheduler has been written to the array's buffer.
    #
    # OPTIMIZE: consider storing schedulers to an array-like object that would
-    # use an atomic/fence to make sure that @size can only be incremented
-    # *after* the value has been written to @buffer.
+    # use an atomic/fence to make sure that @size is only incremented *after*
+    # the value has been written to @buffer.
    private def start_schedulers
      capacity.times do |index|
        @schedulers << Scheduler.new(self, "#{@name}-#{index}")
@@ -140,18 +135,18 @@ module Fiber::ExecutionContext
      offset = 0

      if hijack
-        @threads << hijack_current_thread(@schedulers[0])
+        hijack_current_thread(@schedulers[0])
        offset += 1
      end

      offset.upto(@size.begin - 1) do |index|
-        @threads << start_thread(@schedulers[index])
+        start_thread(@schedulers[index])
      end
    end

    # Attaches *scheduler* to the current `Thread`, usually the process' main
    # thread. Starts a `Fiber` to run the scheduler loop.
-    private def hijack_current_thread(scheduler) : Thread
+    private def hijack_current_thread(scheduler) : Nil
      thread = Thread.current
      thread.internal_name = scheduler.name
      thread.execution_context = self
@@ -161,7 +156,7 @@ module Fiber::ExecutionContext

    # Starts a new `Thread` and attaches *scheduler*. Runs the scheduler loop
    # directly in the thread's main `Fiber`.
-    private def start_thread(scheduler) : Thread
+    private def start_thread(scheduler) : Nil
      ExecutionContext.thread_pool.checkout(scheduler)
    end
@@ -181,7 +176,7 @@ module Fiber::ExecutionContext
        # local enqueue: push to local queue of current scheduler
        ExecutionContext::Scheduler.current.enqueue(fiber)
      else
-        # cross context: push to global queue
+        # cross context or detached thread: push to global queue
        Crystal.trace :sched, "enqueue", fiber: fiber, to_context: self
        @global_queue.push(fiber)
        wake_scheduler
@@ -213,8 +208,8 @@ module Fiber::ExecutionContext
        Crystal.trace :sched, "park"
        @parked.add(1, :acquire_release)

-        # TODO: consider detaching the scheduler and returning the thread back
-        # into ExecutionContext.thread_pool instead
+        # TODO: detach the scheduler and return the thread back into ThreadPool
+        # instead
        @condition.wait(@mutex)

        # we don't decrement @parked because #wake_scheduler did
@@ -277,13 +272,12 @@ module Fiber::ExecutionContext
      # check if we can start another thread; no need for atomics, the values
      # shall be rather stable over time and we check them again inside the
      # mutex
-      return if @threads.size == capacity
+      return if @started == capacity

      @mutex.synchronize do
-        index = @threads.size
+        index = @started
        return if index == capacity # check again
-
-        @threads << start_thread(@schedulers[index])
+        start_thread(@schedulers[index])
      end
    end
0 commit comments