37
37
#include "caml/sys.h"
38
38
#include "caml/memprof.h"
39
39
40
- /* threads.h is *not* included since it contains the _external_ declarations for
41
- the caml_c_thread_register and caml_c_thread_unregister functions. */
40
+ #define CAMLextern_libthreads
41
+ #include "threads.h"
42
42
43
43
#ifndef NATIVE_CODE
44
44
/* Initial size of bytecode stack when a thread is created (4 Ko) */
55
55
#include "st_posix.h"
56
56
#endif
57
57
58
/* Atomics compatibility layer.

   The definitions garbled in the diff paste had a space between the macro
   name and the parameter list (`#define atomic_store (v , x )`), which
   defines an *object-like* macro whose expansion begins with `(v , x)` —
   every use site would then expand to nonsense.  They must be
   function-like macros: no space before the opening parenthesis. */
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8
/* GCC 4.8 shipped with a working implementation of atomics, but no
   stdatomic.h header, so we need to use GCC-specific intrinsics. */

#define _Atomic /* GCC intrinsics work on normal variables */
#define atomic_store(v, x) \
  __atomic_store_n((v), (x), __ATOMIC_SEQ_CST)
#define atomic_load(v) \
  __atomic_load_n((v), __ATOMIC_SEQ_CST)
#define atomic_exchange(v, x) \
  __atomic_exchange_n((v), (x), __ATOMIC_SEQ_CST)
#else
#include <stdatomic.h>
#endif
58
75
/* The ML value describing a thread (heap-allocated) */
59
76
60
77
struct caml_thread_descr {
@@ -111,7 +128,7 @@ static caml_thread_t all_threads = NULL;
111
128
static caml_thread_t curr_thread = NULL ;
112
129
113
130
/* The locking scheme currently protecting the OCaml runtime system.
   This replaces the former single `st_masterlock caml_master_lock`:
   the scheme is now a pointer that can be swapped at runtime (see
   caml_switch_runtime_locking_scheme below), hence the _Atomic
   qualifier — it is read and exchanged with seq-cst atomics. */
static _Atomic struct caml_locking_scheme* caml_locking_scheme;
115
132
116
133
/* Whether the "tick" thread is already running */
117
134
static int caml_tick_thread_running = 0 ;
@@ -143,6 +160,36 @@ extern struct longjmp_buffer caml_termination_jmpbuf;
143
160
extern void (* caml_termination_hook )(void );
144
161
#endif
145
162
163
/* The default locking scheme: the classic st_masterlock, wrapped in the
   generic caml_locking_scheme interface.  The initializer is positional;
   from the usage elsewhere in this file the field order appears to be
   { context, lock, unlock, reinitialize_after_fork, count_waiters, yield }
   — TODO(review): confirm against the struct declaration in threads.h.
   The casts adapt the st_masterlock-typed functions to the generic
   void* context signature of the scheme's function pointers. */
static st_masterlock default_master_lock;
struct caml_locking_scheme default_locking_scheme =
  { &default_master_lock,
    (void (*)(void*))&st_masterlock_acquire,
    (void (*)(void*))&st_masterlock_release,
    (void (*)(void*))&st_masterlock_init,
    (int (*)(void*))&st_masterlock_waiters,
    (void (*)(void*))&st_thread_yield };
172
+
173
+ static void acquire_runtime_lock ()
174
+ {
175
+ struct caml_locking_scheme * s ;
176
+
177
+ retry :
178
+ s = atomic_load (& caml_locking_scheme );
179
+ s -> lock (s -> context );
180
+ if (atomic_load (& caml_locking_scheme ) != s ) {
181
+ s -> unlock (s -> context );
182
+ goto retry ;
183
+ }
184
+ }
185
+
186
+ static void release_runtime_lock ()
187
+ {
188
+ struct caml_locking_scheme * s ;
189
+ s = atomic_load (& caml_locking_scheme );
190
+ s -> unlock (s -> context );
191
+ }
192
+
146
193
/* Hook for scanning the stacks of the other threads */
147
194
148
195
static void (* prev_scan_roots_hook ) (scanning_action );
@@ -182,7 +229,7 @@ static void memprof_ctx_iter(th_ctx_action f, void* data)
182
229
183
230
/* Saving and restoring runtime state in curr_thread */
184
231
185
- Caml_inline void caml_thread_save_runtime_state (void )
232
+ CAMLexport void caml_thread_save_runtime_state (void )
186
233
{
187
234
#ifdef NATIVE_CODE
188
235
curr_thread -> top_of_stack = Caml_state -> top_of_stack ;
@@ -208,8 +255,12 @@ Caml_inline void caml_thread_save_runtime_state(void)
208
255
caml_memprof_leave_thread ();
209
256
}
210
257
211
- Caml_inline void caml_thread_restore_runtime_state (void )
258
+ CAMLexport void caml_thread_restore_runtime_state (void )
212
259
{
260
+ /* Update curr_thread to point to the thread descriptor corresponding
261
+ to the thread currently executing */
262
+ curr_thread = st_tls_get (thread_descriptor_key );
263
+
213
264
#ifdef NATIVE_CODE
214
265
Caml_state -> top_of_stack = curr_thread -> top_of_stack ;
215
266
Caml_state -> bottom_of_stack = curr_thread -> bottom_of_stack ;
@@ -234,6 +285,19 @@ Caml_inline void caml_thread_restore_runtime_state(void)
234
285
caml_memprof_enter_thread (curr_thread -> memprof_ctx );
235
286
}
236
287
288
/* Switch the runtime to a different locking scheme.

   Must be called by a thread that currently holds the runtime lock (the
   old scheme's lock).  Protocol:
     1. save the runtime state out of Caml_state into curr_thread;
     2. atomically install the new scheme, remembering the old one;
     3. release the old scheme's lock — from the moment of the exchange it
        is no longer the runtime lock, so other threads blocked on it must
        not treat acquiring it as owning the runtime (acquire_runtime_lock
        re-checks for exactly this);
     4. acquire the runtime lock under the new scheme and restore state.

   NOTE(review): the parameter is named `new`, a C++ keyword — harmless in
   this C translation unit, but worth renaming if the prototype is ever
   exposed to C++ clients. */
CAMLexport void caml_switch_runtime_locking_scheme(struct caml_locking_scheme* new)
{
  struct caml_locking_scheme* old;

  caml_thread_save_runtime_state();
  old = atomic_exchange(&caml_locking_scheme, new);
  /* We hold 'old', but it is no longer the runtime lock */
  old->unlock(old->context);
  acquire_runtime_lock();
  caml_thread_restore_runtime_state();
}
299
+
300
+
237
301
/* Hooks for caml_enter_blocking_section and caml_leave_blocking_section */
238
302
239
303
@@ -243,7 +307,7 @@ static void caml_thread_enter_blocking_section(void)
243
307
of the current thread */
244
308
caml_thread_save_runtime_state ();
245
309
/* Tell other threads that the runtime is free */
246
- st_masterlock_release ( & caml_master_lock );
310
+ release_runtime_lock ( );
247
311
}
248
312
249
313
static void caml_thread_leave_blocking_section (void )
@@ -255,11 +319,7 @@ static void caml_thread_leave_blocking_section(void)
255
319
DWORD error = GetLastError ();
256
320
#endif
257
321
/* Wait until the runtime is free */
258
- st_masterlock_acquire (& caml_master_lock );
259
- /* Update curr_thread to point to the thread descriptor corresponding
260
- to the thread currently executing */
261
- curr_thread = st_tls_get (thread_descriptor_key );
262
- /* Restore the runtime state from the curr_thread descriptor */
322
+ acquire_runtime_lock ();
263
323
caml_thread_restore_runtime_state ();
264
324
#ifdef _WIN32
265
325
SetLastError (error );
@@ -419,6 +479,7 @@ static void caml_thread_remove_info(caml_thread_t th)
419
479
static void caml_thread_reinitialize (void )
420
480
{
421
481
struct channel * chan ;
482
+ struct caml_locking_scheme * s ;
422
483
423
484
/* Remove all other threads (now nonexistent)
424
485
from the doubly-linked list of threads */
@@ -430,7 +491,8 @@ static void caml_thread_reinitialize(void)
430
491
/* Reinitialize the master lock machinery,
431
492
just in case the fork happened while other threads were doing
432
493
caml_leave_blocking_section */
433
- st_masterlock_init (& caml_master_lock );
494
+ s = atomic_load (& caml_locking_scheme );
495
+ s -> reinitialize_after_fork (s -> context );
434
496
/* Tick thread is not currently running in child process, will be
435
497
re-created at next Thread.create */
436
498
caml_tick_thread_running = 0 ;
@@ -454,7 +516,8 @@ CAMLprim value caml_thread_initialize(value unit) /* ML */
454
516
/* OS-specific initialization */
455
517
st_initialize ();
456
518
/* Initialize and acquire the master lock */
457
- st_masterlock_init (& caml_master_lock );
519
+ st_masterlock_init (& default_master_lock );
520
+ caml_locking_scheme = & default_locking_scheme ;
458
521
/* Initialize the keys */
459
522
st_tls_newkey (& thread_descriptor_key );
460
523
st_tls_newkey (& last_channel_locked_key );
@@ -562,7 +625,7 @@ static void caml_thread_stop(void)
562
625
/* OS-specific cleanups */
563
626
st_thread_cleanup ();
564
627
/* Release the runtime system */
565
- st_masterlock_release ( & caml_master_lock );
628
+ release_runtime_lock ( );
566
629
}
567
630
568
631
/* Create a thread */
@@ -658,7 +721,7 @@ CAMLexport int caml_c_thread_register(void)
658
721
th -> top_of_stack = (char * ) & err ;
659
722
#endif
660
723
/* Take master lock to protect access to the chaining of threads */
661
- st_masterlock_acquire ( & caml_master_lock );
724
+ acquire_runtime_lock ( );
662
725
/* Add thread info block to the list of threads */
663
726
if (all_threads == NULL ) {
664
727
th -> next = th ;
@@ -673,7 +736,7 @@ CAMLexport int caml_c_thread_register(void)
673
736
/* Associate the thread descriptor with the thread */
674
737
st_tls_set (thread_descriptor_key , (void * ) th );
675
738
/* Release the master lock */
676
- st_masterlock_release ( & caml_master_lock );
739
+ release_runtime_lock ( );
677
740
/* Now we can re-enter the run-time system and heap-allocate the descriptor */
678
741
caml_leave_blocking_section ();
679
742
th -> descr = caml_thread_new_descriptor (Val_unit ); /* no closure */
@@ -694,7 +757,7 @@ CAMLexport int caml_c_thread_unregister(void)
694
757
/* Not registered? */
695
758
if (th == NULL ) return 0 ;
696
759
/* Wait until the runtime is available */
697
- st_masterlock_acquire ( & caml_master_lock );
760
+ acquire_runtime_lock ( );
698
761
/* Forget the thread descriptor */
699
762
st_tls_set (thread_descriptor_key , NULL );
700
763
/* Remove thread info block from list of threads, and free it */
@@ -703,7 +766,7 @@ CAMLexport int caml_c_thread_unregister(void)
703
766
so that it does not prevent the whole process from exiting (#9971) */
704
767
if (all_threads == NULL ) caml_thread_cleanup (Val_unit );
705
768
/* Release the runtime */
706
- st_masterlock_release ( & caml_master_lock );
769
+ release_runtime_lock ( );
707
770
return 1 ;
708
771
}
709
772
@@ -771,21 +834,34 @@ CAMLprim value caml_thread_exit(value unit) /* ML */
771
834
772
835
/* Thread.yield: give other OCaml threads a chance to run.

   Fast path: if the current locking scheme provides count_waiters and
   yield hooks, we avoid a full blocking-section round-trip and let the
   scheme hand the lock over directly.  Otherwise we fall back to a plain
   enter/leave blocking section around an OS-level yield. */
CAMLprim value caml_thread_yield(value unit)        /* ML */
{
  struct caml_locking_scheme* s;

  s = atomic_load(&caml_locking_scheme);
  if (s->count_waiters != NULL && s->yield != NULL) {
    /* Nobody is waiting for the lock: yielding would be a no-op. */
    if (s->count_waiters(s->context) == 0) return Val_unit;

    /* Do all the parts of a blocking section enter/leave except lock
       manipulation, which we'll do more efficiently in the scheme's
       yield hook.  (Since our blocking section doesn't contain anything
       interesting, don't bother with saving errno.)
    */
    caml_raise_async_if_exception(caml_process_pending_signals_exn(),
                                  "signal handler");
    caml_thread_save_runtime_state();
    s->yield(s->context);
    if (atomic_load(&caml_locking_scheme) != s) {
      /* The lock we have is no longer the runtime lock: the scheme was
         switched while we were yielding.  Drop it and take the real
         runtime lock. */
      s->unlock(s->context);
      acquire_runtime_lock();
    }
    caml_thread_restore_runtime_state();
    caml_raise_async_if_exception(caml_process_pending_signals_exn(),
                                  "signal handler");
  } else {
    /* Scheme has no fast yield: fall back to a full blocking section
       around an OS-level yield. */
    caml_enter_blocking_section();
    st_fallback_yield();
    caml_leave_blocking_section();
  }
  return Val_unit;
}
0 commit comments