@@ -309,6 +309,11 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
   return (intptr_t)mi_atomic_addi(p, -sub);
 }
 
+
+// ----------------------------------------------------------------------
+// Once and Guard
+// ----------------------------------------------------------------------
+
 typedef _Atomic(uintptr_t) mi_atomic_once_t;
 
 // Returns true only on the first invocation
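A minimal usage sketch of the once primitive (ensure_init and do_expensive_init are hypothetical names, not part of this diff): because mi_atomic_once returns true for exactly one caller, one-time initialization can be written as:

// Hypothetical caller: the guarded body runs at most once across all
// threads, since only the first mi_atomic_once call returns true.
static mi_atomic_once_t init_once;   // zero-initialized

static void ensure_init(void) {
  if (mi_atomic_once(&init_once)) {
    do_expensive_init();  // hypothetical one-time setup
  }
}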
@@ -329,7 +334,9 @@ typedef _Atomic(uintptr_t) mi_atomic_guard_t;
 
 
 
+// ----------------------------------------------------------------------
 // Yield
+// ----------------------------------------------------------------------
 #if defined(__cplusplus)
 #include <thread>
 static inline void mi_atomic_yield(void) {
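A spin-wait sketch built on mi_atomic_yield (wait_until_set is a hypothetical helper; mi_atomic_load_relaxed is defined earlier in this header):

// Hypothetical polling loop: yield between attempts so the thread that
// will set the flag gets a chance to run instead of burning the core.
static void wait_until_set(_Atomic(uintptr_t)* flag) {
  while (mi_atomic_load_relaxed(flag) == 0) {
    mi_atomic_yield();
  }
}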
@@ -393,4 +400,88 @@ static inline void mi_atomic_yield(void) {
 #endif
 
 
+// ----------------------------------------------------------------------
+// Locks are only used for abandoned segment visiting
+// ----------------------------------------------------------------------
+#if defined(_WIN32)
+
+#define mi_lock_t CRITICAL_SECTION
+
+static inline bool _mi_prim_lock(mi_lock_t* lock) {
+  EnterCriticalSection(lock);
+  return true;
+}
+
+static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
+  return TryEnterCriticalSection(lock);
+}
+
+static inline void _mi_prim_unlock(mi_lock_t* lock) {
+  LeaveCriticalSection(lock);
+}
+
+
+#elif defined(MI_USE_PTHREADS)
+
+#define mi_lock_t pthread_mutex_t
+
+static inline bool _mi_prim_lock(mi_lock_t* lock) {
+  return (pthread_mutex_lock(lock) == 0);
+}
+
+static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
+  return (pthread_mutex_trylock(lock) == 0);
+}
+
+static inline void _mi_prim_unlock(mi_lock_t* lock) {
+  pthread_mutex_unlock(lock);
+}
+
+#elif defined(__cplusplus)
+
+#include <mutex>
+#define mi_lock_t std::mutex
+
+static inline bool _mi_prim_lock(mi_lock_t* lock) {
+  lock->lock();
+  return true;
+}
+
+static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
+  return lock->try_lock();
+}
+
+static inline void _mi_prim_unlock(mi_lock_t* lock) {
+  lock->unlock();
+}
+
+#else
+
+// fall back to poor man's locks.
+// this should only be the case in a single-threaded environment (like __wasi__)
+
+#define mi_lock_t _Atomic(uintptr_t)
+
+static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
+  uintptr_t expected = 0;
+  return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1);
+}
+
+static inline bool _mi_prim_lock(mi_lock_t* lock) {
+  for (int i = 0; i < 1000; i++) {  // for at most 1000 tries?
+    if (_mi_prim_try_lock(lock)) return true;
+    mi_atomic_yield();
+  }
+  return true;
+}
+
+static inline void _mi_prim_unlock(mi_lock_t* lock) {
+  mi_atomic_store_release(lock, (uintptr_t)0);
+}
+
+#endif
+
+
+
+
 
 #endif // __MIMALLOC_ATOMIC_H
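A usage sketch for the lock primitives in the diff above (visit_lock and visit_abandoned_segments are hypothetical names, and mi_lock_t initialization is backend-specific, e.g. InitializeCriticalSection on Windows or PTHREAD_MUTEX_INITIALIZER for pthreads); the same call site compiles against all four backends:

// Hypothetical call site: try-lock first so a contended visit can simply
// be skipped rather than blocking the caller.
static mi_lock_t visit_lock;  // assumed initialized for the active backend

static void visit_abandoned_segments(void) {
  if (_mi_prim_try_lock(&visit_lock)) {
    // ... walk the abandoned segments ...
    _mi_prim_unlock(&visit_lock);
  }
}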