
Commit 8f87455

add initial support for visiting abandoned segments per subprocess, upstream for python/cpython#114133

1 parent f93fb90
File tree: 8 files changed (+206 -88 lines)

include/mimalloc.h  (+8 -3)

@@ -262,7 +262,7 @@ typedef struct mi_heap_area_s {
 
 typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
 
-mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
+mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
 
 // Experimental
 mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
@@ -292,9 +292,13 @@ mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t a
 // Experimental: allow sub-processes whose memory segments stay separated (and no reclamation between them)
 // Used for example for separate interpreter's in one process.
 typedef void* mi_subproc_id_t;
+mi_decl_export mi_subproc_id_t mi_subproc_main(void);
 mi_decl_export mi_subproc_id_t mi_subproc_new(void);
-mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc);
-mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet)
+mi_decl_export void mi_subproc_delete(mi_subproc_id_t subproc);
+mi_decl_export void mi_subproc_add_current_thread(mi_subproc_id_t subproc); // this should be called right after a thread is created (and no allocation has taken place yet)
+
+// Experimental: visit abandoned heap areas (from threads that have been terminated)
+mi_decl_export bool mi_abandoned_visit_blocks(mi_subproc_id_t subproc_id, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
 
 // deprecated
 mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
@@ -355,6 +359,7 @@ typedef enum mi_option_e {
   mi_option_abandoned_reclaim_on_free, // allow to reclaim an abandoned segment on a free (=1)
   mi_option_disallow_arena_alloc,      // 1 = do not use arena's for allocation (except if using specific arena id's)
   mi_option_retry_on_oom,              // retry on out-of-memory for N milli seconds (=400), set to 0 to disable retries. (only on windows)
+  mi_option_visit_abandoned,           // allow visiting heap blocks from abandoned threads (=0)
   _mi_option_last,
   // legacy option names
   mi_option_large_os_pages = mi_option_allow_large_os_pages,
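The new declarations above pair the experimental sub-process API with a way to visit blocks that were abandoned by terminated threads. Below is a minimal usage sketch, not part of this commit: the count_blocks callback is made up, heap_tag of -1 is assumed to mean "any heap tag", and enabling mi_option_visit_abandoned before threads abandon segments (for example via MIMALLOC_VISIT_ABANDONED=1 in the environment) is also an assumption.

#include <mimalloc.h>
#include <stdio.h>

// Hypothetical visitor: count the live blocks reported back.
// The signature matches the mi_block_visit_fun typedef above.
static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
                         void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)block_size;
  if (block != NULL) { (*(size_t*)arg)++; }
  return true;  // returning false would stop the visit early
}

int main(void) {
  // Assumed: mi_option_visit_abandoned must be enabled at startup so that
  // abandoned segments are kept visitable.
  size_t count = 0;
  mi_subproc_id_t subproc = mi_subproc_main();   // the default sub-process
  // heap_tag -1 is assumed to mean "any heap tag"; visit_blocks=true asks for
  // per-block callbacks rather than per-area only.
  mi_abandoned_visit_blocks(subproc, -1, true, &count_blocks, &count);
  printf("abandoned blocks visited: %zu\n", count);
  return 0;
}

A sub-interpreter scenario would instead create its own id with mi_subproc_new(), register its threads with mi_subproc_add_current_thread(), pass that id to mi_abandoned_visit_blocks(), and eventually clean up with mi_subproc_delete().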

include/mimalloc/atomic.h  (+51 -32)

@@ -14,7 +14,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #define WIN32_LEAN_AND_MEAN
 #endif
 #include <windows.h>
-#elif !defined(_WIN32) && (defined(__EMSCRIPTEN_SHARED_MEMORY__) || !defined(__wasi__))
+#elif !defined(__wasi__) && (!defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__))
 #define MI_USE_PTHREADS
 #include <pthread.h>
 #endif
@@ -35,9 +35,9 @@ terms of the MIT license. A copy of the license can be found in the file
 #define mi_atomic(name) std::atomic_##name
 #define mi_memory_order(name) std::memory_order_##name
 #if (__cplusplus >= 202002L) // c++20, see issue #571
-#define MI_ATOMIC_VAR_INIT(x) x
+#define MI_ATOMIC_VAR_INIT(x) x
 #elif !defined(ATOMIC_VAR_INIT)
-#define MI_ATOMIC_VAR_INIT(x) x
+#define MI_ATOMIC_VAR_INIT(x) x
 #else
 #define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
 #endif
@@ -337,6 +337,7 @@ typedef _Atomic(uintptr_t) mi_atomic_guard_t;
 // ----------------------------------------------------------------------
 // Yield
 // ----------------------------------------------------------------------
+
 #if defined(__cplusplus)
 #include <thread>
 static inline void mi_atomic_yield(void) {
@@ -401,59 +402,73 @@ static inline void mi_atomic_yield(void) {
 
 
 // ----------------------------------------------------------------------
-// Locks are only used for abandoned segment visiting
+// Locks are only used for abandoned segment visiting in `arena.c`
 // ----------------------------------------------------------------------
+
 #if defined(_WIN32)
 
-#define mi_lock_t CRITICAL_SECTION
+#define mi_lock_t CRITICAL_SECTION
 
-static inline bool _mi_prim_lock(mi_lock_t* lock) {
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+  return TryEnterCriticalSection(lock);
+}
+static inline bool mi_lock_acquire(mi_lock_t* lock) {
   EnterCriticalSection(lock);
   return true;
 }
-
-static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
-  return TryEnterCriticalSection(lock);
-}
-
-static inline void _mi_prim_unlock(mi_lock_t* lock) {
+static inline void mi_lock_release(mi_lock_t* lock) {
   LeaveCriticalSection(lock);
 }
+static inline void mi_lock_init(mi_lock_t* lock) {
+  InitializeCriticalSection(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+  DeleteCriticalSection(lock);
+}
 
 
 #elif defined(MI_USE_PTHREADS)
 
 #define mi_lock_t pthread_mutex_t
 
-static inline bool _mi_prim_lock(mi_lock_t* lock) {
-  return (pthread_mutex_lock(lock) == 0);
-}
-
-static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
   return (pthread_mutex_trylock(lock) == 0);
 }
-
-static inline void _mi_prim_unlock(mi_lock_t* lock) {
+static inline bool mi_lock_acquire(mi_lock_t* lock) {
+  return (pthread_mutex_lock(lock) == 0);
+}
+static inline void mi_lock_release(mi_lock_t* lock) {
   pthread_mutex_unlock(lock);
 }
+static inline void mi_lock_init(mi_lock_t* lock) {
+  (void)(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+  (void)(lock);
+}
+
 
 #elif defined(__cplusplus)
 
 #include <mutex>
 #define mi_lock_t std::mutex
 
-static inline bool _mi_prim_lock(mi_lock_t* lock) {
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
+  return lock->lock_try_acquire();
+}
+static inline bool mi_lock_acquire(mi_lock_t* lock) {
   lock->lock();
   return true;
 }
-
-static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
-  return (lock->try_lock();
-}
-
-static inline void _mi_prim_unlock(mi_lock_t* lock) {
+static inline void mi_lock_release(mi_lock_t* lock) {
   lock->unlock();
 }
+static inline void mi_lock_init(mi_lock_t* lock) {
+  (void)(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+  (void)(lock);
+}
 
 #else
 
@@ -462,22 +477,26 @@ static inline void _mi_prim_unlock(mi_lock_t* lock) {
 
 #define mi_lock_t _Atomic(uintptr_t)
 
-static inline bool _mi_prim_try_lock(mi_lock_t* lock) {
+static inline bool mi_lock_try_acquire(mi_lock_t* lock) {
   uintptr_t expected = 0;
   return mi_atomic_cas_strong_acq_rel(lock, &expected, (uintptr_t)1);
 }
-
-static inline bool _mi_prim_lock(mi_lock_t* lock) {
+static inline bool mi_lock_acquire(mi_lock_t* lock) {
   for (int i = 0; i < 1000; i++) { // for at most 1000 tries?
-    if (_mi_prim_try_lock(lock)) return true;
+    if (mi_lock_try_acquire(lock)) return true;
     mi_atomic_yield();
   }
   return true;
 }
-
-static inline void _mi_prim_unlock(mi_lock_t* lock) {
+static inline void mi_lock_release(mi_lock_t* lock) {
   mi_atomic_store_release(lock, (uintptr_t)0);
 }
+static inline void mi_lock_init(mi_lock_t* lock) {
+  mi_lock_release(lock);
+}
+static inline void mi_lock_done(mi_lock_t* lock) {
+  (void)(lock);
+}
 
 #endif

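The lock primitives are renamed from _mi_prim_lock/_mi_prim_try_lock/_mi_prim_unlock to mi_lock_acquire/mi_lock_try_acquire/mi_lock_release, and gain explicit mi_lock_init/mi_lock_done lifetime hooks (real work on Windows, no-ops on the pthread and C++ backends, a plain release on the fallback spin lock). Below is a minimal sketch of how the wrappers compose, assuming the internal mimalloc/atomic.h header can be included directly; the actual callers are in arena.c, which is not part of this excerpt, and visit_lock/visits are made-up names:

#include "mimalloc/atomic.h"   // internal header providing mi_lock_t and the wrappers

static mi_lock_t visit_lock;   // relies on static zero-init on the pthread/C++ backends in this commit
static size_t    visits;       // made-up shared state, for illustration only

static void visit_guarded(void) {
  mi_lock_init(&visit_lock);              // InitializeCriticalSection on Windows, no-op elsewhere
  if (mi_lock_try_acquire(&visit_lock)) { // non-blocking attempt
    visits++;
    mi_lock_release(&visit_lock);
  }
  if (mi_lock_acquire(&visit_lock)) {     // blocking acquire; returns true on success
    visits++;
    mi_lock_release(&visit_lock);
  }
  mi_lock_done(&visit_lock);              // DeleteCriticalSection on Windows, no-op elsewhere
}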
include/mimalloc/internal.h  (+7 -3)

@@ -79,11 +79,12 @@ extern mi_decl_cache_align const mi_page_t _mi_page_empty;
 bool _mi_is_main_thread(void);
 size_t _mi_current_thread_count(void);
 bool _mi_preloading(void); // true while the C runtime is not initialized yet
-mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
-mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
 void _mi_thread_done(mi_heap_t* heap);
 void _mi_thread_data_collect(void);
 void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
+mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
+mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
+mi_subproc_t* _mi_subproc_from_id(mi_subproc_id_t subproc_id);
 
 // os.c
 void _mi_os_init(void); // called from process init
@@ -136,7 +137,7 @@ typedef struct mi_arena_field_cursor_s { // abstract struct
   mi_subproc_t* subproc;
 } mi_arena_field_cursor_t;
 void _mi_arena_field_cursor_init(mi_heap_t* heap, mi_subproc_t* subproc, mi_arena_field_cursor_t* current);
-mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous);
+mi_segment_t* _mi_arena_segment_clear_abandoned_next(mi_arena_field_cursor_t* previous, bool visit_all);
 
 // "segment-map.c"
 void _mi_segment_map_allocated_at(const mi_segment_t* segment);
@@ -158,6 +159,7 @@ void _mi_segments_collect(bool force, mi_segments_tld_t* tld);
 void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
 void _mi_abandoned_await_readers(void);
 bool _mi_segment_attempt_reclaim(mi_heap_t* heap, mi_segment_t* segment);
+bool _mi_segment_visit_blocks(mi_segment_t* segment, int heap_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
 
 // "page.c"
 void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
@@ -189,6 +191,8 @@ void _mi_heap_set_default_direct(mi_heap_t* heap);
 bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
 void _mi_heap_unsafe_destroy_all(void);
 mi_heap_t* _mi_heap_by_tag(mi_heap_t* heap, uint8_t tag);
+void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
+bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t* page, mi_block_visit_fun* visitor, void* arg);
 
 // "stats.c"
 void _mi_stats_done(mi_stats_t* stats);
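The internal additions outline how the public visitor is plumbed through: _mi_subproc_from_id() resolves the public id, the arena field cursor enumerates abandoned segments for one sub-process (now with a visit_all flag), and _mi_segment_visit_blocks() plus the newly exposed _mi_heap_area_init()/_mi_heap_area_visit_blocks() walk the pages of each segment. The sketch below is a rough reading of those signatures, not the actual arena.c implementation; the NULL heap argument, the meaning of visit_all, and the omitted re-abandon and locking steps are all assumptions.

// Rough sketch (not the real implementation) of how mi_abandoned_visit_blocks
// could be wired through the internal entry points declared above.
static bool visit_abandoned_sketch(mi_subproc_id_t subproc_id, int heap_tag,
                                   bool visit_blocks, mi_block_visit_fun* visitor, void* arg)
{
  mi_subproc_t* subproc = _mi_subproc_from_id(subproc_id);
  mi_arena_field_cursor_t current;
  _mi_arena_field_cursor_init(NULL /*no reclaiming heap: assumption*/, subproc, &current);
  mi_segment_t* segment;
  while ((segment = _mi_arena_segment_clear_abandoned_next(&current, true /*visit_all*/)) != NULL) {
    // Visit the blocks of this abandoned segment for heaps with the given tag.
    bool ok = _mi_segment_visit_blocks(segment, heap_tag, visit_blocks, visitor, arg);
    // The real code must mark the segment as abandoned again after visiting;
    // that step (and the locking that guards concurrent visitors) is omitted here.
    if (!ok) return false;
  }
  return true;
}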
