Commit 710d613

refactor thread meta-data initialization, upstream of python/cpython#113263
1 parent 66052f1

3 files changed, +42 -27 lines changed

include/mimalloc/internal.h (+2)

@@ -88,6 +88,7 @@ mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
 mi_heap_t* _mi_heap_main_get(void);  // statically allocated main backing heap
 void _mi_thread_done(mi_heap_t* heap);
 void _mi_thread_data_collect(void);
+void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);

 // os.c
 void _mi_os_init(void);  // called from process init
@@ -183,6 +184,7 @@ size_t _mi_bin_size(uint8_t bin); // for stats
 uint8_t _mi_bin(size_t size);  // for stats

 // "heap.c"
+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id);
 void _mi_heap_destroy_pages(mi_heap_t* heap);
 void _mi_heap_collect_abandon(mi_heap_t* heap);
 void _mi_heap_set_default_direct(mi_heap_t* heap);
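
The two newly exported entry points are exactly the pieces that src/init.c composes further down when it sets up a fresh thread. A minimal embedder-side sketch of that call order, assuming access to the internal header (include path assumed) and caller-provided storage for the tld/heap pair; this is not code from the commit, just the same sequence used by _mi_thread_heap_init:

    // Sketch only: mirrors the order used by _mi_thread_heap_init() in src/init.c.
    #include "mimalloc/internal.h"   // assumed include path for the internal API

    static void thread_metadata_setup(mi_tld_t* tld, mi_heap_t* heap) {
      _mi_tld_init(tld, heap);                        // zero the tld and record its backing heap
      _mi_heap_init(heap, tld, _mi_arena_id_none());  // copy _mi_heap_empty, seed RNG, set cookie/keys
      _mi_heap_set_default_direct(heap);              // make it this thread's default heap
    }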

src/heap.c (+23 -10)

@@ -123,14 +123,17 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   const bool force = (collect >= MI_FORCE);
   _mi_deferred_free(heap, force);

+  // python/cpython#112532: we may be called from a thread that is not the owner of the heap
+  const bool is_main_thread = (_mi_is_main_thread() && heap->thread_id == _mi_thread_id());
+
   // note: never reclaim on collect but leave it to threads that need storage to reclaim
   if (
   #ifdef NDEBUG
     collect == MI_FORCE
   #else
     collect >= MI_FORCE
   #endif
-    && _mi_is_main_thread() && mi_heap_is_backing(heap) && !heap->no_reclaim)
+    && is_main_thread && mi_heap_is_backing(heap) && !heap->no_reclaim)
   {
     // the main thread is abandoned (end-of-program), try to reclaim all abandoned segments.
     // if all memory is freed by now, all segments should be freed.
@@ -157,7 +160,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   _mi_segments_collect(collect == MI_FORCE, &heap->tld->segments);

   // if forced, collect thread data cache on program-exit (or shared library unload)
-  if (force && _mi_is_main_thread() && mi_heap_is_backing(heap)) {
+  if (force && is_main_thread && mi_heap_is_backing(heap)) {
     _mi_thread_data_collect();  // collect thread data cache
   }

@@ -201,19 +204,29 @@ mi_heap_t* mi_heap_get_backing(void) {
   return bheap;
 }

-mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
-  mi_heap_t* bheap = mi_heap_get_backing();
-  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
-  if (heap == NULL) return NULL;
+void _mi_heap_init(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id) {
   _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
-  heap->tld = bheap->tld;
+  heap->tld = tld;
   heap->thread_id = _mi_thread_id();
   heap->arena_id = arena_id;
-  _mi_random_split(&bheap->random, &heap->random);
-  heap->cookie = _mi_heap_random_next(heap) | 1;
+  if (heap == tld->heap_backing) {
+    _mi_random_init(&heap->random);
+  }
+  else {
+    _mi_random_split(&tld->heap_backing->random, &heap->random);
+  }
+  heap->cookie = _mi_heap_random_next(heap) | 1;
   heap->keys[0] = _mi_heap_random_next(heap);
   heap->keys[1] = _mi_heap_random_next(heap);
-  heap->no_reclaim = true;  // don't reclaim abandoned pages or otherwise destroy is unsafe
+}
+
+mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
+  mi_heap_t* bheap = mi_heap_get_backing();
+  mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
+  if (heap == NULL) return NULL;
+  _mi_heap_init(heap, bheap->tld, arena_id);
+  // don't reclaim abandoned pages or otherwise destroy is unsafe
+  heap->no_reclaim = true;
   // push on the thread local heaps list
   heap->next = heap->tld->heaps;
   heap->tld->heaps = heap;
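
For context, a minimal sketch of the python/cpython#112532 situation that the new is_main_thread check accounts for: mi_heap_collect invoked by a thread that does not own the heap. It uses only the public API; the worker/barrier scaffolding is illustrative and not taken from this commit.

    #include <mimalloc.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_barrier_t heap_ready, collect_done;
    static mi_heap_t* worker_heap;          // owned by the worker thread

    static void* worker(void* arg) {
      (void)arg;
      worker_heap = mi_heap_new();          // heap->thread_id is the worker's id
      mi_free(mi_heap_malloc(worker_heap, 64));
      pthread_barrier_wait(&heap_ready);    // hand the heap to the main thread
      pthread_barrier_wait(&collect_done);  // wait until main has finished collecting
      mi_heap_delete(worker_heap);
      return NULL;
    }

    int main(void) {
      pthread_barrier_init(&heap_ready, NULL, 2);
      pthread_barrier_init(&collect_done, NULL, 2);
      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);
      pthread_barrier_wait(&heap_ready);
      // The caller is the main thread but not the heap's owner.  With this
      // commit, the main-thread-only cleanup in mi_heap_collect_ex() also
      // requires heap->thread_id == _mi_thread_id(), so a collect like this
      // is no longer treated as the owner's end-of-program collection.
      mi_heap_collect(worker_heap, true);
      pthread_barrier_wait(&collect_done);
      pthread_join(t, NULL);
      return 0;
    }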

src/init.c (+17 -17)

@@ -264,7 +264,7 @@ void _mi_thread_data_collect(void) {
 }

 // Initialize the thread local default heap, called from `mi_thread_init`
-static bool _mi_heap_init(void) {
+static bool _mi_thread_heap_init(void) {
   if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true;
   if (_mi_is_main_thread()) {
     // mi_assert_internal(_mi_heap_main.thread_id != 0);  // can happen on freeBSD where alloc is called before any initialization
@@ -280,25 +280,25 @@ static bool _mi_heap_init(void) {

     mi_tld_t* tld = &td->tld;
     mi_heap_t* heap = &td->heap;
-    _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(*heap));
-    heap->thread_id = _mi_thread_id();
-    _mi_random_init(&heap->random);
-    heap->cookie = _mi_heap_random_next(heap) | 1;
-    heap->keys[0] = _mi_heap_random_next(heap);
-    heap->keys[1] = _mi_heap_random_next(heap);
-    heap->tld = tld;
-    tld->heap_backing = heap;
-    tld->heaps = heap;
-    tld->segments.stats = &tld->stats;
-    tld->segments.os = &tld->os;
-    tld->os.stats = &tld->stats;
-    _mi_heap_set_default_direct(heap);
+    _mi_tld_init(tld, heap);  // must be before `_mi_heap_init`
+    _mi_heap_init(heap, tld, _mi_arena_id_none());
+    _mi_heap_set_default_direct(heap);
   }
   return false;
 }

+// initialize thread local data
+void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap) {
+  _mi_memzero_aligned(tld,sizeof(mi_tld_t));
+  tld->heap_backing = bheap;
+  tld->heaps = bheap;
+  tld->segments.stats = &tld->stats;
+  tld->segments.os = &tld->os;
+  tld->os.stats = &tld->stats;
+}
+
 // Free the thread local default heap (called from `mi_thread_done`)
-static bool _mi_heap_done(mi_heap_t* heap) {
+static bool _mi_thread_heap_done(mi_heap_t* heap) {
   if (!mi_heap_is_initialized(heap)) return true;

   // reset default heap
@@ -392,7 +392,7 @@ void mi_thread_init(void) mi_attr_noexcept
   // initialize the thread local default heap
   // (this will call `_mi_heap_set_default_direct` and thus set the
   //  fiber/pthread key to a non-zero value, ensuring `_mi_thread_done` is called)
-  if (_mi_heap_init()) return;  // returns true if already initialized
+  if (_mi_thread_heap_init()) return;  // returns true if already initialized

   _mi_stat_increase(&_mi_stats_main.threads, 1);
   mi_atomic_increment_relaxed(&thread_count);
@@ -424,7 +424,7 @@ void _mi_thread_done(mi_heap_t* heap)
   if (heap->thread_id != _mi_thread_id()) return;

   // abandon the thread local heap
-  if (_mi_heap_done(heap)) return;  // returns true if already ran
+  if (_mi_thread_heap_done(heap)) return;  // returns true if already ran
 }

 void _mi_heap_set_default_direct(mi_heap_t* heap) {
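
The renamed helpers bracket a thread's lifetime. A minimal sketch of that lifecycle using only the public API; mimalloc normally performs this setup lazily on a thread's first allocation, so the explicit mi_thread_init/mi_thread_done calls below are only there to make the paths above visible:

    #include <mimalloc.h>
    #include <pthread.h>

    static void* worker(void* arg) {
      (void)arg;
      mi_thread_init();           // runs _mi_thread_heap_init(): _mi_tld_init + _mi_heap_init + set default heap
      void* p = mi_malloc(128);   // served from this thread's freshly initialized default heap
      mi_free(p);
      mi_thread_done();           // eventually reaches _mi_thread_heap_done(), abandoning the local heap
      return NULL;
    }

    int main(void) {
      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);
      pthread_join(t, NULL);
      return 0;
    }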
