diff --git a/.release-notes/4368.md b/.release-notes/4368.md new file mode 100644 index 0000000000..7015cce3bd --- /dev/null +++ b/.release-notes/4368.md @@ -0,0 +1,3 @@ +## Move heap ownership info from chunk to pagemap + +The runtime now stores heap chunk ownership information in the pagemap rather than in each chunk, which improves performance. The tradeoff is that the pagemap uses a little more memory in order to realize the performance gains. diff --git a/packages/builtin_test/_test.pony b/packages/builtin_test/_test.pony index 5ac260fd8d..2d603c80c9 100644 --- a/packages/builtin_test/_test.pony +++ b/packages/builtin_test/_test.pony @@ -11,7 +11,7 @@ use @pony_alloc_final[Pointer[U8]](ctx: Pointer[None], size: USize) use @pony_exitcode[None](code: I32) use @pony_get_exitcode[I32]() use @pony_triggergc[None](ctx: Pointer[None]) -use @ponyint_pagemap_get[Pointer[None]](p: Pointer[None] tag) +use @ponyint_pagemap_get_chunk[Pointer[None]](p: Pointer[None] tag) use "pony_test" use "collections" @@ -617,12 +617,12 @@ class \nodoc\ iso _TestStringTrimInPlace is UnitTest space: USize = 0) => let copy: String ref = orig.clone() - let pre_trim_pagemap = @ponyint_pagemap_get(copy.cpointer()) + let pre_trim_pagemap = @ponyint_pagemap_get_chunk(copy.cpointer()) copy.trim_in_place(from, to) h.assert_eq[String box](expected, copy) h.assert_eq[USize](space, copy.space()) h.assert_eq[String box](expected, copy.clone()) // safe to clone - let post_trim_pagemap = @ponyint_pagemap_get(copy.cpointer()) + let post_trim_pagemap = @ponyint_pagemap_get_chunk(copy.cpointer()) if copy.space() == 0 then h.assert_eq[USize](0, post_trim_pagemap.usize()) else @@ -1480,11 +1480,11 @@ class \nodoc\ iso _TestArrayTrimInPlace is UnitTest space: USize = 0) => let copy: Array[U8] ref = orig.clone() - let pre_trim_pagemap = @ponyint_pagemap_get(copy.cpointer()) + let pre_trim_pagemap = @ponyint_pagemap_get_chunk(copy.cpointer()) copy.trim_in_place(from, to) h.assert_eq[USize](space, copy.space()) h.assert_array_eq[U8](expected, copy) - let post_trim_pagemap = @ponyint_pagemap_get(copy.cpointer()) + let post_trim_pagemap = @ponyint_pagemap_get_chunk(copy.cpointer()) if copy.space() == 0 then h.assert_eq[USize](0, post_trim_pagemap.usize()) else diff --git a/src/libponyrt/gc/gc.c b/src/libponyrt/gc/gc.c index db8297fd5a..6a3c755a2e 100644 --- a/src/libponyrt/gc/gc.c +++ b/src/libponyrt/gc/gc.c @@ -482,7 +482,8 @@ static void acq_or_rel_remote_object(pony_ctx_t* ctx, pony_actor_t* actor, void ponyint_gc_sendobject(pony_ctx_t* ctx, void* p, pony_type_t* t, int mutability) { - chunk_t* chunk = ponyint_pagemap_get(p); + pony_actor_t* actor = NULL; + chunk_t* chunk = ponyint_pagemap_get(p, &actor); // Don't gc memory that wasn't pony_allocated, but do recurse. if(chunk == NULL) @@ -492,8 +493,6 @@ void ponyint_gc_sendobject(pony_ctx_t* ctx, void* p, pony_type_t* t, return; } - pony_actor_t* actor = ponyint_heap_owner(chunk); - if(actor == ctx->current) send_local_object(ctx, p, t, mutability); else @@ -503,7 +502,8 @@ void ponyint_gc_sendobject(pony_ctx_t* ctx, void* p, pony_type_t* t, void ponyint_gc_recvobject(pony_ctx_t* ctx, void* p, pony_type_t* t, int mutability) { - chunk_t* chunk = ponyint_pagemap_get(p); + pony_actor_t* actor = NULL; + chunk_t* chunk = ponyint_pagemap_get(p, &actor); // Don't gc memory that wasn't pony_allocated, but do recurse. 
if(chunk == NULL) @@ -513,8 +513,6 @@ void ponyint_gc_recvobject(pony_ctx_t* ctx, void* p, pony_type_t* t, return; } - pony_actor_t* actor = ponyint_heap_owner(chunk); - if(actor == ctx->current) recv_local_object(ctx, p, t, mutability); else @@ -524,7 +522,8 @@ void ponyint_gc_recvobject(pony_ctx_t* ctx, void* p, pony_type_t* t, void ponyint_gc_markobject(pony_ctx_t* ctx, void* p, pony_type_t* t, int mutability) { - chunk_t* chunk = ponyint_pagemap_get(p); + pony_actor_t* actor = NULL; + chunk_t* chunk = ponyint_pagemap_get(p, &actor); // Don't gc memory that wasn't pony_allocated, but do recurse. if(chunk == NULL) @@ -534,8 +533,6 @@ void ponyint_gc_markobject(pony_ctx_t* ctx, void* p, pony_type_t* t, return; } - pony_actor_t* actor = ponyint_heap_owner(chunk); - if(actor == ctx->current) mark_local_object(ctx, chunk, p, t, mutability); else @@ -545,7 +542,8 @@ void ponyint_gc_markobject(pony_ctx_t* ctx, void* p, pony_type_t* t, void ponyint_gc_acquireobject(pony_ctx_t* ctx, void* p, pony_type_t* t, int mutability) { - chunk_t* chunk = ponyint_pagemap_get(p); + pony_actor_t* actor = NULL; + chunk_t* chunk = ponyint_pagemap_get(p, &actor); // Don't gc memory that wasn't pony_allocated, but do recurse. if(chunk == NULL) @@ -555,8 +553,6 @@ void ponyint_gc_acquireobject(pony_ctx_t* ctx, void* p, pony_type_t* t, return; } - pony_actor_t* actor = ponyint_heap_owner(chunk); - if(actor == ctx->current) acquire_local_object(ctx, p, t, mutability); else @@ -566,7 +562,8 @@ void ponyint_gc_acquireobject(pony_ctx_t* ctx, void* p, pony_type_t* t, void ponyint_gc_releaseobject(pony_ctx_t* ctx, void* p, pony_type_t* t, int mutability) { - chunk_t* chunk = ponyint_pagemap_get(p); + pony_actor_t* actor = NULL; + chunk_t* chunk = ponyint_pagemap_get(p, &actor); // Don't gc memory that wasn't pony_allocated, but do recurse. if(chunk == NULL) @@ -576,8 +573,6 @@ void ponyint_gc_releaseobject(pony_ctx_t* ctx, void* p, pony_type_t* t, return; } - pony_actor_t* actor = ponyint_heap_owner(chunk); - if(actor == ctx->current) release_local_object(ctx, p, t, mutability); else @@ -665,7 +660,7 @@ void ponyint_gc_markimmutable(pony_ctx_t* ctx, gc_t* gc) { // Mark in our heap and recurse if it wasn't already marked. 
void* p = obj->address; - chunk_t* chunk = ponyint_pagemap_get(p); + chunk_t* chunk = ponyint_pagemap_get_chunk(p); mark_local_object(ctx, chunk, p, obj->type, PONY_TRACE_IMMUTABLE); } } diff --git a/src/libponyrt/gc/objectmap.c b/src/libponyrt/gc/objectmap.c index 3ad4981e7e..96c998431d 100644 --- a/src/libponyrt/gc/objectmap.c +++ b/src/libponyrt/gc/objectmap.c @@ -71,7 +71,7 @@ void ponyint_objectmap_sweep(objectmap_t* map) if(obj->rc > 0) { - chunk_t* chunk = ponyint_pagemap_get(p); + chunk_t* chunk = ponyint_pagemap_get_chunk(p); ponyint_heap_mark_shallow(chunk, p); } else { ponyint_objectmap_clearindex(map, i); diff --git a/src/libponyrt/mem/heap.c b/src/libponyrt/mem/heap.c index 4f7a0ce327..b2e6beb956 100644 --- a/src/libponyrt/mem/heap.c +++ b/src/libponyrt/mem/heap.c @@ -10,9 +10,6 @@ typedef struct chunk_t { - // immutable - pony_actor_t* actor; - // used for pointer tagging // bit 0 (lowest bit) for keeping track of chunk type (1 = small; 0 = large) // bit 1 for keeping track of chunks to be cleared @@ -238,9 +235,9 @@ static char* get_m(chunk_t* chunk) return (char*)((uintptr_t)chunk->m & CHUNK_M_BITMASK); } -static void large_pagemap(char* m, size_t size, chunk_t* chunk) +static void large_pagemap(char* m, size_t size, chunk_t* chunk, pony_actor_t* actor) { - ponyint_pagemap_set_bulk(m, chunk, size); + ponyint_pagemap_set_bulk(m, chunk, actor, size); } static void maybe_clear_chunk(chunk_t* chunk) @@ -326,7 +323,7 @@ static void destroy_small(small_chunk_t* chunk, uint32_t mark) final_small(chunk, FORCE_ALL_FINALISERS); char* m = get_m((chunk_t*)chunk); - ponyint_pagemap_set(m, NULL); + ponyint_pagemap_set(m, NULL, NULL); POOL_FREE(block_t, m); POOL_FREE(small_chunk_t, chunk); } @@ -340,7 +337,7 @@ static void destroy_large(large_chunk_t* chunk, uint32_t mark) final_large(chunk, mark); char* m = get_m((chunk_t*)chunk); - large_pagemap(m, chunk->size, NULL); + large_pagemap(m, chunk->size, NULL, NULL); if(m != NULL) ponyint_pool_free_size(chunk->size, m); @@ -597,7 +594,6 @@ void* ponyint_heap_alloc_small(pony_actor_t* actor, heap_t* heap, } } else { small_chunk_t* n = (small_chunk_t*) POOL_ALLOC(small_chunk_t); - n->base.actor = actor; n->base.m = (char*) POOL_ALLOC(block_t); set_small_chunk_size(n, sizeclass); #ifdef USE_RUNTIMESTATS @@ -617,7 +613,7 @@ void* ponyint_heap_alloc_small(pony_actor_t* actor, heap_t* heap, set_chunk_needs_clearing((chunk_t*)n); - ponyint_pagemap_set(get_m((chunk_t*)n), (chunk_t*)n); + ponyint_pagemap_set(get_m((chunk_t*)n), (chunk_t*)n, actor); heap->small_free[sizeclass] = n; chunk = n; @@ -648,7 +644,6 @@ void* ponyint_heap_alloc_large(pony_actor_t* actor, heap_t* heap, size_t size, size = ponyint_pool_adjust_size(size); large_chunk_t* chunk = (large_chunk_t*) POOL_ALLOC(large_chunk_t); - chunk->base.actor = actor; chunk->size = size; chunk->base.m = (char*) ponyint_pool_alloc_size(size); #ifdef USE_RUNTIMESTATS @@ -666,7 +661,7 @@ void* ponyint_heap_alloc_large(pony_actor_t* actor, heap_t* heap, size_t size, // note if a finaliser needs to run or not set_large_chunk_finaliser(chunk, (track_finalisers_mask & 1)); - large_pagemap(get_m((chunk_t*)chunk), size, (chunk_t*)chunk); + large_pagemap(get_m((chunk_t*)chunk), size, (chunk_t*)chunk, actor); chunk->next = heap->large; heap->large = chunk; @@ -685,7 +680,7 @@ void* ponyint_heap_realloc(pony_actor_t* actor, heap_t* heap, void* p, actor->actorstats.heap_realloc_counter++; #endif - chunk_t* chunk = ponyint_pagemap_get(p); + chunk_t* chunk = ponyint_pagemap_get_chunk(p); // We can't realloc 
memory that wasn't pony_alloc'ed since we can't know how // much to copy from the previous location. @@ -908,18 +903,6 @@ void ponyint_heap_endgc(heap_t* heap heap->next_gc = heap_initialgc; } -pony_actor_t* ponyint_heap_owner(chunk_t* chunk) -{ - // FIX: false sharing - // reading from something that will never be written - // but is on a cache line that will often be written - // called during tracing - // actual chunk only needed for GC tracing - // all other tracing only needs the owner - // so the owner needs the chunk and everyone else just needs the owner - return chunk->actor; -} - size_t ponyint_heap_size(chunk_t* chunk) { if(get_chunk_is_small_chunk(chunk)) diff --git a/src/libponyrt/mem/heap.h b/src/libponyrt/mem/heap.h index 3ee8f5d1e9..2496e033bb 100644 --- a/src/libponyrt/mem/heap.h +++ b/src/libponyrt/mem/heap.h @@ -102,8 +102,6 @@ void ponyint_heap_endgc(heap_t* heap ); #endif -pony_actor_t* ponyint_heap_owner(chunk_t* chunk); - size_t ponyint_heap_size(chunk_t* chunk); #ifdef USE_RUNTIMESTATS diff --git a/src/libponyrt/mem/pagemap.c b/src/libponyrt/mem/pagemap.c index b3d6f7607e..88874bfc28 100644 --- a/src/libponyrt/mem/pagemap.c +++ b/src/libponyrt/mem/pagemap.c @@ -2,6 +2,7 @@ #include "pagemap.h" #include "alloc.h" +#include "ponyassert.h" #include "pool.h" #include @@ -60,8 +61,10 @@ static const pagemap_level_t level[PAGEMAP_LEVELS] = POOL_INDEX((1 << L2_MASK) * sizeof(void*)) }, #endif - { L1_SHIFT, (1 << L1_MASK) - 1, (1 << L1_MASK) * sizeof(void*), - POOL_INDEX((1 << L1_MASK) * sizeof(void*)) } + // The lowest level is sized to hold two `void*` slots per entry (the `* 2` in the + // `size`/`size_index` fields), one for the `chunk_t*` and one for the `pony_actor_t*`. + { L1_SHIFT, (1 << L1_MASK) - 1, (1 << L1_MASK) * sizeof(void*) * 2, + POOL_INDEX((1 << L1_MASK) * sizeof(void*) * 2) } }; static PONY_ATOMIC(void*) root; @@ -85,31 +88,113 @@ size_t ponyint_pagemap_alloc_size() } #endif -chunk_t* ponyint_pagemap_get(const void* addr) +chunk_t* ponyint_pagemap_get_chunk(const void* addr) { PONY_ATOMIC(void*)* next_node = &root; void* node = atomic_load_explicit(next_node, memory_order_acquire); + uintptr_t ix = 0; for(size_t i = 0; i < PAGEMAP_LEVELS; i++) { if(node == NULL) return NULL; - uintptr_t ix = ((uintptr_t)addr >> level[i].shift) & level[i].mask; - next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); - node = atomic_load_explicit(next_node, memory_order_acquire); + ix = ((uintptr_t)addr >> level[i].shift) & level[i].mask; + if (i == PAGEMAP_LEVELS - 1) + { + // at the lowest level we have to double the index since we store + // two pointers (`chunk_t*` and `pony_actor_t*`) instead of one + ix = ix * 2; + next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); + pony_assert(ix <= (uintptr_t)level[i].size); + pony_assert(ix/2 <= (uintptr_t)level[i].mask); + } + else + { + next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); + node = atomic_load_explicit(next_node, memory_order_acquire); + pony_assert(ix <= (uintptr_t)level[i].size); + pony_assert(ix <= (uintptr_t)level[i].mask); + } + } + + chunk_t* chunk = (chunk_t*)atomic_load_explicit(next_node, memory_order_acquire); + return chunk; +} + +/* This function looks up the chunk_t* and returns it, and also looks + * up the pony_actor_t* to return in the out parameter `actor`. This + * is done in order to avoid allocating a struct to return as a single + * return value while minimizing the need to traverse the pagemap multiple + * times in order to retrieve both the chunk_t* and the pony_actor_t*. 
+ * + * NOTE: the multiple atomic instructions to retrieve the chunk_t* followed + * by the pony_actor_t* should be safe because the only time the pagemap + * is updated is when a chunk_t is first allocated by an actor or when the + * chunk_t is deallocated/freed by GC and in both scenarios the only actor + * with a reference to memory that would resolve to that pagemap entry is + * the owning actor of the chunk_t. + */ +chunk_t* ponyint_pagemap_get(const void* addr, pony_actor_t** actor) +{ + PONY_ATOMIC(void*)* next_node = &root; + void* node = atomic_load_explicit(next_node, memory_order_acquire); + uintptr_t ix = 0; + + for(size_t i = 0; i < PAGEMAP_LEVELS; i++) + { + if(node == NULL) + return NULL; + + ix = ((uintptr_t)addr >> level[i].shift) & level[i].mask; + if (i == PAGEMAP_LEVELS - 1) + { + // at the lowest level we have to double the index since we store + // two pointers (`chunk_t*` and `pony_actor_t*`) instead of one + ix = ix * 2; + next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); + pony_assert(ix <= (uintptr_t)level[i].size); + pony_assert(ix/2 <= (uintptr_t)level[i].mask); + } + else + { + next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); + node = atomic_load_explicit(next_node, memory_order_acquire); + pony_assert(ix <= (uintptr_t)level[i].size); + pony_assert(ix <= (uintptr_t)level[i].mask); + } } - return (chunk_t*)node; + chunk_t* chunk = (chunk_t*)atomic_load_explicit(next_node, memory_order_acquire); + ix++; + pony_assert(ix <= (uintptr_t)level[PAGEMAP_LEVELS - 1].size); + pony_assert(ix/2 <= (uintptr_t)level[PAGEMAP_LEVELS - 1].mask); + next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); + void* pagemap_actor = atomic_load_explicit(next_node, memory_order_acquire); + *actor = (pony_actor_t*)pagemap_actor; + + return chunk; } -void ponyint_pagemap_set(const void* addr, chunk_t* chunk) +/* This function sets the chunk_t* and the pony_actor_t* in the pagemap for + * a small chunk_t (exactly one pagemap entry). + * + * NOTE: the multiple atomic instructions to set the chunk_t* followed + * by the pony_actor_t* should be safe because the only time the pagemap + * is updated is when a chunk_t is first allocated by an actor or when the + * chunk_t is deallocated/freed by GC and in both scenarios the only actor + * with a reference to memory that would resolve to that pagemap entry is + * the owning actor of the chunk_t. 
+ */ +void ponyint_pagemap_set(const void* addr, chunk_t* chunk, pony_actor_t* actor) { PONY_ATOMIC(void*)* next_node = &root; + void* node = NULL; + uintptr_t ix = 0; for(size_t i = 0; i < PAGEMAP_LEVELS; i++) { - void* node = atomic_load_explicit(next_node, memory_order_relaxed); + node = atomic_load_explicit(next_node, memory_order_relaxed); if(node == NULL) { @@ -146,14 +231,38 @@ void ponyint_pagemap_set(const void* addr, chunk_t* chunk) #endif } - uintptr_t ix = ((uintptr_t)addr >> level[i].shift) & level[i].mask; + ix = ((uintptr_t)addr >> level[i].shift) & level[i].mask; + pony_assert(ix <= (uintptr_t)level[i].size); + pony_assert(ix <= (uintptr_t)level[i].mask); + + // at the lowest level we have to double the index since we store + // two pointers (`chunk_t*` and `pony_actor_t*`) instead of one + if (i == PAGEMAP_LEVELS - 1) + ix = ix * 2; next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); } + pony_assert(ix <= (uintptr_t)level[PAGEMAP_LEVELS - 1].size); + pony_assert(ix/2 <= (uintptr_t)level[PAGEMAP_LEVELS - 1].mask); atomic_store_explicit(next_node, chunk, memory_order_release); + ix++; + pony_assert(ix <= (uintptr_t)level[PAGEMAP_LEVELS - 1].size); + pony_assert(ix/2 <= (uintptr_t)level[PAGEMAP_LEVELS - 1].mask); + next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); + atomic_store_explicit(next_node, actor, memory_order_release); } -void ponyint_pagemap_set_bulk(const void* addr, chunk_t* chunk, size_t size) +/* This function sets the chunk_t* and the pony_actor_t* in the pagemap for + * a large chunk_t (more than one pagemap entry required). + * + * NOTE: the multiple atomic instructions to set the chunk_t* followed + * by the pony_actor_t* should be safe because the only time the pagemap + * is updated is when a chunk_t is first allocated by an actor or when the + * chunk_t is deallocated/freed by GC and in both scenarios the only actor + * with a reference to memory that would resolve to that pagemap entry is + * the owning actor of the chunk_t. + */ +void ponyint_pagemap_set_bulk(const void* addr, chunk_t* chunk, pony_actor_t* actor, size_t size) { PONY_ATOMIC(void*)* next_node = NULL; void* node = NULL; @@ -205,6 +314,12 @@ void ponyint_pagemap_set_bulk(const void* addr, chunk_t* chunk, size_t size) } ix = (addr_ptr >> level[i].shift) & level[i].mask; + pony_assert(ix <= (uintptr_t)level[i].size); + pony_assert(ix <= (uintptr_t)level[i].mask); + // at the lowest level we have to double the index since we store + // two pointers (`chunk_t*` and `pony_actor_t*`) instead of one + if (i == PAGEMAP_LEVELS - 1) + ix = ix * 2; next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); } @@ -215,10 +330,17 @@ void ponyint_pagemap_set_bulk(const void* addr, chunk_t* chunk, size_t size) atomic_store_explicit(next_node, chunk, memory_order_release); addr_ptr += POOL_ALIGN; ix++; + pony_assert(ix <= (uintptr_t)level[PAGEMAP_LEVELS - 1].size); + pony_assert(ix/2 <= (uintptr_t)level[PAGEMAP_LEVELS - 1].mask); + next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); + atomic_store_explicit(next_node, actor, memory_order_release); + ix++; next_node = &(((PONY_ATOMIC_RVALUE(void*)*)node)[ix]); - // If ix is greater than mask we need to move to the next pagemap level - // segment. + // If ix/2 is greater than mask we need to move to the next pagemap level + // segment. Note: we use `ix/2` instead of `ix` since `ix` has already been + // doubled to account for our storage of two pointers instead of one, and + // `ix/2` represents the actual entry offset in the array. 
} while((addr_ptr < addr_end) && - (ix <= (uintptr_t)level[PAGEMAP_LEVELS-1].mask)); + (ix/2 <= (uintptr_t)level[PAGEMAP_LEVELS-1].mask)); } } diff --git a/src/libponyrt/mem/pagemap.h b/src/libponyrt/mem/pagemap.h index 183c527e35..a84f16f764 100644 --- a/src/libponyrt/mem/pagemap.h +++ b/src/libponyrt/mem/pagemap.h @@ -6,12 +6,15 @@ PONY_EXTERN_C_BEGIN typedef struct chunk_t chunk_t; +typedef struct pony_actor_t pony_actor_t; -chunk_t* ponyint_pagemap_get(const void* addr); +chunk_t* ponyint_pagemap_get_chunk(const void* addr); -void ponyint_pagemap_set(const void* addr, chunk_t* chunk); +chunk_t* ponyint_pagemap_get(const void* addr, pony_actor_t** actor); -void ponyint_pagemap_set_bulk(const void* addr, chunk_t* chunk, size_t size); +void ponyint_pagemap_set(const void* addr, chunk_t* chunk, pony_actor_t* actor); + +void ponyint_pagemap_set_bulk(const void* addr, chunk_t* chunk, pony_actor_t* actor, size_t size); #ifdef USE_RUNTIMESTATS /** Get the memory used by the pagemap. diff --git a/test/libponyrt/mem/heap.cc b/test/libponyrt/mem/heap.cc index 51e7f6a273..8bc149d6e8 100644 --- a/test/libponyrt/mem/heap.cc +++ b/test/libponyrt/mem/heap.cc @@ -30,8 +30,9 @@ TEST(Heap, Init) ASSERT_EQ((size_t)(64 + 1024), actor->actorstats.heap_mem_allocated); #endif - chunk_t* chunk = (chunk_t*)ponyint_pagemap_get(p); - ASSERT_EQ(actor, ponyint_heap_owner(chunk)); + pony_actor_t* pagemap_actor = NULL; + chunk_t* chunk = (chunk_t*)ponyint_pagemap_get(p, &pagemap_actor); + ASSERT_EQ(actor, pagemap_actor); size_t size = ponyint_heap_size(chunk); ASSERT_EQ(size, (size_t)128); @@ -136,8 +137,8 @@ TEST(Heap, Init) size_t large_size = (1 << 22) - 7; void* p5 = ponyint_heap_alloc(actor, &heap, large_size, TRACK_NO_FINALISERS); - chunk_t* chunk5 = (chunk_t*)ponyint_pagemap_get(p5); - ASSERT_EQ(actor, ponyint_heap_owner(chunk5)); + chunk_t* chunk5 = (chunk_t*)ponyint_pagemap_get(p5, &pagemap_actor); + ASSERT_EQ(actor, pagemap_actor); #ifdef USE_RUNTIMESTATS ASSERT_EQ((size_t)5, actor->actorstats.heap_alloc_counter); @@ -156,12 +157,14 @@ TEST(Heap, Init) while(p5_curr < p5_end) { - p5_chunk = (chunk_t*)ponyint_pagemap_get(p5_curr); + p5_chunk = (chunk_t*)ponyint_pagemap_get(p5_curr, &pagemap_actor); p5_curr += POOL_ALIGN; ASSERT_EQ(chunk5, p5_chunk); + ASSERT_EQ(actor, pagemap_actor); } - p5_chunk = (chunk_t*)ponyint_pagemap_get(p5_end); + p5_chunk = (chunk_t*)ponyint_pagemap_get(p5_end, &pagemap_actor); ASSERT_NE(chunk5, p5_chunk); + ASSERT_NE(actor, pagemap_actor); size_t size5 = ponyint_heap_size(chunk5); ASSERT_EQ(adjust_size, size5); diff --git a/test/libponyrt/mem/pagemap.cc b/test/libponyrt/mem/pagemap.cc index 7eacbbcacc..dbe04050d6 100644 --- a/test/libponyrt/mem/pagemap.cc +++ b/test/libponyrt/mem/pagemap.cc @@ -6,64 +6,86 @@ TEST(Pagemap, UnmappedIsNull) { - ASSERT_EQ(NULL, ponyint_pagemap_get((void*)0x0)); - ASSERT_EQ(NULL, ponyint_pagemap_get((void*)0x7623432D)); - ASSERT_EQ(NULL, ponyint_pagemap_get((void*)0xFFFFFFFFFFFFFFFF)); + pony_actor_t* actor = NULL; + ASSERT_EQ(NULL, ponyint_pagemap_get((void*)0x0, &actor)); + ASSERT_EQ(NULL, actor); + ASSERT_EQ(NULL, ponyint_pagemap_get((void*)0x7623432D, &actor)); + ASSERT_EQ(NULL, actor); + ASSERT_EQ(NULL, ponyint_pagemap_get((void*)0xFFFFFFFFFFFFFFFF, &actor)); + ASSERT_EQ(NULL, actor); } TEST(Pagemap, SetAndUnset) { char* m = (char*)(44 << 12); - - ponyint_pagemap_set(m, (chunk_t*)m); - ASSERT_EQ(m, (char*)ponyint_pagemap_get(m)); - - ponyint_pagemap_set(m, NULL); - ASSERT_EQ(NULL, ponyint_pagemap_get(m)); + pony_actor_t* actor = 
(pony_actor_t*)0xABCD; + pony_actor_t* pagemap_actor = NULL; + + ponyint_pagemap_set(m, (chunk_t*)m, actor); + ASSERT_EQ(m, (char*)ponyint_pagemap_get(m, &pagemap_actor)); + ASSERT_EQ(actor, pagemap_actor); + ASSERT_EQ(m, (char*)ponyint_pagemap_get_chunk(m)); + + ponyint_pagemap_set(m, NULL, NULL); + ASSERT_EQ(NULL, ponyint_pagemap_get(m, &pagemap_actor)); + ASSERT_EQ(NULL, pagemap_actor); + ASSERT_EQ(NULL, ponyint_pagemap_get_chunk(m)); } TEST(Pagemap, SubAddressing) { char* m = (char*)(99 << 12); - ponyint_pagemap_set(m, (chunk_t*)m); + ponyint_pagemap_set(m, (chunk_t*)m, (pony_actor_t*)0xABCD); char* p = m; char* end = p + 1024; + pony_actor_t* actor = NULL; while(p < end) { - ASSERT_EQ(m, (char*)ponyint_pagemap_get((chunk_t*)p)); + ASSERT_EQ(m, (char*)ponyint_pagemap_get((chunk_t*)p, &actor)); + ASSERT_EQ((pony_actor_t*)0xABCD, actor); + ASSERT_EQ(m, (char*)ponyint_pagemap_get_chunk((chunk_t*)p)); p += 64; } - ASSERT_EQ(NULL, ponyint_pagemap_get((chunk_t*)end)); + ASSERT_EQ(NULL, ponyint_pagemap_get((chunk_t*)end, &actor)); + ASSERT_EQ(NULL, actor); + ASSERT_EQ(NULL, ponyint_pagemap_get_chunk((chunk_t*)end)); } TEST(Pagemap, SetAndUnsetBulk) { char* m = (char*)(155 << 12); - size_t size = 65536; + size_t size = 8388608; - ponyint_pagemap_set_bulk(m, (chunk_t*)m, size); + ponyint_pagemap_set_bulk(m, (chunk_t*)m, (pony_actor_t*)0xABCD, size); char* p = m; char* end = p + size; + pony_actor_t* actor = NULL; while(p < end) { - ASSERT_EQ(m, (char*)ponyint_pagemap_get((chunk_t*)p)); + ASSERT_EQ(m, (char*)ponyint_pagemap_get((chunk_t*)p, &actor)); + ASSERT_EQ((pony_actor_t*)0xABCD, actor); + ASSERT_EQ(m, (char*)ponyint_pagemap_get_chunk((chunk_t*)p)); p += 1024; } - ASSERT_EQ(NULL, ponyint_pagemap_get((chunk_t*)end)); + ASSERT_EQ(NULL, ponyint_pagemap_get((chunk_t*)end, &actor)); + ASSERT_EQ(NULL, actor); + ASSERT_EQ(NULL, ponyint_pagemap_get_chunk((chunk_t*)end)); - ponyint_pagemap_set_bulk(m, NULL, size); + ponyint_pagemap_set_bulk(m, NULL, NULL, size); p = m; while(p < end) { - ASSERT_EQ(NULL, (char*)ponyint_pagemap_get((chunk_t*)p)); + ASSERT_EQ(NULL, (char*)ponyint_pagemap_get((chunk_t*)p, &actor)); + ASSERT_EQ(NULL, actor); + ASSERT_EQ(NULL, (char*)ponyint_pagemap_get_chunk((chunk_t*)p)); p += 1024; } }
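For readers following the pagemap changes above, here is a minimal, self-contained sketch (not code from this PR) of the leaf layout the diff introduces: the lowest pagemap level now stores two `void*` slots per entry, the `chunk_t*` at the doubled index and the owning `pony_actor_t*` in the adjacent slot. The `toy_*` names, the single-level array, and the `PAGE_SHIFT`/`LEAF_BITS` values are illustrative assumptions only; the real `ponyint_pagemap_*` functions walk a multi-level radix tree using C11 atomics as shown in `src/libponyrt/mem/pagemap.c`.

/* Toy illustration of the two-slot pagemap leaf layout (assumed names/values,
 * not the runtime implementation). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct toy_chunk_t { int dummy; } toy_chunk_t;
typedef struct toy_actor_t { int dummy; } toy_actor_t;

#define PAGE_SHIFT 12  /* stand-in for the pool allocation granularity */
#define LEAF_BITS  8   /* stand-in for the lowest-level mask width */
#define LEAF_MASK  ((1u << LEAF_BITS) - 1)

/* Two void* slots per entry, mirroring the `* 2` sizing of the lowest level. */
static void* leaf[(1u << LEAF_BITS) * 2];

static uintptr_t leaf_index(const void* addr)
{
  /* double the index: even slot holds the chunk, odd slot holds the actor */
  return (((uintptr_t)addr >> PAGE_SHIFT) & LEAF_MASK) * 2;
}

static void toy_pagemap_set(const void* addr, toy_chunk_t* chunk,
  toy_actor_t* actor)
{
  uintptr_t ix = leaf_index(addr);
  leaf[ix] = chunk;      /* chunk pointer in the even slot */
  leaf[ix + 1] = actor;  /* owning actor in the adjacent odd slot */
}

static toy_chunk_t* toy_pagemap_get(const void* addr, toy_actor_t** actor)
{
  uintptr_t ix = leaf_index(addr);
  *actor = (toy_actor_t*)leaf[ix + 1];
  return (toy_chunk_t*)leaf[ix];
}

int main(void)
{
  toy_chunk_t chunk;
  toy_actor_t actor;
  void* addr = (void*)(uintptr_t)(44u << PAGE_SHIFT);

  toy_pagemap_set(addr, &chunk, &actor);

  toy_actor_t* owner = NULL;
  toy_chunk_t* found = toy_pagemap_get(addr, &owner);
  assert(found == &chunk && owner == &actor);
  printf("chunk and owner recovered from a single lookup\n");
  return 0;
}

The even/odd pairing is why the patched functions double `ix` at the lowest level and why the bulk-set loop compares `ix/2` against the level mask: one lookup now yields both the chunk and its owning actor without reading the chunk header itself.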