Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Alloc refactor #1164

Closed
wants to merge 30 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
6eae6b1
Use demand paging
Oct 10, 2024
c343819
use thread_local for global and static variables
Oct 11, 2024
7b8a521
use setThreadLocal() for generated thread_local variables
Oct 18, 2024
bf3435e
deal with BLOCK_SIZE not being size_t
Oct 18, 2024
3566b84
fixed formatting
stevenmeker Oct 21, 2024
234de94
NULL -> nullptr
stevenmeker Oct 21, 2024
e34dffb
use CreateThreadLocalAddress() to get address to use for thread_local…
stevenmeker Oct 24, 2024
d47a99b
fixed formatting
stevenmeker Oct 25, 2024
93ac37c
only use CreateThreadLocalAddress() #ifdef __MACH__
stevenmeker Oct 25, 2024
fd50ff0
make global_var_address auto* rather than auto
stevenmeker Oct 25, 2024
5255bbb
use CreateThreadLocalAddress() for time_for_collection #ifdef MACH
stevenmeker Oct 25, 2024
2ec58b7
fix formatting
stevenmeker Oct 25, 2024
f6e4eba
#include "llvm/IR/IRBuilder.h"
stevenmeker Oct 28, 2024
826694f
fix formatting
stevenmeker Oct 29, 2024
8834f60
don't use thread_local for gc_enabled on Mac
stevenmeker Oct 30, 2024
aedb339
fix formatting
stevenmeker Oct 30, 2024
3351c4e
define global_var_address in __MACH__ case
stevenmeker Nov 1, 2024
e4e34ff
don't use thread_local for time_for_collection on Mac
stevenmeker Nov 4, 2024
f96eb8f
don't use thread_local for time_for_collection on Mac - fix arena.h/a…
stevenmeker Nov 4, 2024
5d235fc
struct arena -> class arena
stevenmeker Nov 13, 2024
f557cd9
deleted arena_reset(), made fresh_block() a member function
stevenmeker Nov 14, 2024
841f1ae
deleted get_arena_allocation_semispace_id()
stevenmeker Nov 14, 2024
997c8d0
clean up header file
stevenmeker Nov 14, 2024
52d1783
make arena_size() and arena_clear() into member functions
stevenmeker Nov 14, 2024
17136b5
make arena_resize_last_alloc() and get_arena_collection_semispace_id …
stevenmeker Nov 15, 2024
2bb4539
do_alloc_slow() and arena_swap_and_clear() into member functions; mak…
stevenmeker Nov 15, 2024
704840a
turned kore_arena_alloc() into a member function; deleted youngspace_…
stevenmeker Nov 15, 2024
a379fc8
turned arena_start_ptr() and arena_end_ptr() into member functions
stevenmeker Nov 15, 2024
5c92ee8
fix formatting
stevenmeker Nov 15, 2024
acf641f
Merge branch 'develop' into alloc_refactor
stevenmeker Nov 15, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
115 changes: 61 additions & 54 deletions include/runtime/arena.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,53 @@ extern "C" {

// An arena can be used to allocate objects that can then be deallocated all at
// once.
struct arena {
class arena {
public:
arena(char id)
: allocation_semispace_id(id) { }
void *kore_arena_alloc(size_t requested);

// Returns the address of the first byte that belongs in the given arena.
// Returns 0 if nothing has been allocated ever in that arena.
char *arena_start_ptr() const;

// Returns a pointer to a location holding the address of last allocated
// byte in the given arena plus 1.
// This address is 0 if nothing has been allocated ever in that arena.
char **arena_end_ptr();

// return the total number of allocatable bytes currently in the arena in its
// active semispace.
size_t arena_size() const;

// Clears the current allocation space by setting its start back to its first
// block. It is used during garbage collection to effectively collect all of the
// arena.
void arena_clear();

// Resizes the last allocation as long as the resize does not require a new
// block allocation.
// Returns the address of the byte following the last newly allocated byte when
// the resize succeeds, returns 0 otherwise.
void *arena_resize_last_alloc(ssize_t increase);

// Returns the given arena's current collection semispace ID.
// Each arena has 2 semispace IDs: one equal to the arena ID and the other equal
// to the 1's complement of the arena ID. At any time one of these semispaces
// is used for allocation and the other is used for collection.
char get_arena_collection_semispace_id() const;

// Exchanges the current allocation and collection semispaces and clears the new
// current allocation semispace by setting its start back to its first block.
// It is used before garbage collection.
void arena_swap_and_clear();

private:
void fresh_block();

// helper function for `kore_arena_alloc`. Do not call directly.
void *do_alloc_slow(size_t requested);

char *first_block;
char *block;
char *block_start;
Expand All @@ -29,78 +75,43 @@ using memory_block_header = struct {

// Macro to define a new arena with the given ID. Supports IDs ranging from 0 to
// 127.
#define REGISTER_ARENA(name, id) \
static struct arena name = {.allocation_semispace_id = (id)}
#define REGISTER_ARENA(name, id) static thread_local arena name(id)

#define MEM_BLOCK_START(ptr) \
((char *)(((uintptr_t)(ptr)-1) & ~(BLOCK_SIZE - 1)))

#ifdef __MACH__
//
// thread_local disabled for Apple
//
extern bool time_for_collection;
#else
extern thread_local bool time_for_collection;
#endif

size_t get_gc_threshold();

// Resets the given arena.
void arena_reset(struct arena *);

// Returns the given arena's current allocation semispace ID.
// Each arena has 2 semispace IDs: one equal to the arena ID and the other equal
// to the 1's complement of the arena ID. At any time one of these semispaces
// is used for allocation and the other is used for collection.
char get_arena_allocation_semispace_id(const struct arena *);

// Returns the given arena's current collection semispace ID.
// See above for details.
char get_arena_collection_semispace_id(const struct arena *);

// Returns the ID of the semispace where the given address was allocated.
// The behavior is undefined if called with an address that has not been
// allocated within an arena.
char get_arena_semispace_id_of_object(void *);

// helper function for `kore_arena_alloc`. Do not call directly.
void *do_alloc_slow(size_t, struct arena *);

// Allocates the requested number of bytes as a contiguous region and returns a
// pointer to the first allocated byte.
// If called with requested size greater than the maximum single allocation
// size, the space is allocated in a general (not garbage collected) pool.
inline void *kore_arena_alloc(struct arena *arena, size_t requested) {
if (arena->block + requested > arena->block_end) {
return do_alloc_slow(requested, arena);
inline void *arena::kore_arena_alloc(size_t requested) {
if (block + requested > block_end) {
return do_alloc_slow(requested);
}
void *result = arena->block;
arena->block += requested;
void *result = block;
block += requested;
MEM_LOG(
"Allocation at %p (size %zd), next alloc at %p (if it fits)\n", result,
requested, arena->block);
requested, block);
return result;
}

// Resizes the last allocation as long as the resize does not require a new
// block allocation.
// Returns the address of the byte following the last newly allocated byte when
// the resize succeeds, returns 0 otherwise.
void *arena_resize_last_alloc(struct arena *, ssize_t);

// Exchanges the current allocation and collection semispaces and clears the new
// current allocation semispace by setting its start back to its first block.
// It is used before garbage collection.
void arena_swap_and_clear(struct arena *);

// Clears the current allocation space by setting its start back to its first
// block. It is used during garbage collection to effectively collect all of the
// arena.
void arena_clear(struct arena *);

// Returns the address of the first byte that belongs in the given arena.
// Returns 0 if nothing has been allocated ever in that arena.
char *arena_start_ptr(const struct arena *);

// Returns a pointer to a location holding the address of last allocated
// byte in the given arena plus 1.
// This address is 0 if nothing has been allocated ever in that arena.
char **arena_end_ptr(struct arena *);

// Given a starting pointer to an address allocated in an arena and a size in
// bytes, this function returns a pointer to an address allocated in the
// same arena after size bytes from the starting pointer.
Expand All @@ -119,10 +130,6 @@ char *move_ptr(char *, size_t, char const *);
// different arenas.
ssize_t ptr_diff(char *, char *);

// return the total number of allocatable bytes currently in the arena in its
// active semispace.
size_t arena_size(const struct arena *);

// Deallocates all the memory allocated for registered arenas.
void free_all_memory(void);
}
Expand Down
4 changes: 2 additions & 2 deletions include/runtime/collect.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ using set_node = set::iterator::node_t;
using set_impl = set::iterator::tree_t;

extern "C" {
extern size_t numBytesLiveAtCollection[1 << AGE_WIDTH];
extern bool collect_old;
extern thread_local size_t numBytesLiveAtCollection[1 << AGE_WIDTH];
extern thread_local bool collect_old;
size_t get_size(uint64_t, uint16_t);
void migrate_static_roots(void);
void migrate(block **block_ptr);
Expand Down
8 changes: 7 additions & 1 deletion include/runtime/header.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,14 @@ size_t hash_k(block *);
void k_hash(block *, void *);
bool hash_enter(void);
void hash_exit(void);

#ifdef __MACH__
//
// thread_local disabled for Apple
//
extern bool gc_enabled;
#else
extern thread_local bool gc_enabled;
#endif
}

class k_elem {
Expand Down
35 changes: 32 additions & 3 deletions lib/codegen/CreateTerm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -782,18 +782,47 @@ llvm::Value *create_term::disable_gc() {
llvm::Constant *global
= module_->getOrInsertGlobal("gc_enabled", llvm::Type::getInt1Ty(ctx_));
auto *global_var = llvm::cast<llvm::GlobalVariable>(global);
#ifdef __MACH__
//
// thread_local disabled for Apple
//
/*
global_var->setThreadLocal(true);
llvm::IRBuilder b(current_block_);
auto *global_var_address = b.CreateThreadLocalAddress(global_var);
*/
auto *global_var_address = global_var;
#else
global_var->setThreadLocal(true);
auto *global_var_address = global_var;
#endif
auto *old_val = new llvm::LoadInst(
llvm::Type::getInt1Ty(ctx_), global_var, "was_enabled", current_block_);
llvm::Type::getInt1Ty(ctx_), global_var_address, "was_enabled",
current_block_);
new llvm::StoreInst(
llvm::ConstantInt::getFalse(ctx_), global_var, current_block_);
llvm::ConstantInt::getFalse(ctx_), global_var_address, current_block_);
return old_val;
}

void create_term::enable_gc(llvm::Value *was_enabled) {
llvm::Constant *global
= module_->getOrInsertGlobal("gc_enabled", llvm::Type::getInt1Ty(ctx_));
auto *global_var = llvm::cast<llvm::GlobalVariable>(global);
new llvm::StoreInst(was_enabled, global_var, current_block_);
#ifdef __MACH__
//
// thread_local disabled for Apple
//
/*
global_var->setThreadLocal(true);
llvm::IRBuilder b(current_block_);
auto *global_var_address = b.CreateThreadLocalAddress(global_var);
*/
auto *global_var_address = global_var;
#else
global_var->setThreadLocal(true);
auto *global_var_address = global_var;
#endif
new llvm::StoreInst(was_enabled, global_var_address, current_block_);
}

// We use tailcc calling convention for apply_rule_* and eval_* functions to
Expand Down
21 changes: 19 additions & 2 deletions lib/codegen/Decision.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
#include "kllvm/codegen/ProofEvent.h"
#include "kllvm/codegen/Util.h"

#include "llvm/IR/IRBuilder.h"
#include <llvm/ADT/APInt.h>
#include <llvm/ADT/SmallString.h>
#include <llvm/ADT/StringMap.h>
Expand Down Expand Up @@ -1012,9 +1013,25 @@ std::pair<std::vector<llvm::Value *>, llvm::BasicBlock *> step_function_header(

auto *collection = module->getOrInsertGlobal(
"time_for_collection", llvm::Type::getInt1Ty(module->getContext()));

#ifdef __MACH__
//
// thread_local disabled for Apple
//
/*
llvm::cast<llvm::GlobalVariable>(collection)->setThreadLocal(true);
llvm::IRBuilder b(check_collect);
auto *collection_address = b.CreateThreadLocalAddress(collection);
*/
auto *collection_address = collection;
#else
llvm::cast<llvm::GlobalVariable>(collection)->setThreadLocal(true);
auto *collection_address = collection;
#endif

auto *is_collection = new llvm::LoadInst(
llvm::Type::getInt1Ty(module->getContext()), collection, "is_collection",
check_collect);
llvm::Type::getInt1Ty(module->getContext()), collection_address,
"is_collection", check_collect);
set_debug_loc(is_collection);
auto *collect = llvm::BasicBlock::Create(
module->getContext(), "isCollect", block->getParent());
Expand Down
Loading
Loading