diff options

Diffstat (limited to 'kmod')

 kmod/cachepc.c (executable, mode 100755) | 5 ++---
 kmod/cachepc.h (executable, mode 100755) | 2 ++

 2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/kmod/cachepc.c b/kmod/cachepc.c
index 9c896f3..4e07f32 100755
--- a/kmod/cachepc.c
+++ b/kmod/cachepc.c
@@ -15,7 +15,6 @@
 static cacheline *build_cache_ds(cache_ctx *ctx, cacheline **cacheline_ptr_arr);
 static void build_randomized_list_for_cache_set(cache_ctx *ctx, cacheline **cacheline_ptr_arr);
 static cacheline **allocate_cache_ds(cache_ctx *ctx);
 static uint16_t get_virt_cache_set(cache_ctx *ctx, void *ptr);
-static void *aligned_alloc(size_t alignment, size_t size);
 
 void
 cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask)
@@ -385,7 +384,7 @@ allocate_cache_ds(cache_ctx *ctx)
 	BUG_ON(ctx->addressing != VIRTUAL);
 
 	// For virtual addressing, allocating a consecutive chunk of memory is enough
-	cl_arr = aligned_alloc(PAGE_SIZE, ctx->cache_size);
+	cl_arr = cachepc_aligned_alloc(PAGE_SIZE, ctx->cache_size);
 	BUG_ON(cl_arr == NULL);
 
 	for (i = 0; i < ctx->nr_of_cachelines; ++i) {
@@ -404,7 +403,7 @@ get_virt_cache_set(cache_ctx *ctx, void *ptr)
 }
 
 void *
-aligned_alloc(size_t alignment, size_t size)
+cachepc_aligned_alloc(size_t alignment, size_t size)
 {
 	void *p;
diff --git a/kmod/cachepc.h b/kmod/cachepc.h
index 8a9521c..97746ec 100755
--- a/kmod/cachepc.h
+++ b/kmod/cachepc.h
@@ -16,6 +16,8 @@ void cachepc_release_ds(cache_ctx *ctx, cacheline *ds);
 cacheline *cachepc_prepare_victim(cache_ctx *ctx, uint32_t set);
 void cachepc_release_victim(cache_ctx *ctx, cacheline *ptr);
 
+void *cachepc_aligned_alloc(size_t alignment, size_t size);
+
 void cachepc_save_msrmts(cacheline *head);
 void cachepc_print_msrmts(cacheline *head);