Diffstat (limited to 'src')
-rwxr-xr-x  src/cache_types.h    4
-rwxr-xr-x  src/cachepc.c       52
-rwxr-xr-x  src/cachepc.h       10
-rw-r--r--  src/util.c           4
4 files changed, 49 insertions, 21 deletions
diff --git a/src/cache_types.h b/src/cache_types.h
index 4bc112c..6bba73d 100755
--- a/src/cache_types.h
+++ b/src/cache_types.h
@@ -8,8 +8,8 @@
#define REMOVE_PAGE_OFFSET(ptr) ((void *) (((uintptr_t) ptr) & PAGE_MASK))
-#define GET_BIT(b, i) (((b & (1 << i)) >> i) & 1)
-#define SET_BIT(b, i) (b | (1 << i))
+#define GET_BIT(b, i) (((b) >> (i)) & 1)
+#define SET_BIT(b, i) ((b) | (1 << (i)))
/* Operate cacheline flags
* Used flags:
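
The GET_BIT/SET_BIT rewrite above parenthesizes both macro arguments and switches GET_BIT to a plain shift-then-mask. Without the parentheses, an index argument containing an operator that binds more loosely than << (a masked index, for instance) silently mis-parses. A minimal user-space sketch of the difference; the OLD_* names are made up here purely for comparison:

    #include <assert.h>

    #define OLD_GET_BIT(b, i) (((b & (1 << i)) >> i) & 1)
    #define OLD_SET_BIT(b, i) (b | (1 << i))

    #define GET_BIT(b, i) (((b) >> (i)) & 1)
    #define SET_BIT(b, i) ((b) | (1 << (i)))

    int main(void)
    {
            int idx = 4;

            /* 1 << idx & 0x7 parses as (1 << idx) & 0x7 == 0, so the old
             * macros read or set no bit at all for such an argument. */
            assert(OLD_GET_BIT(0xF0, idx & 0x7) == 0);    /* wrong: bit 4 of 0xF0 is 1 */
            assert(GET_BIT(0xF0, idx & 0x7) == 1);        /* correct */

            assert(OLD_SET_BIT(0x00, idx & 0x7) == 0x00); /* wrong: bit not set */
            assert(SET_BIT(0x00, idx & 0x7) == 0x10);     /* correct */
            return 0;
    }

Shift-then-mask also avoids building the (1 << i) mask just to read a bit.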
diff --git a/src/cachepc.c b/src/cachepc.c
index bd5f4d2..b5f8589 100755
--- a/src/cachepc.c
+++ b/src/cachepc.c
@@ -26,16 +26,16 @@ cachepc_init_counters(void)
event = event_no | (event_mask << 8);
event |= (1<< 17); /* OsUserMode bit */
event |= (1 << 22); /* enable performance counter */
- printk(KERN_INFO "Writing to msr event %d", event);
+ printk(KERN_WARNING "CachePC: Initialized event %d\n", event);
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
reg_addr = 0xc0010202;
event_no = 0x64;
event_mask = 0xC8;
event = event_no | (event_mask << 8);
- event |= (1<< 17); /* OsUserMode bit */
+ event |= (1 << 17); /* OsUserMode bit */
event |= (1 << 22); /* enable performance counter */
- printk(KERN_INFO "Writing to msr event %d", event);
+ printk(KERN_WARNING "CachePC: Initialized event %d\n", event);
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
}
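
For reference, the event value written by each wrmsr in cachepc_init_counters() is composed as below; the helper name is hypothetical and the bit positions simply mirror the code and its comments.

    #include <stdint.h>

    /* Hypothetical helper that rebuilds the event word used above:
     * event number in bits 7:0, unit mask in bits 15:8, bit 17 set per the
     * "OsUserMode bit" comment, bit 22 to enable the counter. */
    static inline uint64_t perf_event_word(uint8_t event_no, uint8_t event_mask)
    {
            uint64_t event = (uint64_t) event_no | ((uint64_t) event_mask << 8);

            event |= 1ULL << 17;
            event |= 1ULL << 22;
            return event;
    }

    /* perf_event_word(0x64, 0xC8) reproduces the value written to MSR 0xc0010202. */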
@@ -44,11 +44,12 @@ cachepc_get_ctx(cache_level cache_level)
{
cache_ctx *ctx;
- printk(KERN_INFO "CACHEPC_GET_CTX");
+ // printk(KERN_WARNING "CachePC: Getting ctx..\n");
ctx = kzalloc(sizeof(cache_ctx), GFP_KERNEL);
BUG_ON(ctx == NULL);
+ BUG_ON(cache_level != L1);
if (cache_level == L1) {
ctx->addressing = L1_ADDRESSING;
ctx->sets = L1_SETS;
@@ -68,6 +69,8 @@ cachepc_get_ctx(cache_level cache_level)
ctx->set_size = CACHELINE_SIZE * ctx->associativity;
ctx->cache_size = ctx->sets * ctx->set_size;
+ // printk(KERN_WARNING "CachePC: Getting ctx done\n");
+
return ctx;
}
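
For scale, with a typical L1d geometry of 64 sets, 8 ways and 64-byte lines (the actual L1_SETS, L1_ASSOCIATIVITY and CACHELINE_SIZE constants are defined elsewhere in the tree, so these numbers are an assumption), set_size comes out to 8 * 64 = 512 bytes and cache_size to 64 * 512 = 32 KiB.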
@@ -80,12 +83,14 @@ cachepc_prepare_ds(cache_ctx *ctx)
cacheline **cacheline_ptr_arr;
cacheline *cache_ds;
- printk(KERN_INFO "CACHEPC_BUILD_CACHE_DS");
+ //printk(KERN_WARNING "CachePC: Preparing ds..\n");
cacheline_ptr_arr = allocate_cache_ds(ctx);
cache_ds = build_cache_ds(ctx, cacheline_ptr_arr);
kfree(cacheline_ptr_arr);
+ // printk(KERN_WARNING "CachePC: Preparing ds done\n");
+
return cache_ds;
}
@@ -106,7 +111,7 @@ cachepc_print_msrmts(cacheline *head)
curr_cl = head;
do {
if (IS_FIRST(curr_cl->flags)) {
- printk(KERN_WARNING "Count for cache set %i: %llu\n",
+ printk(KERN_WARNING "CachePC: Count for cache set %i: %llu\n",
curr_cl->cache_set, curr_cl->count);
}
@@ -114,6 +119,24 @@ cachepc_print_msrmts(cacheline *head)
} while (curr_cl != head);
}
+void *
+remove_cache_set(cache_ctx *ctx, void *ptr)
+{
+ return (void *) (((uintptr_t) ptr) & ~SET_MASK(ctx->sets));
+}
+
+void
+cachepc_release_ds(cache_ctx *ctx, cacheline *ds)
+{
+ kfree(remove_cache_set(ctx, ds));
+}
+
+void
+cachepc_release_ctx(cache_ctx *ctx)
+{
+ kfree(ctx);
+}
+
/*
* Create a randomized doubly linked list with the following structure:
* set A <--> set B <--> ... <--> set X <--> set A
@@ -136,13 +159,13 @@ cacheline *build_cache_ds(cache_ctx *ctx, cacheline **cl_ptr_arr) {
idx_per_set = kzalloc(ctx->sets * sizeof(uint32_t), GFP_KERNEL);
BUG_ON(idx_per_set == NULL);
- cl_ptr_arr_sorted = kmalloc(ctx->nr_of_cachelines * sizeof(cacheline *), GFP_KERNEL);
+ cl_ptr_arr_sorted = kzalloc(ctx->nr_of_cachelines * sizeof(cacheline *), GFP_KERNEL);
BUG_ON(cl_ptr_arr_sorted == NULL);
set_len = ctx->associativity;
for (i = 0; i < ctx->nr_of_cachelines; ++i) {
- set_offset = cl_ptr_arr[i]->cache_set * set_len;
- idx_curr_set = idx_per_set[cl_ptr_arr[i]->cache_set];
+ set_offset = cl_ptr_arr[i]->cache_set * set_len;
+ idx_curr_set = idx_per_set[cl_ptr_arr[i]->cache_set];
cl_ptr_arr_sorted[set_offset + idx_curr_set] = cl_ptr_arr[i];
idx_per_set[cl_ptr_arr[i]->cache_set] += 1;
@@ -198,8 +221,8 @@ void build_randomized_list_for_cache_set(cache_ctx *ctx, cacheline **cacheline_p
curr_cl->prev = cacheline_ptr_arr[idx_map[(len - 1 + i) % len]];
curr_cl->count = 0;
- if (curr_cl == cacheline_ptr_arr[0]) {
- curr_cl->flags = SET_FIRST(DEFAULT_FLAGS);
+ if (idx_map[i] == 0) {
+ curr_cl->flags = SET_FIRST(DEFAULT_FLAGS);
curr_cl->prev->flags = SET_LAST(DEFAULT_FLAGS);
} else {
curr_cl->flags = curr_cl->flags | DEFAULT_FLAGS;
@@ -219,13 +242,13 @@ allocate_cache_ds(cache_ctx *ctx)
cacheline **cl_ptr_arr, *cl_arr;
uint32_t i;
- cl_ptr_arr = (cacheline **) kzalloc(ctx->nr_of_cachelines * sizeof(cacheline *), GFP_KERNEL);
+ cl_ptr_arr = kzalloc(ctx->nr_of_cachelines * sizeof(cacheline *), GFP_KERNEL);
BUG_ON(cl_ptr_arr == NULL);
BUG_ON(ctx->addressing != VIRTUAL);
// For virtual addressing, allocating a consecutive chunk of memory is enough
- cl_arr = (cacheline *) aligned_alloc(PAGE_SIZE, ctx->cache_size);
+ cl_arr = aligned_alloc(PAGE_SIZE, ctx->cache_size);
BUG_ON(cl_arr == NULL);
for (i = 0; i < ctx->nr_of_cachelines; ++i) {
@@ -249,10 +272,9 @@ aligned_alloc(size_t alignment, size_t size)
if (size % alignment != 0)
size = size - (size % alignment) + alignment;
- p = kmalloc(size, GFP_KERNEL);
+ p = kzalloc(size, GFP_KERNEL);
BUG_ON(((uintptr_t) p) % alignment != 0);
return p;
}
-
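The three functions added to cachepc.c above give cachepc_get_ctx()/cachepc_prepare_ds() matching release calls. cachepc_release_ds() clears the cache-set bits of the pointer via remove_cache_set() before passing it to kfree(), so the caller can hand back whatever list head cachepc_prepare_ds() returned. A minimal sketch of the intended pairing; the wrapper function is hypothetical and assumes the usual module context:

    #include "cachepc.h"

    static void cachepc_alloc_release_example(void)
    {
            cache_ctx *ctx;
            cacheline *ds;

            ctx = cachepc_get_ctx(L1);   /* only L1 is accepted, see the BUG_ON above */
            ds = cachepc_prepare_ds(ctx);

            /* ... prime/probe measurements go here ... */

            cachepc_release_ds(ctx, ds); /* kfree(remove_cache_set(ctx, ds)) */
            cachepc_release_ctx(ctx);    /* kfree(ctx) */
    }
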
diff --git a/src/cachepc.h b/src/cachepc.h
index 06f85f0..c40fc10 100755
--- a/src/cachepc.h
+++ b/src/cachepc.h
@@ -17,6 +17,8 @@ cache_ctx *cachepc_get_ctx(cache_level cl);
cacheline *cachepc_prepare_ds(cache_ctx *ctx);
void cachepc_save_msrmts(cacheline *head, const char *prefix, int index);
void cachepc_print_msrmts(cacheline *head);
+void cachepc_release_ds(cache_ctx *ctx, cacheline *ds);
+void cachepc_release_ctx(cache_ctx *ctx);
__attribute__((always_inline))
static inline cacheline *cachepc_prime(cacheline *head);
@@ -39,7 +41,7 @@ cachepc_prime(cacheline *head)
{
cacheline *curr_cl;
- printk(KERN_WARNING "PROBE");
+ //printk(KERN_WARNING "CachePC: Priming..\n");
cachepc_cpuid();
curr_cl = head;
@@ -49,6 +51,8 @@ cachepc_prime(cacheline *head)
} while(curr_cl != head);
cachepc_cpuid();
+ //printk(KERN_WARNING "CachePC: Priming done\n");
+
return curr_cl->prev;
}
@@ -128,13 +132,15 @@ cachepc_probe(cacheline *head)
{
cacheline *curr_cs;
- printk(KERN_WARNING "PROBE");
+ //printk(KERN_WARNING "CachePC: Probing..");
curr_cs = head;
do {
curr_cs = cachepc_probe_set(curr_cs);
} while (__builtin_expect(curr_cs != head, 1));
+ //printk(KERN_WARNING "CachePC: Probing done");
+
return curr_cs->next;
}
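
With the release helpers declared here and the per-call printk()s in the prime/probe inlines commented out, the hot path stays quiet during measurement. For orientation, one hypothetical measurement round; the wrapper and the exact call sequence are assumptions, only the cachepc_* functions come from this header:

    #include "cachepc.h"

    static void cachepc_measure_once_example(cacheline *ds)
    {
            cacheline *head;

            head = cachepc_prime(ds);    /* walk the whole eviction list once */
            /* ... code under observation runs here ... */
            head = cachepc_probe(head);  /* walk it again, one cache set at a time */
            cachepc_print_msrmts(head);  /* prints the per-set counts */
    }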
diff --git a/src/util.c b/src/util.c
index 83f265b..5ad3efa 100644
--- a/src/util.c
+++ b/src/util.c
@@ -12,8 +12,8 @@ random_perm(uint32_t *arr, uint32_t arr_len)
idx = idx % i;
tmp = arr[idx];
- arr[i] = arr[idx];
- arr[idx] = tmp;
+ arr[idx] = arr[i];
+ arr[i] = tmp;
}
}
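
The random_perm() change above completes the swap: the old body copied arr[idx] into arr[i] without ever writing arr[idx] back, so some values were duplicated and others lost, and the result was not a permutation. A standalone user-space sketch of the corrected swap; the surrounding loop and the rand()-based index are assumptions made only for this example, the three swap lines mirror the patch:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void random_perm(uint32_t *arr, uint32_t arr_len)
    {
            uint32_t i, idx, tmp;

            for (i = arr_len - 1; i > 0; --i) {
                    idx = (uint32_t) rand() % i;
                    tmp = arr[idx];
                    arr[idx] = arr[i];  /* fixed: complete the swap instead of */
                    arr[i] = tmp;       /* overwriting arr[i] with arr[idx]    */
            }
    }

    int main(void)
    {
            uint32_t arr[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
            uint32_t i;

            random_perm(arr, 8);
            for (i = 0; i < 8; ++i)
                    printf("%u ", arr[i]);  /* each of 0..7 appears exactly once */
            printf("\n");
            return 0;
    }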