This repository has been archived by the owner on Dec 2, 2017. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathzsmapbench.c
148 lines (123 loc) · 3.16 KB
/
zsmapbench.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
/*
* zsmapbench.c
*
* Microbenchmark for zsmalloc allocation mapping
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "linux/zsmalloc.h"
/*
 * zsmb_kthread() - per-CPU benchmark worker.
 *
 * Creates a private zsmalloc pool, allocates three objects in a size
 * class whose third allocation spans two underlying pages, then maps
 * and unmaps that spanned object in a tight loop until kthread_stop()
 * is called, reporting total cycles, mapping count, and cycles/map.
 *
 * @ptr: unused kthread payload.
 *
 * Returns 0 on success or a negative errno on setup failure.
 */
static int zsmb_kthread(void *ptr)
{
	struct zs_pool *pool;
	unsigned long *handles, completed = 0;
	cycles_t start, end, dt;
	int i, err;
	char *buf;
	/*
	 * This size is roughly 40% of PAGE_SIZE and results in an
	 * underlying zspage size of 2 pages. See the
	 * get_pages_per_zspage() function in zsmalloc for details.
	 * The third allocation in this class will span two pages.
	 */
	int size = 1632;
	int handles_nr = 3;
	int spanned_index = handles_nr - 1;

	pr_info("starting zsmb_kthread\n");

	pool = zs_create_pool("zsmapbench", GFP_NOIO | __GFP_HIGHMEM);
	if (!pool)
		return -ENOMEM;

	/* kcalloc zero-initializes, replacing the old kmalloc+memset pair. */
	handles = kcalloc(handles_nr, sizeof(*handles), GFP_KERNEL);
	if (!handles) {
		pr_err("kcalloc failed\n");
		err = -ENOMEM;
		goto destroy_pool;	/* fix: previously leaked the pool */
	}

	for (i = 0; i < handles_nr; i++) {
		handles[i] = zs_malloc(pool, size);
		if (!handles[i]) {
			pr_err("zs_malloc failed\n");
			err = -ENOMEM;
			goto free;
		}
	}

	start = get_cycles();
	while (unlikely(!kthread_should_stop())) {
		buf = zs_map_object(pool, handles[spanned_index], ZS_MM_RW);
		if (unlikely(!buf)) {
			pr_err("zs_map_object failed\n");
			err = -EINVAL;
			goto free;
		}
		zs_unmap_object(pool, handles[spanned_index]);
		completed++;
		cond_resched();
	}
	end = get_cycles();
	dt = end - start;

	pr_info("%llu cycles\n", (unsigned long long)dt);
	pr_info("%lu mappings\n", completed);
	/*
	 * fix: guard the average against a division by zero if the thread
	 * was stopped before a single map/unmap pair completed.
	 */
	if (completed)
		pr_info("%llu cycles/map\n",
			(unsigned long long)dt / completed);
	pr_info("stopping zsmb_kthread\n");

	err = 0;
free:
	for (i = 0; i < handles_nr; i++)
		if (handles[i])
			zs_free(pool, handles[i]);
	kfree(handles);		/* kfree(NULL) is a no-op; no guard needed */
destroy_pool:
	zs_destroy_pool(pool);
	return err;
}
/*
 * This benchmark isn't made to handle changes in the cpu online mask.
 * Please don't hotplug while the benchmark runs.
 */
/* One benchmark worker task per CPU, created and bound in zsmb_init(). */
static DEFINE_PER_CPU(struct task_struct *, pcpu_kthread);
/* If set, run only a single worker (on the first online CPU). */
static bool single_threaded;
module_param(single_threaded, bool, 0);
/*
 * zsmb_init() - module entry point; runs the whole benchmark.
 *
 * Creates one zsmb_kthread per online CPU (or a single one when the
 * single_threaded parameter is set), binds each to its CPU, lets them
 * run for roughly one second, then stops them all.
 *
 * Returns 0 on success or a negative errno if a worker could not be
 * created.
 */
static int __init zsmb_init(void)
{
	struct task_struct **kthread;
	int cpu, err;

	pr_info("running zsmapbench...\n");

	for_each_online_cpu(cpu) {
		kthread = per_cpu_ptr(&pcpu_kthread, cpu);
		*kthread = kthread_create(zsmb_kthread, NULL, "zsmb_kthread");
		if (IS_ERR(*kthread)) {
			/*
			 * fix: return the real errno via PTR_ERR(); the old
			 * code returned IS_ERR()'s boolean result (1).
			 */
			err = PTR_ERR(*kthread);
			*kthread = NULL;
			goto stop_created;
		}
		kthread_bind(*kthread, cpu);
		if (single_threaded)
			break;
	}

	for_each_online_cpu(cpu) {
		kthread = per_cpu_ptr(&pcpu_kthread, cpu);
		wake_up_process(*kthread);
		if (single_threaded)
			break;
	}

	/* Run for about one second */
	msleep(1000);

	for_each_online_cpu(cpu) {
		kthread = per_cpu_ptr(&pcpu_kthread, cpu);
		kthread_stop(*kthread);
		if (single_threaded)
			break;
	}

	pr_info("zsmapbench complete\n");
	return 0;

stop_created:
	/*
	 * fix: stop the workers created before the failure; the old code
	 * leaked them. Slots are created in CPU order, so the first NULL
	 * slot marks where creation stopped.
	 */
	for_each_online_cpu(cpu) {
		kthread = per_cpu_ptr(&pcpu_kthread, cpu);
		if (!*kthread)
			break;
		kthread_stop(*kthread);
	}
	return err;
}
/*
 * zsmb_exit() - module exit hook.
 *
 * Nothing to tear down here: the worker threads, pool, and handle
 * array are all created and destroyed within zsmb_init()'s run.
 */
static void __exit zsmb_exit(void)
{
	pr_info("unloading zsmapbench\n");
}
module_init(zsmb_init);
module_exit(zsmb_exit);

MODULE_LICENSE("GPL");
/*
 * fix: the author string was missing its closing '>'.
 * NOTE(review): the address itself was redacted by the hosting site;
 * restore the author's real e-mail before shipping.
 */
MODULE_AUTHOR("Seth Jennings <[email protected]>");
MODULE_DESCRIPTION("Microbenchmark for zsmalloc mapping methods");