
Commit b542ac4

executor: refactor struct cover_t
Instead of having a single void *data pointer that is used to both allocate the memory and access the trace, split it into:

- void *alloc of size mmap_alloc_size, used by mmap/munmap to manage the memory for kcov (may contain other data apart from the trace);
- void *trace of size trace_size, used to access the collected trace.

Future patches will introduce other collection modes that may relocate the trace, so *alloc and *trace won't be interchangeable anymore.

I tried my best to not break Darwin and BSD, but I didn't test them.
1 parent 0808a66 commit b542ac4
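
In short, the commit replaces the single data view of the coverage buffer with two. A condensed before/after sketch of the cover_t fields this commit touches (the _old/_new struct names are illustrative only; the real definition is in the executor.cc hunk below):

    // Before: one pointer was both the mmap handle and the trace cursor.
    struct cover_old_t {
    	char* data; // mmap'ed kcov buffer, also read to extract PCs
    	char* data_end; // data + mmap_alloc_size
    	intptr_t data_offset; // where the recorded PCs start within data
    };

    // After: memory management (alloc) and trace access (trace) are split.
    struct cover_new_t {
    	uint32 trace_size; // size of the collected-trace region
    	void* alloc; // memory buffer shared with kcov; the mmap/munmap unit
    	char* trace; // buffer used by kcov for collecting PCs
    	char* trace_end; // trace + trace_size
    	intptr_t trace_offset; // where the recorded PCs start within trace
    };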

File tree: 6 files changed (+79 -65 lines)


executor/executor.cc (+20 -16)
@@ -348,18 +348,22 @@ struct cover_t {
 	int fd;
 	uint32 size;
 	uint32 mmap_alloc_size;
-	char* data;
-	char* data_end;
+	uint32 trace_size;
+	// Memory buffer shared with kcov.
+	void* alloc;
+	// Buffer used by kcov for collecting PCs.
+	char* trace;
+	char* trace_end;
 	// Currently collecting comparisons.
 	bool collect_comps;
-	// Note: On everything but darwin the first value in data is the count of
-	// recorded PCs, followed by the PCs. We therefore set data_offset to the
+	// Note: On everything but darwin the first value in trace is the count of
+	// recorded PCs, followed by the PCs. We therefore set trace_offset to the
 	// size of one PC.
-	// On darwin data points to an instance of the ksancov_trace struct. Here we
-	// set data_offset to the offset between data and the structs 'pcs' member,
+	// On darwin trace points to an instance of the ksancov_trace struct. Here we
+	// set trace_offset to the offset between trace and the structs 'pcs' member,
 	// which contains the PCs.
-	intptr_t data_offset;
-	// Note: On everything but darwin this is 0, as the PCs contained in data
+	intptr_t trace_offset;
+	// Note: On everything but darwin this is 0, as the PCs contained in trace
 	// are already correct. XNUs KSANCOV API, however, chose to always squeeze
 	// PCs into 32 bit. To make the recorded PC fit, KSANCOV substracts a fixed
 	// offset (VM_MIN_KERNEL_ADDRESS for AMD64) and then truncates the result to
@@ -1170,8 +1174,8 @@ uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, int index, cover_t* cov
 	// Write out feedback signals.
 	// Currently it is code edges computed as xor of two subsequent basic block PCs.
 	fbb.StartVector(0, sizeof(uint64));
-	cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
-	if ((char*)(cover_data + cov->size) > cov->data_end)
+	cover_data_t* cover_data = (cover_data_t*)(cov->trace + cov->trace_offset);
+	if ((char*)(cover_data + cov->size) > cov->trace_end)
 		failmsg("too much cover", "cov=%u", cov->size);
 	uint32 nsig = 0;
 	cover_data_t prev_pc = 0;
@@ -1204,7 +1208,7 @@ template <typename cover_data_t>
 uint32 write_cover(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
 {
 	uint32 cover_size = cov->size;
-	cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
+	cover_data_t* cover_data = (cover_data_t*)(cov->trace + cov->trace_offset);
 	if (flag_dedup_cover) {
 		cover_data_t* end = cover_data + cover_size;
 		std::sort(cover_data, end);
@@ -1220,11 +1224,11 @@ uint32 write_cover(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
 uint32 write_comparisons(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
 {
 	// Collect only the comparisons
-	uint64 ncomps = *(uint64_t*)cov->data;
-	kcov_comparison_t* cov_start = (kcov_comparison_t*)(cov->data + sizeof(uint64));
-	if ((char*)(cov_start + ncomps) > cov->data_end)
+	uint64 ncomps = *(uint64_t*)cov->trace;
+	kcov_comparison_t* cov_start = (kcov_comparison_t*)(cov->trace + sizeof(uint64));
+	if ((char*)(cov_start + ncomps) > cov->trace_end)
 		failmsg("too many comparisons", "ncomps=%llu", ncomps);
-	cov->overflow = ((char*)(cov_start + ncomps + 1) > cov->data_end);
+	cov->overflow = ((char*)(cov_start + ncomps + 1) > cov->trace_end);
 	rpc::ComparisonRaw* start = (rpc::ComparisonRaw*)cov_start;
 	rpc::ComparisonRaw* end = start;
 	// We will convert kcov_comparison_t to ComparisonRaw inplace.
@@ -1456,7 +1460,7 @@ void thread_create(thread_t* th, int id, bool need_coverage)
 
 void thread_mmap_cover(thread_t* th)
 {
-	if (th->cov.data != NULL)
+	if (th->cov.alloc != NULL)
 		return;
 	cover_mmap(&th->cov);
 	cover_protect(&th->cov);
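
On everything but Darwin, the trace layout that write_signal and write_cover rely on is a word-sized record count followed by the recorded PCs, with trace_offset set to the size of one PC. A minimal read-path sketch, assuming a 64-bit kernel (walk_trace and its loop body are illustrative, not part of the commit):

    // Sketch: walking the kcov trace after cover_collect() on a 64-bit
    // kernel. Layout: [count][pc0][pc1]..., trace_offset == sizeof(uint64).
    static void walk_trace(cover_t* cov)
    {
    	uint64 count = *(uint64*)cov->trace; // first word: number of PCs
    	uint64* pcs = (uint64*)(cov->trace + cov->trace_offset); // PCs follow
    	for (uint64 i = 0; i < count; i++)
    		(void)pcs[i]; // each entry is one recorded PC
    }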

executor/executor_bsd.h (+12 -10)
@@ -106,15 +106,17 @@ static void cover_open(cover_t* cov, bool extra)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	void* mmap_ptr = mmap(NULL, cov->mmap_alloc_size, PROT_READ | PROT_WRITE,
 			      MAP_SHARED, cov->fd, 0);
 	if (mmap_ptr == MAP_FAILED)
 		fail("cover mmap failed");
-	cov->data = (char*)mmap_ptr;
-	cov->data_end = cov->data + cov->mmap_alloc_size;
-	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+	cov->alloc = mmap_ptr;
+	cov->trace = (char*)cov->alloc;
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace_end = cov->alloc + cov->trace_size;
+	cov->trace_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	cov->pc_offset = 0;
 }
 
@@ -124,7 +126,7 @@ static void cover_protect(cover_t* cov)
 	size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
 	long page_size = sysconf(_SC_PAGESIZE);
 	if (page_size > 0)
-		mprotect(cov->data + page_size, mmap_alloc_size - page_size,
+		mprotect(cov->alloc + page_size, mmap_alloc_size - page_size,
 			 PROT_READ);
 #elif GOOS_openbsd
 	int mib[2], page_size;
@@ -133,18 +135,18 @@ static void cover_protect(cover_t* cov)
 	mib[1] = HW_PAGESIZE;
 	size_t len = sizeof(page_size);
 	if (sysctl(mib, ARRAY_SIZE(mib), &page_size, &len, NULL, 0) != -1)
-		mprotect(cov->data + page_size, mmap_alloc_size - page_size, PROT_READ);
+		mprotect(cov->alloc + page_size, mmap_alloc_size - page_size, PROT_READ);
 #endif
 }
 
 static void cover_unprotect(cover_t* cov)
 {
 #if GOOS_freebsd
 	size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
-	mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+	mprotect(cov->alloc, mmap_alloc_size, PROT_READ | PROT_WRITE);
 #elif GOOS_openbsd
 	size_t mmap_alloc_size = kCoverSize * sizeof(uintptr_t);
-	mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+	mprotect(cov->alloc, mmap_alloc_size, PROT_READ | PROT_WRITE);
 #endif
 }
 
@@ -171,12 +173,12 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 
 static void cover_reset(cover_t* cov)
 {
-	*(uint64*)cov->data = 0;
+	*(uint64*)cov->trace = 0;
 }
 
 static void cover_collect(cover_t* cov)
 {
-	cov->size = *(uint64*)cov->data;
+	cov->size = *(uint64*)cov->trace;
 }
 
 #if GOOS_netbsd
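
On BSD the new trace view still aliases the entire allocation, so the split is purely about which field each call site uses: mprotect manages alloc, while cover_reset and cover_collect read through trace. A sketch of the invariants that should hold right after the BSD cover_mmap() above (the checking function is illustrative, not in the commit):

    #include <assert.h>

    // Sketch: invariants after cover_mmap() on BSD, per the hunk above.
    static void check_bsd_cover_mmap(cover_t* cov)
    {
    	assert(cov->trace == (char*)cov->alloc); // trace starts at the allocation
    	assert(cov->trace_size == cov->mmap_alloc_size); // and spans all of it
    	assert(cov->trace_end == cov->trace + cov->trace_size);
    }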

executor/executor_darwin.h (+9 -7)
@@ -73,7 +73,7 @@ static void cover_open(cover_t* cov, bool extra)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	uintptr_t mmap_ptr = 0;
 	if (ksancov_map(cov->fd, &mmap_ptr, &cov->mmap_alloc_size))
@@ -84,8 +84,10 @@ static void cover_mmap(cover_t* cov)
 	if (cov->mmap_alloc_size > kCoverSize)
 		fail("mmap allocation size larger than anticipated");
 
-	cov->data = (char*)mmap_ptr;
-	cov->data_end = cov->data + cov->mmap_alloc_size;
+	cov->alloc = mmap_ptr;
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace = (char*)cov->alloc;
+	cov->trace_end = cov->trace + cov->trace_size;
 }
 
 static void cover_protect(cover_t* cov)
@@ -110,14 +112,14 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 
 static void cover_reset(cover_t* cov)
 {
-	ksancov_reset((struct ksancov_header*)cov->data);
-	ksancov_start((struct ksancov_header*)cov->data);
+	ksancov_reset((struct ksancov_header*)cov->trace);
+	ksancov_start((struct ksancov_header*)cov->trace);
 }
 
 static void cover_collect(cover_t* cov)
 {
-	struct ksancov_trace* trace = (struct ksancov_trace*)cov->data;
+	struct ksancov_trace* trace = (struct ksancov_trace*)cov->trace;
 	cov->size = ksancov_trace_head(trace);
-	cov->data_offset = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->data));
+	cov->trace_offset = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->trace));
 	cov->pc_offset = trace->offset;
 }
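
The trace_offset computation in the Darwin cover_collect() measures the distance from the start of the ksancov_trace struct to its pcs member. Assuming cov->trace points at the struct start, an equivalent formulation would be offsetof (a sketch, not what the commit uses):

    #include <stddef.h> // offsetof

    // Sketch: equivalent to ((int64_t)&(trace->pcs)) - ((int64_t)(cov->trace))
    // when cov->trace points at the beginning of the ksancov_trace struct.
    cov->trace_offset = offsetof(struct ksancov_trace, pcs);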

executor/executor_linux.h (+17 -13)
@@ -130,7 +130,7 @@ static void cover_unprotect(cover_t* cov)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	if (cov->mmap_alloc_size == 0)
 		fail("cover_t structure is corrupted");
@@ -140,14 +140,17 @@ static void cover_mmap(cover_t* cov)
 	if (mapped == MAP_FAILED)
 		exitf("failed to preallocate kcov buffer");
 	// Now map the kcov buffer to the file, overwriting the existing mapping above.
-	cov->data = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size,
-				PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, 0);
-	if (cov->data == MAP_FAILED)
+	cov->alloc = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size,
+				 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, 0);
+	if (cov->alloc == MAP_FAILED)
 		exitf("cover mmap failed");
-	if (pkeys_enabled && pkey_mprotect(cov->data, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY))
+	if (pkeys_enabled && pkey_mprotect(cov->alloc, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY))
 		exitf("failed to pkey_mprotect kcov buffer");
-	cov->data_end = cov->data + cov->mmap_alloc_size;
-	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+	cov->trace = (char*)cov->alloc;
+	// TODO(glider): trace size may be different from mmap_alloc_size in the future.
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace_end = cov->trace + cov->mmap_alloc_size;
+	cov->trace_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	cov->pc_offset = 0;
 }
 
@@ -185,16 +188,16 @@ static void cover_reset(cover_t* cov)
 		cov = &current_thread->cov;
 	}
 	cover_unprotect(cov);
-	*(uint64*)cov->data = 0;
+	*(uint64*)cov->trace = 0;
 	cover_protect(cov);
 	cov->overflow = false;
 }
 
 template <typename cover_data_t>
 static void cover_collect_impl(cover_t* cov)
 {
-	cov->size = *(cover_data_t*)cov->data;
-	cov->overflow = (cov->data + (cov->size + 2) * sizeof(cover_data_t)) > cov->data_end;
+	cov->size = *(cover_data_t*)cov->trace;
+	cov->overflow = (cov->trace + (cov->size + 2) * sizeof(cover_data_t)) > cov->trace_end;
 }
 
 static void cover_collect(cover_t* cov)
@@ -285,8 +288,8 @@ static const char* setup_delay_kcov()
 	cov.fd = kCoverFd;
 	cover_open(&cov, false);
 	cover_mmap(&cov);
-	char* first = cov.data;
-	cov.data = nullptr;
+	char* first = (char*)cov.alloc;
+	cov.alloc = nullptr;
 	cover_mmap(&cov);
 	// If delayed kcov mmap is not supported by the kernel,
 	// accesses to the second mapping will crash.
@@ -298,7 +301,8 @@ static const char* setup_delay_kcov()
 			fail("clock_gettime failed");
 		error = "kernel commit b3d7fe86fbd0 is not present";
 	} else {
-		munmap(cov.data - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
+		void* region = (void*)((char*)cov.alloc - SYZ_PAGE_SIZE);
+		munmap(region, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
 	}
 	munmap(first - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
 	close(cov.fd);
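
For orientation: the Linux cover_mmap() first reserves mmap_alloc_size + 2 * SYZ_PAGE_SIZE bytes and then maps the kcov fd over the middle of that reservation, leaving an inaccessible guard page on each side; that is why the munmap calls above start one page before the buffer. A sketch of the resulting layout, inferred from the hunks above:

    // Address-space layout after the Linux cover_mmap():
    //
    //   mapped         mapped + SYZ_PAGE_SIZE        + mmap_alloc_size
    //   | guard page | kcov buffer (cov->alloc) ... | guard page |
    //
    // So releasing the whole reservation is, as in setup_delay_kcov():
    //   munmap((char*)cov->alloc - SYZ_PAGE_SIZE,
    //          cov->mmap_alloc_size + 2 * SYZ_PAGE_SIZE);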

executor/executor_test.h (+20 -18)
@@ -36,7 +36,7 @@ static void os_init(int argc, char** argv, void* data, size_t data_size)
 
 extern "C" notrace void __sanitizer_cov_trace_pc(void)
 {
-	if (current_thread == nullptr || current_thread->cov.data == nullptr || current_thread->cov.collect_comps)
+	if (current_thread == nullptr || current_thread->cov.trace == nullptr || current_thread->cov.collect_comps)
 		return;
 	uint64 pc = (uint64)__builtin_return_address(0);
 	// Convert to what is_kernel_pc will accept as valid coverage;
@@ -45,16 +45,16 @@ extern "C" notrace void __sanitizer_cov_trace_pc(void)
 	// because it must not be instrumented which is hard to achieve for all compiler
 	// if the code is in a separate function.
 	if (is_kernel_64_bit) {
-		uint64* start = (uint64*)current_thread->cov.data;
-		uint64* end = (uint64*)current_thread->cov.data_end;
+		uint64* start = (uint64*)current_thread->cov.trace;
+		uint64* end = (uint64*)current_thread->cov.trace_end;
 		uint64 pos = start[0];
 		if (start + pos + 1 < end) {
 			start[0] = pos + 1;
 			start[pos + 1] = pc;
 		}
 	} else {
-		uint32* start = (uint32*)current_thread->cov.data;
-		uint32* end = (uint32*)current_thread->cov.data_end;
+		uint32* start = (uint32*)current_thread->cov.trace;
+		uint32* end = (uint32*)current_thread->cov.trace_end;
 		uint32 pos = start[0];
 		if (start + pos + 1 < end) {
 			start[0] = pos + 1;
@@ -85,15 +85,15 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 
 static void cover_reset(cover_t* cov)
 {
-	*(uint64*)(cov->data) = 0;
+	*(uint64*)(cov->trace) = 0;
 }
 
 static void cover_collect(cover_t* cov)
 {
 	if (is_kernel_64_bit)
-		cov->size = *(uint64*)cov->data;
+		cov->size = *(uint64*)cov->trace;
 	else
-		cov->size = *(uint32*)cov->data;
+		cov->size = *(uint32*)cov->trace;
 }
 
 static void cover_protect(cover_t* cov)
@@ -102,16 +102,18 @@ static void cover_protect(cover_t* cov)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	if (cov->mmap_alloc_size == 0)
 		fail("cover_t structure is corrupted");
-	cov->data = (char*)mmap(NULL, cov->mmap_alloc_size,
-				PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
-	if (cov->data == MAP_FAILED)
+	cov->alloc = (char*)mmap(NULL, cov->mmap_alloc_size,
+				 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+	if (cov->alloc == MAP_FAILED)
 		exitf("cover mmap failed");
-	cov->data_end = cov->data + cov->mmap_alloc_size;
-	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+	cov->trace = (char*)cov->alloc;
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace_end = cov->trace + cov->trace_size;
+	cov->trace_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	// We don't care about the specific PC values for now.
 	// Once we do, we might want to consider ASLR here.
 	cov->pc_offset = 0;
@@ -123,11 +125,11 @@ static void cover_unprotect(cover_t* cov)
 
 static long inject_cover(cover_t* cov, long a, long b)
 {
-	if (cov->data == nullptr)
+	if (cov->trace == nullptr)
 		return ENOENT;
-	uint32 size = std::min((uint32)b, cov->mmap_alloc_size);
-	memcpy(cov->data, (void*)a, size);
-	memset(cov->data + size, 0xcd, std::min<uint64>(100, cov->mmap_alloc_size - size));
+	uint32 size = std::min((uint32)b, cov->trace_size);
+	memcpy(cov->trace, (void*)a, size);
+	memset(cov->trace + size, 0xcd, std::min<uint64>(100, cov->trace_size - size));
 	return 0;
 }
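
Given the count-then-PCs layout that the test collector above assumes, a caller could exercise inject_cover() roughly like this (the fake array and PC values are illustrative; the 0xcd poison past the copied region is what the hunk adds to catch overreads):

    // Sketch: feeding a fake trace to the test executor's inject_cover().
    // Contents follow the [count][pc...] layout read by cover_collect().
    uint64 fake[3] = {2, 0x81000001, 0x81000002}; // count = 2, then two PCs
    inject_cover(cov, (long)fake, (long)sizeof(fake)); // cov: a thread's cover_t*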

executor/snapshot.h (+1 -1)
@@ -213,7 +213,7 @@ static void SnapshotStart()
 		thread_t* th = &threads[i];
 		thread_create(th, i, flag_coverage);
 		if (flag_coverage)
-			PopulateMemory(th->cov.data, kCoveragePopulate);
+			PopulateMemory(th->cov.alloc, kCoveragePopulate);
 	}
 	TouchMemory((char*)output_data + output_size - kOutputPopulate, kOutputPopulate);
 	TouchMemory(ivs.input, kInputPopulate);
