diff --git a/executor/common_linux.h b/executor/common_linux.h
index 635fc25ad28c..b183f051db0a 100644
--- a/executor/common_linux.h
+++ b/executor/common_linux.h
@@ -9,7 +9,7 @@
 #include
 
 #if SYZ_EXECUTOR
-const int kExtraCoverSize = 1024 << 10;
+const int kExtraCoverSize = 8 << 10;
 struct cover_t;
 static void cover_reset(cover_t* cov);
 #endif
diff --git a/executor/executor.cc b/executor/executor.cc
index 702f8c3b4303..639cddc5508d 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -72,7 +72,7 @@ const int kOutPipeFd = kMaxFd - 2; // remapped from stdout
 const int kCoverFd = kOutPipeFd - kMaxThreads;
 const int kExtraCoverFd = kCoverFd - 1;
 const int kMaxArgs = 9;
-const int kCoverSize = 512 << 10;
+const int kCoverSize = 8 << 10;
 const int kFailStatus = 67;
 
 // Two approaches of dealing with kcov memory.
@@ -324,6 +324,7 @@ const uint64 no_copyout = -1;
 static int running;
 static uint32 completed;
 static bool is_kernel_64_bit;
+static bool is_uniq_mode = false;
 static bool use_cover_edges;
 
 static uint8* input_data;
@@ -347,9 +348,12 @@ struct call_t {
 struct cover_t {
 	int fd;
 	uint32 size;
+	uint32 size_edge;
 	uint32 mmap_alloc_size;
 	char* data;
 	char* data_end;
+	char* data_edge;
+	char* data_edge_end;
 	// Currently collecting comparisons.
 	bool collect_comps;
 	// Note: On everything but darwin the first value in data is the count of
@@ -367,6 +371,8 @@ struct cover_t {
 	intptr_t pc_offset;
 	// The coverage buffer has overflowed and we have truncated coverage.
 	bool overflow;
+	// kcov mode.
+	unsigned int mode;
 };
 
 struct thread_t {
@@ -1157,36 +1163,51 @@ template <typename cover_data_t>
 uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, int index, cover_t* cov, bool all)
 {
 	// Write out feedback signals.
-	// Currently it is code edges computed as xor of two subsequent basic block PCs.
 	fbb.StartVector(0, sizeof(uint64));
-	cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
-	if ((char*)(cover_data + cov->size) > cov->data_end)
-		failmsg("too much cover", "cov=%u", cov->size);
-	uint32 nsig = 0;
-	cover_data_t prev_pc = 0;
-	bool prev_filter = true;
-	for (uint32 i = 0; i < cov->size; i++) {
-		cover_data_t pc = cover_data[i] + cov->pc_offset;
-		uint64 sig = pc;
-		if (use_cover_edges) {
-			// Only hash the lower 12 bits so the hash is independent of any module offsets.
-			const uint64 mask = (1 << 12) - 1;
-			sig ^= hash(prev_pc & mask) & mask;
+	if (is_uniq_mode) {
+		uint32 nsig = 0;
+		cover_data_t* cover_data = (cover_data_t*)(cov->data_edge + cov->data_offset);
+		if ((char*)(cover_data + cov->size_edge) > cov->data_edge_end)
+			failmsg("too much cover", "cov=%u", cov->size_edge);
+		for (uint32 i = 0; i < cov->size_edge; i++) {
+			cover_data_t sig = cover_data[i] + cov->pc_offset;
+			if (!all && max_signal && max_signal->Contains(sig))
+				continue;
+			fbb.PushElement(uint64(sig));
+			nsig++;
 		}
-		bool filter = coverage_filter(pc);
-		// Ignore the edge only if both current and previous PCs are filtered out
-		// to capture all incoming and outcoming edges into the interesting code.
-		bool ignore = !filter && !prev_filter;
-		prev_pc = pc;
-		prev_filter = filter;
-		if (ignore || dedup(index, sig))
-			continue;
-		if (!all && max_signal && max_signal->Contains(sig))
-			continue;
-		fbb.PushElement(uint64(sig));
-		nsig++;
+		return fbb.EndVector(nsig);
+	} else {
+		// It is code edges computed as xor of two subsequent basic block PCs.
+		cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
+		if ((char*)(cover_data + cov->size) > cov->data_end)
+			failmsg("too much cover", "cov=%u", cov->size);
+		uint32 nsig = 0;
+		cover_data_t prev_pc = 0;
+		bool prev_filter = true;
+		for (uint32 i = 0; i < cov->size; i++) {
+			cover_data_t pc = cover_data[i] + cov->pc_offset;
+			uint64 sig = pc;
+			if (use_cover_edges) {
+				// Only hash the lower 12 bits so the hash is independent of any module offsets.
+				const uint64 mask = (1 << 12) - 1;
+				sig ^= hash(prev_pc & mask) & mask;
+			}
+			bool filter = coverage_filter(pc);
+			// Ignore the edge only if both current and previous PCs are filtered out
+			// to capture all incoming and outcoming edges into the interesting code.
+			bool ignore = !filter && !prev_filter;
+			prev_pc = pc;
+			prev_filter = filter;
+			if (ignore || dedup(index, sig))
+				continue;
+			if (!all && max_signal && max_signal->Contains(sig))
+				continue;
+			fbb.PushElement(uint64(sig));
+			nsig++;
+		}
+		return fbb.EndVector(nsig);
 	}
-	return fbb.EndVector(nsig);
 }
 
 template <typename cover_data_t>
@@ -1519,8 +1540,11 @@ void execute_call(thread_t* th)
 	      th->id, current_time_ms() - start_time_ms, call->name, (uint64)th->res);
 	if (th->res == (intptr_t)-1)
 		debug(" errno=%d", th->reserrno);
-	if (flag_coverage)
+	if (flag_coverage) {
 		debug(" cover=%u", th->cov.size);
+		if (is_uniq_mode)
+			debug(" edge=%u", th->cov.size_edge);
+	}
 	if (th->call_props.fail_nth > 0)
 		debug(" fault=%d", th->fault_injected);
 	if (th->call_props.rerun > 0)
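
The write_signal() split above boils down to two ways of producing one signal value per coverage entry: in uniq mode the kernel-provided edge buffer already contains deduplicated edge identifiers, which are emitted as-is after the usual pc_offset relocation, while the classic path (when use_cover_edges is set) derives an edge signal by xor-ing the current PC with a hash of the previous PC's lower 12 bits, so the mixed-in part does not depend on module load offsets. A rough standalone sketch of the two computations; hash() here is only an illustrative stand-in, and classic_signal()/uniq_signal() are hypothetical helper names, not functions from the executor:

	#include <stdint.h>

	// Illustrative stand-in for the executor's hash(); any mixing function works here.
	static uint64_t hash(uint64_t v)
	{
		v = (v ^ (v >> 33)) * 0xff51afd7ed558ccdULL;
		return v ^ (v >> 33);
	}

	// Classic mode: xor the current PC with a hash of the previous PC's lower
	// 12 bits (the hash result is masked to 12 bits as well), so the edge part
	// of the signal is independent of module load offsets.
	static uint64_t classic_signal(uint64_t pc, uint64_t prev_pc)
	{
		const uint64_t mask = (1 << 12) - 1;
		return pc ^ (hash(prev_pc & mask) & mask);
	}

	// Uniq mode: each edge-buffer entry is already a unique edge identifier,
	// so the signal is just the entry plus the pc_offset relocation.
	static uint64_t uniq_signal(uint64_t edge, uint64_t pc_offset)
	{
		return edge + pc_offset;
	}
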
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index e952e6ea9a96..8d7f71d4efbd 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -17,10 +17,13 @@ static bool pkeys_enabled;
 // very large buffer b/c there are usually multiple procs, and each of them consumes
 // significant amount of memory. In snapshot mode we have only one proc, so we can have
 // larger coverage buffer.
-const int kSnapshotCoverSize = 1024 << 10;
+const int kSnapshotCoverSize = 8 << 10;
 
 const unsigned long KCOV_TRACE_PC = 0;
 const unsigned long KCOV_TRACE_CMP = 1;
+const unsigned long KCOV_TRACE_UNIQ_PC = 2;
+const unsigned long KCOV_TRACE_UNIQ_EDGE = 4;
+const unsigned long KCOV_TRACE_UNIQ_CMP = 8;
 
 template <int N>
 struct kcov_remote_arg {
@@ -106,6 +109,7 @@ static void cover_open(cover_t* cov, bool extra)
 	if (dup2(fd, cov->fd) < 0)
 		failmsg("failed to dup cover fd", "from=%d, to=%d", fd, cov->fd);
 	close(fd);
+	is_uniq_mode = true;
 	const int kcov_init_trace = is_kernel_64_bit ? KCOV_INIT_TRACE64 : KCOV_INIT_TRACE32;
 	const int cover_size = extra ? kExtraCoverSize : flag_snapshot ? kSnapshotCoverSize : kCoverSize;
@@ -149,21 +153,44 @@ static void cover_mmap(cover_t* cov)
 	cov->data_end = cov->data + cov->mmap_alloc_size;
 	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	cov->pc_offset = 0;
+
+	if (is_uniq_mode) {
+		// Now map the edge coverage buffer.
+		unsigned int off = cov->mmap_alloc_size;
+		mapped = (char*)mmap(NULL, cov->mmap_alloc_size + 2 * SYZ_PAGE_SIZE,
+				     PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, off);
+		if (mapped == MAP_FAILED)
+			exitf("failed to preallocate kcov buffer");
+		cov->data_edge = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size,
+					     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, off);
+		if (cov->data_edge == MAP_FAILED) {
+			is_uniq_mode = false;
+			debug("edge mmap failed, falling back to non-uniq mode\n");
+			return;
+		}
+		if (pkeys_enabled && pkey_mprotect(cov->data_edge, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY))
+			exitf("failed to pkey_mprotect kcov buffer");
+		cov->data_edge_end = cov->data_edge + cov->mmap_alloc_size;
+	}
 }
 
 static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 {
-	unsigned int kcov_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC;
+	cov->mode = collect_comps ? KCOV_TRACE_UNIQ_CMP : KCOV_TRACE_UNIQ_PC | KCOV_TRACE_UNIQ_EDGE;
 	// The KCOV_ENABLE call should be fatal,
 	// but in practice ioctl fails with assorted errors (9, 14, 25),
 	// so we use exitf.
 	if (!extra) {
-		if (ioctl(cov->fd, KCOV_ENABLE, kcov_mode))
-			exitf("cover enable write trace failed, mode=%d", kcov_mode);
+		if (ioctl(cov->fd, KCOV_ENABLE, cov->mode)) {
+			is_uniq_mode = false;
+			cov->mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC;
+			if (ioctl(cov->fd, KCOV_ENABLE, cov->mode))
+				exitf("cover enable write trace failed, mode=%d", cov->mode);
+		}
 		return;
 	}
 	kcov_remote_arg<1> arg = {
-	    .trace_mode = kcov_mode,
+	    .trace_mode = static_cast(collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC),
 	    // Coverage buffer size of background threads.
 	    .area_size = kExtraCoverSize,
 	    .num_handles = 1,
@@ -186,6 +213,7 @@ static void cover_reset(cover_t* cov)
 	}
 	cover_unprotect(cov);
 	*(uint64*)cov->data = 0;
+	*(uint64*)cov->data_edge = 0;
 	cover_protect(cov);
 	cov->overflow = false;
 }
@@ -195,6 +223,10 @@ static void cover_collect_impl(cover_t* cov)
 {
 	cov->size = *(cover_data_t*)cov->data;
 	cov->overflow = (cov->data + (cov->size + 2) * sizeof(cover_data_t)) > cov->data_end;
+	if (cov->mode & KCOV_TRACE_UNIQ_EDGE) {
+		cov->size_edge = *(cover_data_t*)cov->data_edge;
+		cov->overflow |= (cov->data_edge + (cov->size_edge + 2) * sizeof(cover_data_t)) > cov->data_edge_end;
+	}
 }
 
 static void cover_collect(cover_t* cov)
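
Taken together, cover_open()/cover_mmap()/cover_enable() above amount to roughly the following setup sequence, shown as a standalone sketch. The KCOV_INIT_TRACE/KCOV_ENABLE/KCOV_DISABLE ioctls and KCOV_TRACE_PC are the existing kcov interface; the KCOV_TRACE_UNIQ_* values and the convention of mapping a second (edge) buffer at a non-zero offset of the kcov fd come from the kernel side of this series and are not in mainline kcov, so that part of the ABI is an assumption here:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
	#define KCOV_ENABLE _IO('c', 100)
	#define KCOV_DISABLE _IO('c', 101)
	#define KCOV_TRACE_PC 0
	// Proposed modes; values mirror the sys.txt.const entries below.
	#define KCOV_TRACE_UNIQ_PC 2
	#define KCOV_TRACE_UNIQ_EDGE 4

	int main(void)
	{
		const unsigned long kNumEntries = 8 << 10; // kCoverSize from the patch
		size_t bytes = kNumEntries * sizeof(uint64_t);
		int fd = open("/sys/kernel/debug/kcov", O_RDWR);
		if (fd == -1 || ioctl(fd, KCOV_INIT_TRACE, kNumEntries))
			return 1;
		// PC buffer at offset 0; edge buffer assumed to live at the next
		// buffer-sized offset of the same fd (as in cover_mmap() above).
		uint64_t* pcs = (uint64_t*)mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		uint64_t* edges = (uint64_t*)mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, bytes);
		if (pcs == MAP_FAILED)
			return 1;
		unsigned long mode = KCOV_TRACE_UNIQ_PC | KCOV_TRACE_UNIQ_EDGE;
		if (edges == MAP_FAILED || ioctl(fd, KCOV_ENABLE, mode)) {
			// Kernel without the uniq patches: fall back to classic PC tracing.
			mode = KCOV_TRACE_PC;
			if (ioctl(fd, KCOV_ENABLE, mode))
				return 1;
		}
		pcs[0] = 0; // word 0 of each buffer holds the number of collected entries
		if (mode & KCOV_TRACE_UNIQ_EDGE)
			edges[0] = 0;
		read(-1, NULL, 0); // the traced syscall
		printf("pcs=%llu edges=%llu\n", (unsigned long long)pcs[0],
		       (mode & KCOV_TRACE_UNIQ_EDGE) ? (unsigned long long)edges[0] : 0);
		ioctl(fd, KCOV_DISABLE, 0);
		close(fd);
		return 0;
	}
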
diff --git a/sys/linux/sys.txt b/sys/linux/sys.txt
index a48d38c0b108..166933440074 100644
--- a/sys/linux/sys.txt
+++ b/sys/linux/sys.txt
@@ -1400,7 +1400,7 @@ _ = ADJ_OFFSET, ADJ_FREQUENCY, ADJ_MAXERROR, ADJ_ESTERROR, ADJ_STATUS, ADJ_TIMEC
 _ = SMB_PATH_MAX, XT_CGROUP_PATH_MAX, XENSTORE_REL_PATH_MAX
 
 # misc
-_ = KCOV_INIT_TRACE, KCOV_ENABLE, KCOV_DISABLE, KCOV_TRACE_PC, KCOV_TRACE_CMP, PTRACE_TRACEME, SYSLOG_ACTION_CONSOLE_ON, SYSLOG_ACTION_CONSOLE_OFF, SYSLOG_ACTION_CONSOLE_LEVEL, SYSLOG_ACTION_CLEAR, __NR_mmap2
+_ = KCOV_INIT_TRACE, KCOV_ENABLE, KCOV_DISABLE, KCOV_TRACE_PC, KCOV_TRACE_CMP, KCOV_TRACE_UNIQ_PC, KCOV_TRACE_UNIQ_EDGE, KCOV_TRACE_UNIQ_CMP, PTRACE_TRACEME, SYSLOG_ACTION_CONSOLE_ON, SYSLOG_ACTION_CONSOLE_OFF, SYSLOG_ACTION_CONSOLE_LEVEL, SYSLOG_ACTION_CLEAR, __NR_mmap2
 
 # Hardcode KCOV_REMOTE_ENABLE value for amd64 until new kcov patches reach mainline.
 define KCOV_REMOTE_ENABLE 1075340134
diff --git a/sys/linux/sys.txt.const b/sys/linux/sys.txt.const
index ba024ae9992f..9c06c1ffa761 100644
--- a/sys/linux/sys.txt.const
+++ b/sys/linux/sys.txt.const
@@ -216,6 +216,9 @@ KCOV_INIT_TRACE = 2148033281, 386:arm:2147771137, mips64le:ppc64le:1074291457
 KCOV_REMOTE_ENABLE = 1075340134, mips64le:ppc64le:2149081958
 KCOV_TRACE_CMP = 1
 KCOV_TRACE_PC = 0
+KCOV_TRACE_UNIQ_PC = 2
+KCOV_TRACE_UNIQ_EDGE = 4
+KCOV_TRACE_UNIQ_CMP = 8
 KEXEC_ARCH_386 = 196608
 KEXEC_ARCH_ARM = 2621440
 KEXEC_ARCH_DEFAULT = 0