Update google benchmark version

git-svn-id: https://llvm.org/svn/llvm-project/libcxx/trunk@349126 91177308-0d34-0410-b5e6-96231b3b80d8
Eric Fiselier
2018-12-14 03:37:13 +00:00
parent e713cc0acf
commit 114125f97b
17 changed files with 194 additions and 100 deletions

@@ -1,42 +0,0 @@
licenses(["notice"])

config_setting(
    name = "windows",
    values = {
        "cpu": "x64_windows",
    },
    visibility = [":__subpackages__"],
)

cc_library(
    name = "benchmark",
    srcs = glob(
        [
            "src/*.cc",
            "src/*.h",
        ],
        exclude = ["src/benchmark_main.cc"],
    ),
    hdrs = ["include/benchmark/benchmark.h"],
    linkopts = select({
        ":windows": ["-DEFAULTLIB:shlwapi.lib"],
        "//conditions:default": ["-pthread"],
    }),
    strip_include_prefix = "include",
    visibility = ["//visibility:public"],
)

cc_library(
    name = "benchmark_main",
    srcs = ["src/benchmark_main.cc"],
    hdrs = ["include/benchmark/benchmark.h"],
    strip_include_prefix = "include",
    visibility = ["//visibility:public"],
    deps = [":benchmark"],
)

cc_library(
    name = "benchmark_internal_headers",
    hdrs = glob(["src/*.h"]),
    visibility = ["//test:__pkg__"],
)

@@ -27,6 +27,7 @@ Arne Beer <arne@twobeer.de>
Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christopher Seymour <chris.j.seymour@hotmail.com>
Cyrille Faucheux <cyrille.faucheux@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dominic Hamon <dma@stripysock.com> <dominic@google.com>

@@ -255,7 +255,7 @@ that might be used to customize high-order term calculation.
```c++
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
-    ->Range(1<<10, 1<<18)->Complexity([](int n)->double{return n; });
+    ->Range(1<<10, 1<<18)->Complexity([](int64_t n)->double{return n; });
```
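For contrast with the user-defined lambda above, the same registration can instead ask the library to fit a predefined or automatically deduced complexity class. A small sketch using the library's `benchmark::oN` / `benchmark::oAuto` enumerators from the `Complexity()` API:

```c++
// Assert a linear fit explicitly.
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
    ->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
// Or let the least-squares fit pick the best-matching class.
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
    ->Range(1<<10, 1<<18)->Complexity(benchmark::oAuto);
```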
### Templated benchmarks
@@ -264,7 +264,7 @@ messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
absence of multiprogramming.
```c++
-template <class Q> int BM_Sequential(benchmark::State& state) {
+template <class Q> void BM_Sequential(benchmark::State& state) {
  Q q;
  typename Q::value_type v;
  for (auto _ : state) {
@@ -428,6 +428,26 @@ BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
Without `UseRealTime`, CPU time is used by default.
## Controlling timers
Normally, the entire duration of the work loop (`for (auto _ : state) {}`)
is measured. But sometimes it is necessary to do some work inside
that loop on every iteration, without counting that time toward the benchmark time.
That is possible, although it is not recommended, since it has high overhead.
```c++
static void BM_SetInsert_With_Timer_Control(benchmark::State& state) {
  std::set<int> data;
  for (auto _ : state) {
    state.PauseTiming();  // Stop timers. They will not count until they are resumed.
    data = ConstructRandomSet(state.range(0));  // Do something that should not be measured
    state.ResumeTiming();  // And resume timers. They are now counting again.
    // The rest will be measured.
    for (int j = 0; j < state.range(1); ++j)
      data.insert(RandomNumber());
  }
}
BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}});
```
## Manual timing
For benchmarking something for which neither CPU time nor real-time are
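The hunk is cut off here. For orientation, a minimal sketch of the manual-timing pattern that section goes on to describe, relying on the library's `state.SetIterationTime()` and `UseManualTime()`; `DoWorkMeasuredExternally()` is a hypothetical stand-in for the code being timed:

```c++
#include <chrono>
#include "benchmark/benchmark.h"

static void DoWorkMeasuredExternally() { /* hypothetical workload */ }

static void BM_ManualTiming(benchmark::State& state) {
  for (auto _ : state) {
    auto start = std::chrono::high_resolution_clock::now();
    DoWorkMeasuredExternally();
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> elapsed = end - start;
    // Hand the externally measured duration to the framework; it is used
    // in place of CPU time or wall-clock time for this iteration.
    state.SetIterationTime(elapsed.count());
  }
}
BENCHMARK(BM_ManualTiming)->UseManualTime();
```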

@@ -1293,6 +1293,15 @@ struct CPUInfo {
  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo);
};

// Adding Struct for System Information
struct SystemInfo {
  std::string name;
  static const SystemInfo& Get();

 private:
  SystemInfo();
  BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo);
};

// Interface for custom benchmark result printers.
// By default, benchmark reports are printed to stdout. However an application
// can control the destination of the reports by calling
@@ -1302,6 +1311,7 @@ class BenchmarkReporter {
 public:
  struct Context {
    CPUInfo const& cpu_info;
    SystemInfo const& sys_info;
    // The number of chars in the longest benchmark name.
    size_t name_field_width;
    static const char* executable_name;
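To see how the new `sys_info` member reaches user code, here is a minimal sketch of a custom reporter that reads it in `ReportContext()` (illustrative only; it relies on the `BenchmarkReporter` virtual interface declared in this header, and `my_host_aware_reporter` in the usage note is a hypothetical instance):

```c++
#include <iostream>
#include "benchmark/benchmark.h"

class HostAwareReporter : public benchmark::BenchmarkReporter {
 public:
  bool ReportContext(const Context& context) override {
    // The host name gathered by SystemInfo::Get() is now available here,
    // next to the existing CPU information.
    std::cout << "host: " << context.sys_info.name
              << ", cpus: " << context.cpu_info.num_cpus << "\n";
    return true;  // keep running the benchmarks
  }
  void ReportRuns(const std::vector<Run>& /*reports*/) override {}
};

// Usage sketch: benchmark::RunSpecifiedBenchmarks(&my_host_aware_reporter);
```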

@@ -57,9 +57,9 @@ DEFINE_bool(benchmark_list_tests, false,
DEFINE_string(benchmark_filter, ".",
              "A regular expression that specifies the set of benchmarks "
-             "to execute. If this flag is empty, no benchmarks are run. "
-             "If this flag is the string \"all\", all benchmarks linked "
-             "into the process are run.");
+             "to execute. If this flag is empty, or if this flag is the "
+             "string \"all\", all benchmarks linked into the binary are "
+             "run.");
DEFINE_double(benchmark_min_time, 0.5,
              "Minimum number of seconds we should run benchmark before "

@@ -182,14 +182,19 @@ bool BenchmarkFamilies::FindBenchmarks(
      }
    }
-    instance.name += StrFormat("%d", arg);
+    // we know that the args are always non-negative (see 'AddRange()'),
+    // thus print as 'unsigned'. BUT, do a cast due to the 32-bit builds.
+    instance.name += StrFormat("%lu", static_cast<unsigned long>(arg));
    ++arg_i;
  }
  if (!IsZero(family->min_time_))
    instance.name += StrFormat("/min_time:%0.3f", family->min_time_);
-  if (family->iterations_ != 0)
-    instance.name += StrFormat("/iterations:%d", family->iterations_);
+  if (family->iterations_ != 0) {
+    instance.name +=
+        StrFormat("/iterations:%lu",
+                  static_cast<unsigned long>(family->iterations_));
+  }
  if (family->repetitions_ != 0)
    instance.name += StrFormat("/repeats:%d", family->repetitions_);

@@ -73,8 +73,8 @@ std::string GetBigOString(BigO complexity) {
// - time : Vector containing the times for the benchmark tests.
// - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).
-// For a deeper explanation on the algorithm logic, look the README file at
-// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
+// For a deeper explanation on the algorithm logic, please refer to
+// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                       const std::vector<double>& time,

@@ -77,6 +77,8 @@ bool JSONReporter::ReportContext(const Context& context) {
  std::string walltime_value = LocalDateTimeString();
  out << indent << FormatKV("date", walltime_value) << ",\n";
  out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
  if (Context::executable_name) {
    // windows uses backslash for its path separator,
    // which must be escaped in JSON otherwise it blows up conforming JSON

@@ -79,7 +79,8 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
// No initializer because it's already initialized to NULL.
const char *BenchmarkReporter::Context::executable_name;
-BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {}
+BenchmarkReporter::Context::Context()
+    : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {}
std::string BenchmarkReporter::Run::benchmark_name() const {
  std::string name = run_name;

@@ -12,7 +12,11 @@ void AppendHumanReadable(int n, std::string* str);
std::string HumanReadableNumber(double n, double one_k = 1024.0);
-std::string StrFormat(const char* format, ...);
+#ifdef __GNUC__
+__attribute__((format(printf, 1, 2)))
+#endif
+std::string
+StrFormat(const char* format, ...);
inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
  return out;
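A brief note on what the attribute buys (a sketch, not part of the diff): with `format(printf, 1, 2)` attached, GCC and Clang check `StrFormat()` call sites under `-Wformat`, so a conversion specifier that does not match its argument is diagnosed at compile time, for example:

```c++
// OK: %d matches an int argument.
std::string a = StrFormat("/repeats:%d", 3);
// Warns under -Wformat: %d does not match an unsigned long argument.
std::string b = StrFormat("/iterations:%d", static_cast<unsigned long>(10));
```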

@@ -19,6 +19,7 @@
#undef StrCat  // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <versionhelpers.h>
#include <windows.h>
#include <codecvt>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
@@ -52,6 +53,7 @@
#include <limits>
#include <memory>
#include <sstream>
#include <locale>
#include "check.h"
#include "cycleclock.h"
@@ -366,6 +368,35 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizes() {
#endif
}
std::string GetSystemName() {
#if defined(BENCHMARK_OS_WINDOWS)
  std::string str;
  const unsigned COUNT = MAX_COMPUTERNAME_LENGTH + 1;
  TCHAR hostname[COUNT] = {'\0'};
  DWORD DWCOUNT = COUNT;
  if (!GetComputerName(hostname, &DWCOUNT))
    return std::string("");
#ifndef UNICODE
  str = std::string(hostname, DWCOUNT);
#else
  // Convert the wide host name with wstring_convert; note that
  // std::wstring_convert is deprecated in C++17.
  using convert_type = std::codecvt_utf8<wchar_t>;
  std::wstring_convert<convert_type, wchar_t> converter;
  std::wstring wStr(hostname, DWCOUNT);
  str = converter.to_bytes(wStr);
#endif
  return str;
#else  // defined(BENCHMARK_OS_WINDOWS)
#ifdef BENCHMARK_OS_MACOSX  // macOS does not define HOST_NAME_MAX.
#define HOST_NAME_MAX 64
#endif
  char hostname[HOST_NAME_MAX];
  int retVal = gethostname(hostname, HOST_NAME_MAX);
  if (retVal != 0) return std::string("");
  return std::string(hostname);
#endif  // Catch-all POSIX block.
}
int GetNumCPUs() {
#ifdef BENCHMARK_HAS_SYSCTL
  int NumCPU = -1;
@@ -609,4 +640,11 @@ CPUInfo::CPUInfo()
      scaling_enabled(CpuScalingEnabled(num_cpus)),
      load_avg(GetLoadAvg()) {}

const SystemInfo& SystemInfo::Get() {
  static const SystemInfo* info = new SystemInfo();
  return *info;
}

SystemInfo::SystemInfo() : name(GetSystemName()) {}

}  // end namespace benchmark

@@ -4,6 +4,7 @@
#include <iostream>
#include <map>
#include <memory>
#include <random>
#include <sstream>
#include <streambuf>
@@ -207,7 +208,7 @@ void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
void ResultsChecker::CheckResults(std::stringstream& output) {
  // first reset the stream to the start
  {
-    auto start = std::ios::streampos(0);
+    auto start = std::stringstream::pos_type(0);
    // clear before calling tellg()
    output.clear();
    // seek to zero only when needed
@@ -438,11 +439,50 @@ int SubstrCnt(const std::string& haystack, const std::string& pat) {
  return count;
}

static char ToHex(int ch) {
  return ch < 10 ? static_cast<char>('0' + ch)
                 : static_cast<char>('a' + (ch - 10));
}

static char RandomHexChar() {
  static std::mt19937 rd{std::random_device{}()};
  static std::uniform_int_distribution<int> mrand{0, 15};
  return ToHex(mrand(rd));
}

static std::string GetRandomFileName() {
  std::string model = "test.%%%%%%";
  for (auto & ch : model) {
    if (ch == '%')
      ch = RandomHexChar();
  }
  return model;
}

static bool FileExists(std::string const& name) {
  std::ifstream in(name.c_str());
  return in.good();
}

static std::string GetTempFileName() {
  // This function attempts to avoid race conditions where two tests
  // create the same file at the same time. However, it still introduces races
  // similar to tmpnam.
  int retries = 3;
  while (--retries) {
    std::string name = GetRandomFileName();
    if (!FileExists(name))
      return name;
  }
  std::cerr << "Failed to create unique temporary file name" << std::endl;
  std::abort();
}

std::string GetFileReporterOutput(int argc, char* argv[]) {
  std::vector<char*> new_argv(argv, argv + argc);
  assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());
-  std::string tmp_file_name = std::tmpnam(nullptr);
+  std::string tmp_file_name = GetTempFileName();
  std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n';
  std::string tmp = "--benchmark_out=";

@@ -23,6 +23,7 @@ static int AddContextCases() {
{{"^\\{", MR_Default}, {{"^\\{", MR_Default},
{"\"context\":", MR_Next}, {"\"context\":", MR_Next},
{"\"date\": \"", MR_Next}, {"\"date\": \"", MR_Next},
{"\"host_name\":", MR_Next},
{"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",", {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
MR_Next}, MR_Next},
{"\"num_cpus\": %int,$", MR_Next}, {"\"num_cpus\": %int,$", MR_Next},
@@ -219,6 +220,18 @@ ADD_CASES(TC_JSONOut,
{"\"run_type\": \"iteration\",$", MR_Next}}); {"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Big Args Output ------------------------ //
// ========================================================================= //
void BM_BigArgs(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
{"^BM_BigArgs/2147483648 %console_report$"}});
// ========================================================================= // // ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- // // ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= // // ========================================================================= //

@@ -9,56 +9,56 @@ namespace {
TEST(StringUtilTest, stoul) {
  {
    size_t pos = 0;
-    EXPECT_EQ(0, benchmark::stoul("0", &pos));
+    EXPECT_EQ(0ul, benchmark::stoul("0", &pos));
-    EXPECT_EQ(1, pos);
+    EXPECT_EQ(1ul, pos);
  }
  {
    size_t pos = 0;
-    EXPECT_EQ(7, benchmark::stoul("7", &pos));
+    EXPECT_EQ(7ul, benchmark::stoul("7", &pos));
-    EXPECT_EQ(1, pos);
+    EXPECT_EQ(1ul, pos);
  }
  {
    size_t pos = 0;
-    EXPECT_EQ(135, benchmark::stoul("135", &pos));
+    EXPECT_EQ(135ul, benchmark::stoul("135", &pos));
-    EXPECT_EQ(3, pos);
+    EXPECT_EQ(3ul, pos);
  }
#if ULONG_MAX == 0xFFFFFFFFul
  {
    size_t pos = 0;
    EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
-    EXPECT_EQ(10, pos);
+    EXPECT_EQ(10ul, pos);
  }
#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
  {
    size_t pos = 0;
    EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos));
-    EXPECT_EQ(20, pos);
+    EXPECT_EQ(20ul, pos);
  }
#endif
  {
    size_t pos = 0;
-    EXPECT_EQ(10, benchmark::stoul("1010", &pos, 2));
+    EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
-    EXPECT_EQ(520, benchmark::stoul("1010", &pos, 8));
+    EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
-    EXPECT_EQ(1010, benchmark::stoul("1010", &pos, 10));
+    EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
-    EXPECT_EQ(4112, benchmark::stoul("1010", &pos, 16));
+    EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
-    EXPECT_EQ(0xBEEF, benchmark::stoul("BEEF", &pos, 16));
+    EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
@@ -69,42 +69,42 @@ TEST(StringUtilTest, stoi) {
  {
    size_t pos = 0;
    EXPECT_EQ(0, benchmark::stoi("0", &pos));
-    EXPECT_EQ(1, pos);
+    EXPECT_EQ(1ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
-    EXPECT_EQ(3, pos);
+    EXPECT_EQ(3ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
@@ -115,28 +115,28 @@ TEST(StringUtilTest, stod) {
  {
    size_t pos = 0;
    EXPECT_EQ(0.0, benchmark::stod("0", &pos));
-    EXPECT_EQ(1, pos);
+    EXPECT_EQ(1ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
-    EXPECT_EQ(3, pos);
+    EXPECT_EQ(3ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
-    EXPECT_EQ(4, pos);
+    EXPECT_EQ(4ul, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
-    EXPECT_EQ(3, pos);
+    EXPECT_EQ(3ul, pos);
  }
  {
    size_t pos = 0;
    /* Note: exactly representable as double */
    EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
-    EXPECT_EQ(8, pos);
+    EXPECT_EQ(8ul, pos);
  }
  {
    ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
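The pattern of changes above (appending `ul` to the expected values) appears aimed at keeping `EXPECT_EQ` from comparing a signed `int` literal against the unsigned results of `stoul` and the `size_t` positions, which can trip `-Wsign-compare` in warnings-as-errors builds; a hedged illustration of that assumption:

```c++
size_t pos = 0;
EXPECT_EQ(1, pos);    // int vs. size_t: may warn under -Wsign-compare
EXPECT_EQ(1ul, pos);  // unsigned literal matches the operand; no warning
```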

@@ -1,5 +1,6 @@
#!/usr/bin/env python
import unittest
"""
compare.py - versatile benchmark output compare tool
"""
@@ -244,9 +245,6 @@ def main():
    print(ln)

-import unittest

class TestParser(unittest.TestCase):
    def setUp(self):
        self.parser = create_parser()
@@ -402,7 +400,7 @@ class TestParser(unittest.TestCase):
if __name__ == '__main__':
-    #unittest.main()
+    # unittest.main()
    main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4

@@ -1,3 +1,4 @@
import unittest
"""report.py - Utilities for reporting statistics about benchmark results """report.py - Utilities for reporting statistics about benchmark results
""" """
import os import os
@@ -270,9 +271,6 @@ def generate_difference_report(
# Unit tests

-import unittest

class TestGetUniqueBenchmarkNames(unittest.TestCase):
    def load_results(self):
        import json
@@ -290,7 +288,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
            'BM_One',
            'BM_Two',
            'short',  # These two are not sorted
            'medium',  # These two are not sorted
        ]
        json = self.load_results()
        output_lines = get_unique_benchmark_names(json)
@@ -300,6 +298,7 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
        for i in range(0, len(output_lines)):
            self.assertEqual(expect_lines[i], output_lines[i])

class TestReportDifference(unittest.TestCase):
    def load_results(self):
        import json

@@ -7,11 +7,13 @@ import subprocess
import sys

# Input file type enumeration
IT_Invalid = 0
IT_JSON = 1
IT_Executable = 2

_num_magic_bytes = 2 if sys.platform.startswith('win') else 4


def is_executable_file(filename):
    """
    Return 'True' if 'filename' names a valid file which is likely
@@ -46,7 +48,7 @@ def is_json_file(filename):
        with open(filename, 'r') as f:
            json.load(f)
        return True
-    except:
+    except BaseException:
        pass
    return False
@@ -84,6 +86,7 @@ def check_input_file(filename):
        sys.exit(1)
    return ftype


def find_benchmark_flag(prefix, benchmark_flags):
    """
    Search the specified list of flags for a flag matching `<prefix><arg>` and
@@ -97,6 +100,7 @@ def find_benchmark_flag(prefix, benchmark_flags):
            result = f[len(prefix):]
    return result


def remove_benchmark_flags(prefix, benchmark_flags):
    """
    Return a new list containing the specified benchmark_flags except those
@@ -105,6 +109,7 @@ def remove_benchmark_flags(prefix, benchmark_flags):
    assert prefix.startswith('--') and prefix.endswith('=')
    return [f for f in benchmark_flags if not f.startswith(prefix)]


def load_benchmark_results(fname):
    """
    Read benchmark output from a file and return the JSON object.
@@ -129,7 +134,7 @@ def run_benchmark(exe_name, benchmark_flags):
    thandle, output_name = tempfile.mkstemp()
    os.close(thandle)
    benchmark_flags = list(benchmark_flags) + \
        ['--benchmark_out=%s' % output_name]
    cmd = [exe_name] + benchmark_flags
    print("RUNNING: %s" % ' '.join(cmd))
@@ -156,4 +161,4 @@ def run_or_load_benchmark(filename, benchmark_flags):
    elif ftype == IT_Executable:
        return run_benchmark(filename, benchmark_flags)
    else:
        assert False  # This branch is unreachable