Merge to upstream r348296.

Test: ./run_tests.py --bitness 32
Test: ./run_tests.py --bitness 64
Test: ./run_tests.py --bitness 64 --host
Bug: None
Change-Id: I79cdb1e5f146e42e383426c8e12a231b53a9459a

utils/docker/build_docker_image.sh (new executable file, 109 lines)
@@ -0,0 +1,109 @@
#!/bin/bash
#===- libcxx/utils/docker/build_docker_image.sh ----------------------------===//
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===//
set -e

IMAGE_SOURCE=""
DOCKER_REPOSITORY=""
DOCKER_TAG=""

function show_usage() {
  cat << EOF
Usage: build_docker_image.sh [options]

Available options:
  General:
    -h|--help               show this help message
  Docker-specific:
    -s|--source             image source dir (i.e. debian8, nvidia-cuda, etc)
    -d|--docker-repository  docker repository for the image
    -t|--docker-tag         docker tag for the image

Required options: --source and --docker-repository.

For example, running:
$ build_docker_image.sh -s debian9 -d mydocker/debian9-clang -t latest
will produce the docker image:
    mydocker/debian9-clang:latest - a small image with preinstalled clang.
Please note that this example produces a not very useful installation, since it
doesn't override CMake defaults, which produces a Debug and non-bootstrapped
version of clang.
EOF
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    -h|--help)
      show_usage
      exit 0
      ;;
    -s|--source)
      shift
      IMAGE_SOURCE="$1"
      shift
      ;;
    -d|--docker-repository)
      shift
      DOCKER_REPOSITORY="$1"
      shift
      ;;
    -t|--docker-tag)
      shift
      DOCKER_TAG="$1"
      shift
      ;;
    *)
      echo "Unknown argument $1"
      exit 1
      ;;
  esac
done


command -v docker >/dev/null ||
  {
    echo "Docker binary cannot be found. Please install Docker to use this script."
    exit 1
  }

if [ "$IMAGE_SOURCE" == "" ]; then
  echo "Required argument missing: --source"
  exit 1
fi

if [ "$DOCKER_REPOSITORY" == "" ]; then
  echo "Required argument missing: --docker-repository"
  exit 1
fi

SOURCE_DIR=$(dirname "$0")
if [ ! -d "$SOURCE_DIR/$IMAGE_SOURCE" ]; then
  echo "No sources for '$IMAGE_SOURCE' were found in $SOURCE_DIR"
  exit 1
fi

BUILD_DIR=$(mktemp -d)
trap "rm -rf $BUILD_DIR" EXIT
echo "Using a temporary directory for the build: $BUILD_DIR"

cp -r "$SOURCE_DIR/$IMAGE_SOURCE" "$BUILD_DIR/$IMAGE_SOURCE"
cp -r "$SOURCE_DIR/scripts" "$BUILD_DIR/scripts"


if [ "$DOCKER_TAG" != "" ]; then
  DOCKER_TAG=":$DOCKER_TAG"
fi

echo "Building ${DOCKER_REPOSITORY}${DOCKER_TAG} from $IMAGE_SOURCE"
docker build -t "${DOCKER_REPOSITORY}${DOCKER_TAG}" \
  -f "$BUILD_DIR/$IMAGE_SOURCE/Dockerfile" \
  "$BUILD_DIR"
echo "Done"
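As a usage sketch for the script above (the repository name and tag below are illustrative, not part of this change):

```bash
# Build the debian9-based libc++ builder image defined in this commit.
$ ./build_docker_image.sh --source debian9 \
    --docker-repository mydocker/libcxx-debian9 --docker-tag latest
```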

utils/docker/debian9/Dockerfile (new file, 113 lines)
@@ -0,0 +1,113 @@
#===- libcxx/utils/docker/debian9/Dockerfile -------------------------===//
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===----------------------------------------------------------------------===//

# Setup the base builder image with the packages we'll need to build GCC and Clang from source.
FROM launcher.gcr.io/google/debian9:latest as builder-base
LABEL maintainer "libc++ Developers"

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
      ca-certificates \
      gnupg \
      build-essential \
      wget \
      subversion \
      unzip \
      automake \
      python \
      cmake \
      ninja-build \
      curl \
      git \
      gcc-multilib \
      g++-multilib \
      libc6-dev \
      bison \
      flex \
      libtool \
      autoconf \
      binutils-dev \
      binutils-gold \
      software-properties-common && \
    update-alternatives --install "/usr/bin/ld" "ld" "/usr/bin/ld.gold" 20 && \
    update-alternatives --install "/usr/bin/ld" "ld" "/usr/bin/ld.bfd" 10

# Build GCC 4.9 for testing our C++11 support against
FROM builder-base as gcc-49-builder
LABEL maintainer "libc++ Developers"

ADD scripts/build_gcc.sh /tmp/build_gcc.sh

RUN git clone --depth=1 --branch gcc-4_9_4-release git://gcc.gnu.org/git/gcc.git /tmp/gcc-4.9.4
RUN cd /tmp/gcc-4.9.4/ && ./contrib/download_prerequisites
RUN /tmp/build_gcc.sh --source /tmp/gcc-4.9.4 --to /opt/gcc-4.9.4

# Build GCC ToT for testing in all dialects.
FROM builder-base as gcc-tot-builder
LABEL maintainer "libc++ Developers"

ADD scripts/build_gcc.sh /tmp/build_gcc.sh

RUN git clone --depth=1 git://gcc.gnu.org/git/gcc.git /tmp/gcc-tot
RUN cd /tmp/gcc-tot && ./contrib/download_prerequisites
RUN /tmp/build_gcc.sh --source /tmp/gcc-tot --to /opt/gcc-tot

# Build LLVM 4.0, which is used to test against a "legacy" compiler.
FROM builder-base as llvm-4-builder
LABEL maintainer "libc++ Developers"

ADD scripts/checkout_git.sh /tmp/checkout_git.sh
ADD scripts/build_install_llvm.sh /tmp/build_install_llvm.sh

RUN /tmp/checkout_git.sh --to /tmp/llvm-4.0 -p clang -p compiler-rt --branch release_40
RUN /tmp/build_install_llvm.sh \
    --install /opt/llvm-4.0 \
    --source /tmp/llvm-4.0 \
    --build /tmp/build-llvm-4.0 \
    -i install-clang -i install-clang-headers \
    -i install-compiler-rt \
    -- \
    -DCMAKE_BUILD_TYPE=RELEASE \
    -DLLVM_ENABLE_ASSERTIONS=ON

# Stage 2. Produce a minimal release image with build results.
FROM launcher.gcr.io/google/debian9:latest
LABEL maintainer "libc++ Developers"

# Copy over the GCC and Clang installations
COPY --from=gcc-49-builder /opt/gcc-4.9.4 /opt/gcc-4.9.4
COPY --from=gcc-tot-builder /opt/gcc-tot /opt/gcc-tot
COPY --from=llvm-4-builder /opt/llvm-4.0 /opt/llvm-4.0

RUN ln -s /opt/gcc-4.9.4/bin/gcc /usr/local/bin/gcc-4.9 && \
    ln -s /opt/gcc-4.9.4/bin/g++ /usr/local/bin/g++-4.9

RUN apt-get update && \
    apt-get install -y \
      ca-certificates \
      gnupg \
      build-essential \
      apt-transport-https \
      curl \
      software-properties-common

RUN apt-get install -y --no-install-recommends \
      systemd \
      sysvinit-utils \
      cmake \
      subversion \
      git \
      ninja-build \
      gcc-multilib \
      g++-multilib \
      python \
      buildbot-slave

ADD scripts /libcxx-scripts/
RUN /libcxx-scripts/install_clang_packages.sh
utils/docker/scripts/build_gcc.sh (new executable file, 91 lines)
@@ -0,0 +1,91 @@
#!/usr/bin/env bash
#===- libcxx/utils/docker/scripts/build_gcc.sh -----------------------------===//
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===-----------------------------------------------------------------------===//

set -e


function show_usage() {
  cat << EOF
Usage: build_gcc.sh [options]

Configure, build and install GCC from the specified sources. Used inside
docker container.

Available options:
  -h|--help           show this help message
  --source            the source path from which to run the configuration.
  --to                destination directory where to install GCC.
Required options: --source and --to.
EOF
}

GCC_INSTALL_DIR=""
GCC_SOURCE_DIR=""

while [[ $# -gt 0 ]]; do
  case "$1" in
    --to)
      shift
      GCC_INSTALL_DIR="$1"
      shift
      ;;
    --source)
      shift
      GCC_SOURCE_DIR="$1"
      shift
      ;;
    -h|--help)
      show_usage
      exit 0
      ;;
    *)
      echo "Unknown option: $1"
      exit 1
  esac
done

if [ "$GCC_INSTALL_DIR" == "" ]; then
  echo "No install directory. Please specify the --to argument."
  exit 1
fi

if [ "$GCC_SOURCE_DIR" == "" ]; then
  echo "No source directory. Please specify the --source argument."
  exit 1
fi

GCC_NAME=$(basename "$GCC_SOURCE_DIR")
GCC_BUILD_DIR="/tmp/gcc-build-root/build-$GCC_NAME"

mkdir -p "$GCC_INSTALL_DIR"
mkdir -p "$GCC_BUILD_DIR"
pushd "$GCC_BUILD_DIR"

# Run the build as specified in the build arguments.
echo "Running configuration"
$GCC_SOURCE_DIR/configure --prefix=$GCC_INSTALL_DIR \
  --disable-bootstrap --disable-libgomp --disable-libitm \
  --disable-libvtv --disable-libcilkrts --disable-libmpx \
  --disable-liboffloadmic --disable-libcc1 --enable-languages=c,c++

NPROC=$(nproc)
echo "Running build with $NPROC threads"
make -j$NPROC

echo "Installing to $GCC_INSTALL_DIR"
make install -j$NPROC

popd

# Cleanup.
rm -rf "$GCC_BUILD_DIR"

echo "Done"
utils/docker/scripts/build_install_llvm.sh (new executable file, 114 lines)
@@ -0,0 +1,114 @@
#!/usr/bin/env bash
#===- llvm/utils/docker/scripts/build_install_llvm.sh ---------------------===//
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===-----------------------------------------------------------------------===//

set -e

function show_usage() {
  cat << EOF
Usage: build_install_llvm.sh [options] -- [cmake-args]

Run cmake with the specified arguments. Used inside docker container.
Passes additional -DCMAKE_INSTALL_PREFIX and puts the build results into
the directory specified by the --install option.

Available options:
  -h|--help           show this help message
  -i|--install-target name of a cmake install target to build and include in
                      the resulting archive. Can be specified multiple times.
  --install           destination directory where to install the targets.
  --source            location of the source tree.
  --build             location to use as the build directory.
Required options: --install, --source, --build, and at least one
--install-target.

All options after '--' are passed to the CMake invocation.
EOF
}

CMAKE_ARGS=""
CMAKE_INSTALL_TARGETS=""
CLANG_INSTALL_DIR=""
CLANG_SOURCE_DIR=""
CLANG_BUILD_DIR=""

while [[ $# -gt 0 ]]; do
  case "$1" in
    -i|--install-target)
      shift
      CMAKE_INSTALL_TARGETS="$CMAKE_INSTALL_TARGETS $1"
      shift
      ;;
    --source)
      shift
      CLANG_SOURCE_DIR="$1"
      shift
      ;;
    --build)
      shift
      CLANG_BUILD_DIR="$1"
      shift
      ;;
    --install)
      shift
      CLANG_INSTALL_DIR="$1"
      shift
      ;;
    --)
      shift
      CMAKE_ARGS="$*"
      shift $#
      ;;
    -h|--help)
      show_usage
      exit 0
      ;;
    *)
      echo "Unknown option: $1"
      exit 1
  esac
done

if [ "$CLANG_SOURCE_DIR" == "" ]; then
  echo "No source directory. Please pass --source."
  exit 1
fi

if [ "$CLANG_BUILD_DIR" == "" ]; then
  echo "No build directory. Please pass --build."
  exit 1
fi

if [ "$CMAKE_INSTALL_TARGETS" == "" ]; then
  echo "No install targets. Please pass one or more --install-target."
  exit 1
fi

if [ "$CLANG_INSTALL_DIR" == "" ]; then
  echo "No install directory. Please specify the --install argument."
  exit 1
fi

echo "Building in $CLANG_BUILD_DIR"
mkdir -p "$CLANG_BUILD_DIR"
pushd "$CLANG_BUILD_DIR"

# Run the build as specified in the build arguments.
echo "Running build"
cmake -GNinja \
  -DCMAKE_INSTALL_PREFIX="$CLANG_INSTALL_DIR" \
  $CMAKE_ARGS \
  "$CLANG_SOURCE_DIR"
ninja $CMAKE_INSTALL_TARGETS

popd

# Cleanup.
rm -rf "$CLANG_BUILD_DIR"

echo "Done"
utils/docker/scripts/checkout_git.sh (new executable file, 130 lines)
@@ -0,0 +1,130 @@
#!/usr/bin/env bash
#===- llvm/utils/docker/scripts/checkout_git.sh ---------------------===//
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===-----------------------------------------------------------------------===//

set -e

function show_usage() {
  cat << EOF
Usage: checkout_git.sh [options]

Checkout git sources into the directory specified by --to. Used inside a
docker container.

Available options:
  -h|--help           show this help message
  --to                destination directory for the checkout.
  -b|--branch         git branch to checkout, i.e. 'release_40'
                      (default: the repository's default branch)
  -p|--llvm-project   name of an llvm project to checkout.
                      For clang, please use 'clang', not 'cfe'.
                      Project 'llvm' is always included and ignored, if
                      specified.
                      Can be specified multiple times.
EOF
}

LLVM_BRANCH=""
# We always checkout llvm
LLVM_PROJECTS="llvm"
SOURCE_DIR=""

function contains_project() {
  local TARGET_PROJ="$1"
  local PROJ
  for PROJ in $LLVM_PROJECTS; do
    if [ "$PROJ" == "$TARGET_PROJ" ]; then
      return 0
    fi
  done
  return 1
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    --to)
      shift
      SOURCE_DIR="$1"
      shift
      ;;
    -b|--branch)
      shift
      LLVM_BRANCH="$1"
      shift
      ;;
    -p|--llvm-project)
      shift
      PROJ="$1"
      shift

      if [ "$PROJ" == "cfe" ]; then
        PROJ="clang"
      fi

      if ! contains_project "$PROJ" ; then
        if [ "$PROJ" == "clang-tools-extra" ] && ! contains_project "clang"; then
          echo "Project 'clang-tools-extra' specified before 'clang'. Adding 'clang' to the list of projects first."
          LLVM_PROJECTS="$LLVM_PROJECTS clang"
        fi
        LLVM_PROJECTS="$LLVM_PROJECTS $PROJ"
      else
        echo "Project '$PROJ' is already enabled, ignoring extra occurrences."
      fi
      ;;
    -h|--help)
      show_usage
      exit 0
      ;;
    *)
      echo "Unknown option: $1"
      exit 1
  esac
done

if [ "$SOURCE_DIR" == "" ]; then
  echo "Must specify checkout directory using --to"
  exit 1
fi

if [ "$LLVM_BRANCH" == "" ]; then
  GIT_BRANCH_ARG=""
else
  GIT_BRANCH_ARG="--branch $LLVM_BRANCH"
fi

# Note: this revision handling is a leftover from the svn version of this
# script; SVN_REV_ARG is not passed to the git clone below.
if [ "$LLVM_SVN_REV" != "" ]; then
  SVN_REV_ARG="-r$LLVM_SVN_REV"
  echo "Checking out svn revision r$LLVM_SVN_REV."
else
  SVN_REV_ARG=""
  echo "Checking out latest svn revision."
fi

# Get the sources from git.
echo "Checking out sources from git"

for LLVM_PROJECT in $LLVM_PROJECTS; do
  if [ "$LLVM_PROJECT" == "llvm" ]; then
    CHECKOUT_DIR="$SOURCE_DIR"
  elif [ "$LLVM_PROJECT" == "libcxx" ] || [ "$LLVM_PROJECT" == "libcxxabi" ] || [ "$LLVM_PROJECT" == "compiler-rt" ]; then
    CHECKOUT_DIR="$SOURCE_DIR/projects/$LLVM_PROJECT"
  elif [ "$LLVM_PROJECT" == "clang" ]; then
    CHECKOUT_DIR="$SOURCE_DIR/tools/clang"
  elif [ "$LLVM_PROJECT" == "clang-tools-extra" ]; then
    CHECKOUT_DIR="$SOURCE_DIR/tools/clang/tools/extra"
  else
    CHECKOUT_DIR="$SOURCE_DIR/$LLVM_PROJECT"
  fi

  echo "Checking out https://git.llvm.org/git/$LLVM_PROJECT to $CHECKOUT_DIR"
  git clone --depth=1 $GIT_BRANCH_ARG \
    "https://git.llvm.org/git/$LLVM_PROJECT.git" \
    "$CHECKOUT_DIR"
done

echo "Done"
utils/docker/scripts/install_clang_packages.sh (new executable file, 64 lines)
@@ -0,0 +1,64 @@
#!/usr/bin/env bash
#===- libcxx/utils/docker/scripts/install_clang_packages.sh ----------------===//
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===-----------------------------------------------------------------------===//

set -e

function show_usage() {
  cat << EOF
Usage: install_clang_packages.sh [options]

Install the latest clang, libc++ and libc++abi packages from apt.llvm.org.

Available options:
  -h|--help           show this help message
  --version           the numeric version of the package to use.
EOF
}

VERSION=""

while [[ $# -gt 0 ]]; do
  case "$1" in
    --version)
      shift
      VERSION="$1"
      shift
      ;;
    -h|--help)
      show_usage
      exit 0
      ;;
    *)
      echo "Unknown option: $1"
      exit 1
  esac
done



curl -fsSL https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
add-apt-repository -s "deb http://apt.llvm.org/$(lsb_release -cs)/ llvm-toolchain-$(lsb_release -cs) main"
apt-get update
apt-get install -y --no-install-recommends clang

echo "Testing clang version..."
clang --version

echo "Testing clang++ version..."
clang++ --version

# Figure out the libc++ and libc++abi package versions that we want.
if [ "$VERSION" == "" ]; then
  VERSION="$(apt-cache search 'libc\+\+-[0-9]-dev' | awk '{print $1}' | awk -F- '{print $2}')"
  echo "Installing version '$VERSION'"
fi

apt-get install -y --no-install-recommends "libc++-$VERSION-dev" "libc++abi-$VERSION-dev"

echo "Done"
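As a hedged sketch, the script can also be pinned to an explicit apt package version instead of auto-detecting one (the version number below is illustrative):

```bash
# Install libc++-7-dev and libc++abi-7-dev rather than the newest packages.
$ ./install_clang_packages.sh --version 7
```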

utils/docker/scripts/run_buildbot.sh (new executable file, 55 lines)
@@ -0,0 +1,55 @@
#!/usr/bin/env bash
set -x

BOT_DIR=/b
BOT_NAME=$1
BOT_PASS=$2

mkdir -p $BOT_DIR

#curl "https://repo.stackdriver.com/stack-install.sh" | bash -s -- --write-gcm

apt-get update -y
apt-get upgrade -y

systemctl set-property buildslave.service TasksMax=100000

buildslave stop $BOT_DIR

chown buildbot:buildbot $BOT_DIR

echo "Connecting as $BOT_NAME"
buildslave create-slave --allow-shutdown=signal $BOT_DIR lab.llvm.org:9990 $BOT_NAME $BOT_PASS

echo "Eric Fiselier <ericwf@google.com>" > $BOT_DIR/info/admin

{
  uname -a | head -n1
  cmake --version | head -n1
  g++ --version | head -n1
  ld --version | head -n1
  date
  lscpu
} > $BOT_DIR/info/host

echo "SLAVE_RUNNER=/usr/bin/buildslave
SLAVE_ENABLED[1]=\"1\"
SLAVE_NAME[1]=\"buildslave1\"
SLAVE_USER[1]=\"buildbot\"
SLAVE_BASEDIR[1]=\"$BOT_DIR\"
SLAVE_OPTIONS[1]=\"\"
SLAVE_PREFIXCMD[1]=\"\"" > /etc/default/buildslave

chown -R buildbot:buildbot $BOT_DIR
systemctl daemon-reload
service buildslave restart

sleep 30
cat $BOT_DIR/twistd.log
grep "slave is ready" $BOT_DIR/twistd.log || shutdown now

# GCE can restart the instance after 24h in the middle of a build.
# Gracefully restart before that happens.
sleep 72000
while pkill -SIGHUP buildslave; do sleep 5; done;
shutdown now
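For reference, a sketch of how this script is expected to be invoked (the bot name and password are placeholders):

```bash
# $1 is the buildbot worker name, $2 its password (see BOT_NAME/BOT_PASS above).
$ ./run_buildbot.sh libcxx-bot-1 secret-password
```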

utils/google-benchmark/.clang-format (new file, 5 lines)
@@ -0,0 +1,5 @@
---
Language: Cpp
BasedOnStyle: Google
...
utils/google-benchmark/.gitignore (vendored, 12 lines changed)
@@ -6,6 +6,7 @@
*.dylib
*.cmake
!/cmake/*.cmake
!/test/AssemblyTests.cmake
*~
*.pyc
__pycache__
@@ -41,6 +42,17 @@ build.ninja
install_manifest.txt
rules.ninja

# bazel output symlinks.
bazel-*

# out-of-source build top-level folders.
build/
_build/
build*/

# in-source dependencies
/googletest/

# Visual Studio 2015/2017 cache/options directory
.vs/
CMakeSettings.json
utils/google-benchmark/.travis-libcxx-setup.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/env bash

# Install a newer CMake version
curl -sSL https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.sh -o install-cmake.sh
chmod +x install-cmake.sh
sudo ./install-cmake.sh --prefix=/usr/local --skip-license

# Checkout LLVM sources
git clone --depth=1 https://github.com/llvm-mirror/llvm.git llvm-source
git clone --depth=1 https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx
git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi

# Setup libc++ options
if [ -z "$BUILD_32_BITS" ]; then
  export BUILD_32_BITS=OFF && echo disabling 32 bit build
fi

# Build and install libc++ (Use unstable ABI for better sanitizer coverage)
mkdir llvm-build && cd llvm-build
cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \
      -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \
      -DLIBCXX_ABI_UNSTABLE=ON \
      -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \
      -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \
      ../llvm-source
make cxx -j2
sudo make install-cxxabi install-cxx
cd ../
utils/google-benchmark/.travis.yml (new file, 199 lines)
@@ -0,0 +1,199 @@
sudo: required
dist: trusty
language: cpp

env:
  global:
    - /usr/local/bin:$PATH

matrix:
  include:
    - compiler: gcc
      addons:
        apt:
          packages:
            - lcov
      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage
    - compiler: gcc
      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug
    - compiler: gcc
      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release
    - compiler: gcc
      addons:
        apt:
          packages:
            - g++-multilib
      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug BUILD_32_BITS=ON
    - compiler: gcc
      addons:
        apt:
          packages:
            - g++-multilib
      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release BUILD_32_BITS=ON
    - compiler: gcc
      env:
        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug
        - ENABLE_SANITIZER=1
        - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold"
    - compiler: clang
      env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug
    - compiler: clang
      env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release
    # Clang w/ libc++
    - compiler: clang
      addons:
        apt:
          packages:
            clang-3.8
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1
        - EXTRA_FLAGS="-stdlib=libc++"
    - compiler: clang
      addons:
        apt:
          packages:
            clang-3.8
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
        - LIBCXX_BUILD=1
        - EXTRA_FLAGS="-stdlib=libc++"
    # Clang w/ 32bit libc++
    - compiler: clang
      addons:
        apt:
          packages:
            - clang-3.8
            - g++-multilib
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1
        - BUILD_32_BITS=ON
        - EXTRA_FLAGS="-stdlib=libc++ -m32"
    # Clang w/ 32bit libc++
    - compiler: clang
      addons:
        apt:
          packages:
            - clang-3.8
            - g++-multilib
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
        - LIBCXX_BUILD=1
        - BUILD_32_BITS=ON
        - EXTRA_FLAGS="-stdlib=libc++ -m32"
    # Clang w/ libc++, ASAN, UBSAN
    - compiler: clang
      addons:
        apt:
          packages:
            clang-3.8
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address"
        - ENABLE_SANITIZER=1
        - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all"
        - UBSAN_OPTIONS=print_stacktrace=1
    # Clang w/ libc++ and MSAN
    - compiler: clang
      addons:
        apt:
          packages:
            clang-3.8
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins
        - ENABLE_SANITIZER=1
        - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins"
    # Clang w/ libc++ and TSAN
    - compiler: clang
      addons:
        apt:
          packages:
            clang-3.8
      env:
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo
        - LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread
        - ENABLE_SANITIZER=1
        - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all"
    - os: osx
      osx_image: xcode8.3
      compiler: clang
      env:
        - COMPILER=clang++ BUILD_TYPE=Debug
    - os: osx
      osx_image: xcode8.3
      compiler: clang
      env:
        - COMPILER=clang++ BUILD_TYPE=Release
    - os: osx
      osx_image: xcode8.3
      compiler: clang
      env:
        - COMPILER=clang++ BUILD_TYPE=Release BUILD_32_BITS=ON
    - os: osx
      osx_image: xcode8.3
      compiler: gcc
      env:
        - COMPILER=g++-7 C_COMPILER=gcc-7 BUILD_TYPE=Debug

before_script:
  - if [ -n "${LIBCXX_BUILD}" ]; then
      source .travis-libcxx-setup.sh;
    fi
  - if [ -n "${ENABLE_SANITIZER}" ]; then
      export EXTRA_OPTIONS="-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF";
    else
      export EXTRA_OPTIONS="";
    fi
  - mkdir -p build && cd build

before_install:
  - if [ -z "$BUILD_32_BITS" ]; then
      export BUILD_32_BITS=OFF && echo disabling 32 bit build;
    fi
  - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then
      sudo add-apt-repository -y "ppa:ubuntu-toolchain-r/test";
      sudo apt-get update --option Acquire::Retries=100 --option Acquire::http::Timeout="60";
    fi

install:
  - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then
      travis_wait sudo -E apt-get -yq --no-install-suggests --no-install-recommends install g++-6;
    fi
  - if [ "${TRAVIS_OS_NAME}" == "linux" -a "${BUILD_32_BITS}" == "OFF" ]; then
      travis_wait sudo -E apt-get -y --no-install-suggests --no-install-recommends install llvm-3.9-tools;
      sudo cp /usr/lib/llvm-3.9/bin/FileCheck /usr/local/bin/;
    fi
  - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
      PATH=~/.local/bin:${PATH};
      pip install --user --upgrade pip;
      travis_wait pip install --user cpp-coveralls;
    fi
  - if [ "${C_COMPILER}" == "gcc-7" -a "${TRAVIS_OS_NAME}" == "osx" ]; then
      rm -f /usr/local/include/c++;
      brew update;
      travis_wait brew install gcc@7;
    fi
  - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
      sudo apt-get update -qq;
      sudo apt-get install -qq unzip;
      wget https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-linux-x86_64.sh --output-document bazel-installer.sh;
      travis_wait sudo bash bazel-installer.sh;
    fi
  - if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
      curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-darwin-x86_64.sh;
      travis_wait sudo bash bazel-installer.sh;
    fi

script:
  - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} ..
  - make
  - ctest -C ${BUILD_TYPE} --output-on-failure
  - bazel test -c dbg --define google_benchmark.have_regex=posix --announce_rc --verbose_failures --test_output=errors --keep_going //test/...

after_success:
  - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
      coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .;
    fi
utils/google-benchmark/.ycm_extra_conf.py (new file, 115 lines)
@@ -0,0 +1,115 @@
import os
import ycm_core

# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Werror',
'-pedantic-errors',
'-std=c++0x',
'-fno-strict-aliasing',
'-O3',
'-DNDEBUG',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x', 'c++',
'-I', 'include',
'-isystem', '/usr/include',
'-isystem', '/usr/local/include',
]


# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''

if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None

SOURCE_EXTENSIONS = [ '.cc' ]

def DirectoryOfThisScript():
  return os.path.dirname( os.path.abspath( __file__ ) )


def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  if not working_directory:
    return list( flags )
  new_flags = []
  make_next_absolute = False
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  for flag in flags:
    new_flag = flag

    if make_next_absolute:
      make_next_absolute = False
      if not flag.startswith( '/' ):
        new_flag = os.path.join( working_directory, flag )

    for path_flag in path_flags:
      if flag == path_flag:
        make_next_absolute = True
        break

      if flag.startswith( path_flag ):
        path = flag[ len( path_flag ): ]
        new_flag = path_flag + os.path.join( working_directory, path )
        break

    if new_flag:
      new_flags.append( new_flag )
  return new_flags


def IsHeaderFile( filename ):
  extension = os.path.splitext( filename )[ 1 ]
  return extension in [ '.h', '.hxx', '.hpp', '.hh' ]


def GetCompilationInfoForFile( filename ):
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile(
          replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    return None
  return database.GetCompilationInfoForFile( filename )


def FlagsForFile( filename, **kwargs ):
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None

    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
  else:
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )

  return {
    'flags': final_flags,
    'do_cache': True
  }

utils/google-benchmark/AUTHORS
@@ -13,11 +13,13 @@ Arne Beer <arne@twobeer.de>
Carto
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dirac Research
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Google Inc.
International Business Machines Corporation
@@ -31,13 +33,16 @@ Kishan Kumar <kumar.kishan@outlook.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
MongoDB Inc.
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Roman Lebedev <lebedev.ri@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Steinar H. Gunderson <sgunderson@bigfoot.com>
Stripe, Inc.
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>
utils/google-benchmark/BUILD.bazel (new file, 42 lines)
@@ -0,0 +1,42 @@
licenses(["notice"])

config_setting(
    name = "windows",
    values = {
        "cpu": "x64_windows",
    },
    visibility = [":__subpackages__"],
)

cc_library(
    name = "benchmark",
    srcs = glob(
        [
            "src/*.cc",
            "src/*.h",
        ],
        exclude = ["src/benchmark_main.cc"],
    ),
    hdrs = ["include/benchmark/benchmark.h"],
    linkopts = select({
        ":windows": ["-DEFAULTLIB:shlwapi.lib"],
        "//conditions:default": ["-pthread"],
    }),
    strip_include_prefix = "include",
    visibility = ["//visibility:public"],
)

cc_library(
    name = "benchmark_main",
    srcs = ["src/benchmark_main.cc"],
    hdrs = ["include/benchmark/benchmark.h"],
    strip_include_prefix = "include",
    visibility = ["//visibility:public"],
    deps = [":benchmark"],
)

cc_library(
    name = "benchmark_internal_headers",
    hdrs = glob(["src/*.h"]),
    visibility = ["//test:__pkg__"],
)

utils/google-benchmark/CMakeLists.txt
@@ -16,7 +16,11 @@ option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
if(NOT MSVC)
  option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
else()
  set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC)" FORCE)
endif()
option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON)

# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which
@@ -27,17 +31,55 @@ option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree buildi
# in cases where it is not possible to build or find a valid version of gtest.
option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)

set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
function(should_enable_assembly_tests)
  if(CMAKE_BUILD_TYPE)
    string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
    if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
      # FIXME: The --coverage flag needs to be removed when building assembly
      # tests for this to work.
      return()
    endif()
  endif()
  if (MSVC)
    return()
  elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
    return()
  elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
    # FIXME: Make these work on 32 bit builds
    return()
  elseif(BENCHMARK_BUILD_32_BITS)
    # FIXME: Make these work on 32 bit builds
    return()
  endif()
  find_program(LLVM_FILECHECK_EXE FileCheck)
  if (LLVM_FILECHECK_EXE)
    set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE)
    message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}")
  else()
    message(STATUS "Failed to find LLVM FileCheck")
    return()
  endif()
  set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE)
endfunction()
should_enable_assembly_tests()

# This option disables the building and running of the assembly verification tests
option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests"
    ${ENABLE_ASSEMBLY_TESTS_DEFAULT})

# Make sure we can import our CMake functions
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")


# Read the git tags to determine the project version
include(GetGitVersion)
get_git_version(GIT_VERSION)

# Tell the user what versions we are using
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION})
message("-- Version: ${VERSION}")
message(STATUS "Version: ${VERSION}")

# The version of the libraries
set(GENERIC_LIB_VERSION ${VERSION})
@@ -52,7 +94,7 @@ if (BENCHMARK_BUILD_32_BITS)
  add_required_cxx_compiler_flag(-m32)
endif()

if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
if (MSVC)
  # Turn compiler warnings up to 11
  string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
@@ -61,6 +103,7 @@ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
  if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
    add_cxx_compiler_flag(-EHs-)
    add_cxx_compiler_flag(-EHa-)
    add_definitions(-D_HAS_EXCEPTIONS=0)
  endif()
  # Link time optimisation
  if (BENCHMARK_ENABLE_LTO)
@@ -92,7 +135,6 @@ else()

  # Turn compiler warnings up to 11
  add_cxx_compiler_flag(-Wall)

  add_cxx_compiler_flag(-Wextra)
  add_cxx_compiler_flag(-Wshadow)
  add_cxx_compiler_flag(-Werror RELEASE)
@@ -101,8 +143,20 @@ else()
  add_cxx_compiler_flag(-pedantic)
  add_cxx_compiler_flag(-pedantic-errors)
  add_cxx_compiler_flag(-Wshorten-64-to-32)
  add_cxx_compiler_flag(-Wfloat-equal)
  add_cxx_compiler_flag(-fstrict-aliasing)
  # Disable warnings regarding deprecated parts of the library while building
  # and testing those parts of the library.
  add_cxx_compiler_flag(-Wno-deprecated-declarations)
  if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
    # Intel silently ignores '-Wno-deprecated-declarations',
    # warning no. 1786 must be explicitly disabled.
    # See #631 for rationale.
    add_cxx_compiler_flag(-wd1786)
  endif()
  # Disable deprecation warnings for release builds (when -Werror is enabled).
  add_cxx_compiler_flag(-Wno-deprecated RELEASE)
  add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO)
  add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL)
  if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
    add_cxx_compiler_flag(-fno-exceptions)
  endif()
@@ -114,7 +168,7 @@ else()
  endif()
  # ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden
  # (because of deprecated overload)
  add_cxx_compiler_flag(-wd654)
  add_cxx_compiler_flag(-wd654)
  add_cxx_compiler_flag(-Wthread-safety)
  if (HAVE_CXX_FLAG_WTHREAD_SAFETY)
    cxx_feature_check(THREAD_SAFETY_ATTRIBUTES)
@@ -140,7 +194,7 @@ else()
    if (GCC_RANLIB)
      set(CMAKE_RANLIB ${GCC_RANLIB})
    endif()
  elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
  elseif("${CMAKE_C_COMPILER_ID}" MATCHES "Clang")
    include(llvm-toolchain)
  endif()
endif()
@@ -165,12 +219,12 @@ else()
endif()

if (BENCHMARK_USE_LIBCXX)
  if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
  if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
    add_cxx_compiler_flag(-stdlib=libc++)
  elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR
          "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
    add_cxx_compiler_flag(-nostdinc++)
    message("libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
    message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
    # Adding -nodefaultlibs directly to CMAKE_<TYPE>_LINKER_FLAGS will break
    # configuration checks such as 'find_package(Threads)'
    list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs)
@@ -178,7 +232,7 @@ if (BENCHMARK_USE_LIBCXX)
  # linker flags appear before all linker inputs and -lc++ must appear after.
  list(APPEND BENCHMARK_CXX_LIBRARIES c++)
  else()
    message(FATAL "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler")
    message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler")
  endif()
endif(BENCHMARK_USE_LIBCXX)

utils/google-benchmark/CONTRIBUTORS
@@ -28,16 +28,19 @@ Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christopher Seymour <chris.j.seymour@hotmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dominic Hamon <dma@stripysock.com> <dominic@google.com>
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
John Millikin <jmillikin@stripe.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kai Wolf <kai.wolf@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
@@ -47,12 +50,14 @@ Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Pascal Leroy <phl@google.com>
Paul Redmond <paul.redmond@gmail.com>
Pierre Phaneuf <pphaneuf@google.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Raul Marin <rmrodriguez@cartodb.com>
Ray Glover <ray.glover@uk.ibm.com>
Robert Guo <robert.guo@mongodb.com>
Roman Lebedev <lebedev.ri@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Tobias Ulvgård <tobias.ulvgard@dirac.se>

utils/google-benchmark/README.md
@@ -6,14 +6,14 @@

A library to support the benchmarking of functions, similar to unit-tests.

Discussion group: https://groups.google.com/d/forum/benchmark-discuss
[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)

IRC channel: https://freenode.net #googlebenchmark

[Known issues and common problems](#known-issues)
IRC channel: [freenode](https://freenode.net) #googlebenchmark

[Additional Tooling Documentation](docs/tools.md)

[Assembly Testing Documentation](docs/AssemblyTests.md)


## Building

@@ -21,7 +21,7 @@ The basic steps for configuring and building the library look like this:

```bash
$ git clone https://github.com/google/benchmark.git
# Benchmark requires GTest as a dependency. Add the source tree as a subdirectory.
# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
$ git clone https://github.com/google/googletest.git benchmark/googletest
$ mkdir build && cd build
$ cmake -G <generator> [options] ../benchmark
@@ -29,15 +29,13 @@ $ cmake -G <generator> [options] ../benchmark
$ make
```

Note that Google Benchmark requires GTest to build and run the tests. This
dependency can be provided three ways:
Note that Google Benchmark requires Google Test to build and run the tests. This
dependency can be provided two ways:

* Checkout the GTest sources into `benchmark/googletest`.
* Checkout the Google Test sources into `benchmark/googletest` as above.
* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
  configuration, the library will automatically download and build any required
  dependencies.
* Otherwise, if nothing is done, CMake will use `find_package(GTest REQUIRED)`
  to resolve the required GTest dependency.

If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
to `CMAKE_ARGS`.
@@ -47,11 +45,10 @@ to `CMAKE_ARGS`.

For Ubuntu and Debian Based System

First make sure you have git and cmake installed (If not please install it)
First make sure you have git and cmake installed (If not please install them)

```
sudo apt-get install git
sudo apt-get install cmake
sudo apt-get install git cmake
```

Now, let's clone the repository and build it
@@ -59,21 +56,20 @@ Now, let's clone the repository and build it
```
git clone https://github.com/google/benchmark.git
cd benchmark
# If you want to build tests and don't use BENCHMARK_DOWNLOAD_DEPENDENCIES, then
# git clone https://github.com/google/googletest.git
mkdir build
cd build
cmake .. -DCMAKE_BUILD_TYPE=RELEASE
make
```

We need to install the library globally now
If you need to install the library globally

```
sudo make install
```

Now you have google/benchmark installed in your machine
Note: Don't forget to link to pthread library while building

## Stable and Experimental Library Versions

The main branch contains the latest stable version of the benchmarking library;
@@ -86,10 +82,16 @@ to use, test, and provide feedback on the new features are encouraged to try
this branch. However, this branch provides no stability guarantees and reserves
the right to change and break the API at any time.

## Further knowledge

It may help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/googletest/docs/primer.md)
as some of the structural aspects of the APIs are similar.

## Example usage
### Basic usage
Define a function that executes the code to be measured.
Define a function that executes the code to be measured, register it as a
benchmark function using the `BENCHMARK` macro, and ensure an appropriate `main`
function is available:

```c++
#include <benchmark/benchmark.h>
@@ -112,9 +114,28 @@ BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();
```

Don't forget to inform your linker to add benchmark library e.g. through `-lbenchmark` compilation flag.
Don't forget to inform your linker to add benchmark library e.g. through
`-lbenchmark` compilation flag. Alternatively, you may leave out the
`BENCHMARK_MAIN();` at the end of the source file and link against
`-lbenchmark_main` to get the same default behavior.

The benchmark library will reporting the timing for the code within the `for(...)` loop.
The benchmark library will measure and report the timing for code within the
`for(...)` loop.

#### Platform-specific libraries
When the library is built using GCC it is necessary to link with the pthread
library due to how GCC implements `std::thread`. Failing to link to pthread will
lead to runtime exceptions (unless you're using libc++), not linker errors. See
[issue #67](https://github.com/google/benchmark/issues/67) for more details. You
can link to pthread by adding `-pthread` to your linker command. Note, you can
also use `-lpthread`, but there are potential issues with ordering of command
line parameters if you use that.

If you're running benchmarks on Windows, the shlwapi library (`-lshlwapi`) is
also required.

If you're running benchmarks on solaris, you'll want the kstat library linked in
too (`-lkstat`).
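As an illustrative sketch of the resulting link line on Linux (the source and binary names are hypothetical):

```bash
# GCC link line; -pthread is required as described above.
$ g++ mybenchmark.cc -std=c++11 -lbenchmark -pthread -o mybenchmark
```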

### Passing arguments
Sometimes a family of benchmarks can be implemented with just one routine that
@@ -513,15 +534,7 @@ order to manually set the time unit, you can specify it manually:
BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
```

## Controlling number of iterations
In all cases, the number of iterations for which the benchmark is run is
governed by the amount of time the benchmark takes. Concretely, the number of
iterations is at least one, not more than 1e9, until CPU time is greater than
the minimum time, or the wallclock time is 5x minimum time. The minimum time is
set as a flag `--benchmark_min_time` or per benchmark by calling `MinTime` on
the registered benchmark object.

## Reporting the mean, median and standard deviation by repeated benchmarks
### Reporting the mean, median and standard deviation by repeated benchmarks
By default each benchmark is run once and that single result is reported.
However benchmarks are often noisy and a single result may not be representative
of the overall behavior. For this reason it's possible to repeatedly rerun the
@@ -532,12 +545,20 @@ The number of runs of each benchmark is specified globally by the
`Repetitions` on the registered benchmark object. When a benchmark is run more
than once the mean, median and standard deviation of the runs will be reported.

Additionally the `--benchmark_report_aggregates_only={true|false}` flag or
`ReportAggregatesOnly(bool)` function can be used to change how repeated tests
are reported. By default the result of each repeated run is reported. When this
option is `true` only the mean, median and standard deviation of the runs is reported.
Calling `ReportAggregatesOnly(bool)` on a registered benchmark object overrides
the value of the flag for that benchmark.
Additionally the `--benchmark_report_aggregates_only={true|false}`,
`--benchmark_display_aggregates_only={true|false}` flags or
`ReportAggregatesOnly(bool)`, `DisplayAggregatesOnly(bool)` functions can be
used to change how repeated tests are reported. By default the result of each
repeated run is reported. When `report aggregates only` option is `true`,
only the aggregates (i.e. mean, median and standard deviation, maybe complexity
measurements if they were requested) of the runs is reported, to both the
reporters - standard output (console), and the file.
However when only the `display aggregates only` option is `true`,
only the aggregates are displayed in the standard output, while the file
output still contains everything.
Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
registered benchmark object overrides the value of the appropriate flag for that
benchmark.
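A minimal sketch of configuring this on a registered benchmark (reusing the `BM_test` placeholder from the earlier examples):

```c++
// Run 10 repetitions and report only the aggregate statistics.
BENCHMARK(BM_test)->Repetitions(10)->ReportAggregatesOnly(true);
```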
|
||||
|
||||
## User-defined statistics for repeated benchmarks
While having mean, median and standard deviation is nice, this may not be
@@ -644,9 +665,12 @@ In multithreaded benchmarks, each counter is set on the calling thread only.
When the benchmark finishes, the counters from each thread will be summed;
the resulting sum is the value which will be shown for the benchmark.

The `Counter` constructor accepts two parameters: the value as a `double`
and a bit flag which allows you to show counters as rates and/or as
per-thread averages:
The `Counter` constructor accepts three parameters: the value as a `double`;
a bit flag which allows you to show counters as rates, and/or as per-thread
iterations, and/or as per-thread averages, and/or iteration invariants;
and a flag specifying the 'unit' - i.e. is 1k a 1000 (default,
`benchmark::Counter::OneK::kIs1000`), or 1024
(`benchmark::Counter::OneK::kIs1024`)?

```c++
// sets a simple counter
@@ -662,6 +686,9 @@ per-thread averages:

// There's also a combined flag:
state.counters["FooAvgRate"] = Counter(numFoos, benchmark::Counter::kAvgThreadsRate);

// This says that we process with the rate of state.range(0) bytes every iteration:
state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024);
```

When you're compiling in C++11 mode or later you can use `insert()` with
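A hedged sketch of the C++11 `insert()` form mentioned above; the counter
names and the `numFoos`/`numBars` values are placeholders:

```c++
// Set multiple counters in one call (names and values are illustrative).
state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}});
```
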
@@ -801,8 +828,29 @@ BM_memcpy/32 12 ns 12 ns 54687500
BM_memcpy/32k 1834 ns 1837 ns 357143
```

## Runtime and reporting considerations
When the benchmark binary is executed, each benchmark function is run serially.
The number of iterations to run is determined dynamically by running the
benchmark a few times, measuring the time taken, and ensuring that the
ultimate result will be statistically stable. As such, faster benchmark
functions will be run for more iterations than slower benchmark functions, and
the number of iterations is thus reported.

## Output Formats
In all cases, the number of iterations for which the benchmark is run is
governed by the amount of time the benchmark takes. Concretely, the number of
iterations is at least one, not more than 1e9, until CPU time is greater than
the minimum time, or the wallclock time is 5x minimum time. The minimum time is
set per benchmark by calling `MinTime` on the registered benchmark object.

Average timings are then reported over the iterations run. If multiple
repetitions are requested using the `--benchmark_repetitions` command-line
option, or at registration time, the benchmark function will be run several
times and statistical results across these repetitions will also be reported.

As well as the per-benchmark entries, a preamble in the report will include
information about the machine on which the benchmarks are run.

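A hedged sketch of pinning the minimum measurement time described above;
`BM_SlowOperation` is an illustrative name, not part of this change:

```c++
// Require at least 2 seconds of CPU time per measurement for a noisy benchmark.
BENCHMARK(BM_SlowOperation)->MinTime(2.0);
```
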
### Output Formats
The library supports multiple output formats. Use the
`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
is the default format.
@@ -821,7 +869,7 @@ BM_SetInsert/1024/10 33157 33648 21431 1.13369M
The JSON format outputs human readable json split into two top level attributes.
The `context` attribute contains information about the run in general, including
information about the CPU and the date.
The `benchmarks` attribute contains a list of ever benchmark run. Example json
The `benchmarks` attribute contains a list of every benchmark run. Example json
output looks like:
```json
{
@@ -870,14 +918,19 @@ name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
```

## Output Files
### Output Files
The library supports writing the output of the benchmark to a file specified
by `--benchmark_out=<filename>`. The format of the output can be specified
using `--benchmark_out_format={json|console|csv}`. Specifying
`--benchmark_out` does not suppress the console output.

## Result comparison

It is possible to compare the benchmarking results. See [Additional Tooling Documentation](docs/tools.md)

## Debug vs Release
By default, benchmark builds as a debug library. You will see a warning in the output when this is the case. To build it as a release library instead, use:
By default, benchmark builds as a debug library. You will see a warning in the
output when this is the case. To build it as a release library instead, use:

```
cmake -DCMAKE_BUILD_TYPE=Release
@@ -889,13 +942,11 @@ To enable link-time optimisation, use
cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
```

If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake cache variables, if autodetection fails.
If you are using clang, you may need to set `LLVMAR_EXECUTABLE`, `LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
cache variables, if autodetection fails.

## Linking against the library
When using gcc, it is necessary to link against pthread to avoid runtime exceptions.
This is due to how gcc implements std::thread.
See [issue #67](https://github.com/google/benchmark/issues/67) for more details.
If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.

## Compiler Support

@@ -925,11 +976,3 @@ sudo cpupower frequency-set --governor performance
./mybench
sudo cpupower frequency-set --governor powersave
```

# Known Issues

### Windows

* Users must manually link `shlwapi.lib`. Failure to do so may result
  in unresolved symbols.

7
utils/google-benchmark/WORKSPACE
Normal file
@@ -0,0 +1,7 @@
workspace(name = "com_github_google_benchmark")

http_archive(
    name = "com_google_googletest",
    urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
    strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
)
50
utils/google-benchmark/appveyor.yml
Normal file
@@ -0,0 +1,50 @@
version: '{build}'

image: Visual Studio 2017

configuration:
  - Debug
  - Release

environment:
  matrix:
    - compiler: msvc-15-seh
      generator: "Visual Studio 15 2017"

    - compiler: msvc-15-seh
      generator: "Visual Studio 15 2017 Win64"

    - compiler: msvc-14-seh
      generator: "Visual Studio 14 2015"

    - compiler: msvc-14-seh
      generator: "Visual Studio 14 2015 Win64"

    - compiler: gcc-5.3.0-posix
      generator: "MinGW Makefiles"
      cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin'
      APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015

matrix:
  fast_finish: true

install:
  # git bash conflicts with MinGW makefiles
  - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%")
  - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%")

build_script:
  - md _build -Force
  - cd _build
  - echo %configuration%
  - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON ..
  - cmake --build . --config %configuration%

test_script:
  - ctest -c %configuration% --timeout 300 --output-on-failure

artifacts:
  - path: '_build/CMakeFiles/*.log'
    name: logs
  - path: '_build/Testing/**/*.xml'
    name: test_results
@@ -62,3 +62,13 @@ function(add_required_cxx_compiler_flag FLAG)
    message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler")
  endif()
endfunction()

function(check_cxx_warning_flag FLAG)
  mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
  set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
  # Add -Werror to ensure the compiler generates an error if the warning flag
  # doesn't exist.
  set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}")
  check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
  set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
endfunction()

@@ -27,36 +27,38 @@ function(cxx_feature_check FILE)
    return()
  endif()

  message("-- Performing Test ${FEATURE}")
  if(CMAKE_CROSSCOMPILING)
    try_compile(COMPILE_${FEATURE}
                ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
                CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
                LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
    if(COMPILE_${FEATURE})
      message(WARNING
            "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
      set(RUN_${FEATURE} 0)
  if (NOT DEFINED COMPILE_${FEATURE})
    message(STATUS "Performing Test ${FEATURE}")
    if(CMAKE_CROSSCOMPILING)
      try_compile(COMPILE_${FEATURE}
                  ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
                  CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
                  LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
      if(COMPILE_${FEATURE})
        message(WARNING
              "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
        set(RUN_${FEATURE} 0)
      else()
        set(RUN_${FEATURE} 1)
      endif()
    else()
    set(RUN_${FEATURE} 1)
      message(STATUS "Performing Test ${FEATURE}")
      try_run(RUN_${FEATURE} COMPILE_${FEATURE}
              ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
              CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
              LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
    endif()
  else()
    message("-- Performing Test ${FEATURE}")
    try_run(RUN_${FEATURE} COMPILE_${FEATURE}
            ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
            CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
            LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
  endif()

  if(RUN_${FEATURE} EQUAL 0)
    message("-- Performing Test ${FEATURE} -- success")
    message(STATUS "Performing Test ${FEATURE} -- success")
    set(HAVE_${VAR} 1 PARENT_SCOPE)
    add_definitions(-DHAVE_${VAR})
  else()
    if(NOT COMPILE_${FEATURE})
      message("-- Performing Test ${FEATURE} -- failed to compile")
      message(STATUS "Performing Test ${FEATURE} -- failed to compile")
    else()
      message("-- Performing Test ${FEATURE} -- compiled but failed to run")
      message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run")
    endif()
  endif()
endfunction()

@@ -21,6 +21,7 @@ set(__get_git_version INCLUDED)
function(get_git_version var)
  if(GIT_EXECUTABLE)
      execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
          WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
          RESULT_VARIABLE status
          OUTPUT_VARIABLE GIT_VERSION
          ERROR_QUIET)
@@ -33,9 +34,11 @@ function(get_git_version var)

  # Work out if the repository is dirty
  execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
      WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
      OUTPUT_QUIET
      ERROR_QUIET)
  execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
      WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
      OUTPUT_VARIABLE GIT_DIFF_INDEX
      ERROR_QUIET)
  string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
@@ -46,6 +49,6 @@ function(get_git_version var)
    set(GIT_VERSION "v0.0.0")
  endif()

  message("-- git Version: ${GIT_VERSION}")
  message(STATUS "git Version: ${GIT_VERSION}")
  set(${var} ${GIT_VERSION} PARENT_SCOPE)
endfunction()

@@ -1,13 +1,11 @@

macro(split_list listname)
  string(REPLACE ";" " " ${listname} "${${listname}}")
endmacro()
include(split_list)

macro(build_external_gtest)
  include(ExternalProject)
  set(GTEST_FLAGS "")
  if (BENCHMARK_USE_LIBCXX)
    if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
    if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
      list(APPEND GTEST_FLAGS -stdlib=libc++)
    else()
      message(WARNING "Unsupported compiler (${CMAKE_CXX_COMPILER}) when using libc++")
@@ -23,9 +21,22 @@ macro(build_external_gtest)
  if ("${GTEST_BUILD_TYPE}" STREQUAL "COVERAGE")
    set(GTEST_BUILD_TYPE "DEBUG")
  endif()
  # FIXME: Since 10/Feb/2017 the googletest trunk has had a bug where
  # -Werror=unused-function fires during the build on OS X. This is a temporary
  # workaround to keep our travis bots from failing. It should be removed
  # once gtest is fixed.
  if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
    list(APPEND GTEST_FLAGS "-Wno-unused-function")
  endif()
  split_list(GTEST_FLAGS)
  set(EXCLUDE_FROM_ALL_OPT "")
  set(EXCLUDE_FROM_ALL_VALUE "")
  if (${CMAKE_VERSION} VERSION_GREATER "3.0.99")
    set(EXCLUDE_FROM_ALL_OPT "EXCLUDE_FROM_ALL")
    set(EXCLUDE_FROM_ALL_VALUE "ON")
  endif()
  ExternalProject_Add(googletest
      EXCLUDE_FROM_ALL ON
      ${EXCLUDE_FROM_ALL_OPT} ${EXCLUDE_FROM_ALL_VALUE}
      GIT_REPOSITORY https://github.com/google/googletest.git
      GIT_TAG master
      PREFIX "${CMAKE_BINARY_DIR}/googletest"
@@ -35,45 +46,68 @@ macro(build_external_gtest)
        -DCMAKE_C_COMPILER:STRING=${CMAKE_C_COMPILER}
        -DCMAKE_CXX_COMPILER:STRING=${CMAKE_CXX_COMPILER}
        -DCMAKE_INSTALL_PREFIX:PATH=<INSTALL_DIR>
        -DCMAKE_INSTALL_LIBDIR:PATH=<INSTALL_DIR>/lib
        -DCMAKE_CXX_FLAGS:STRING=${GTEST_FLAGS}
        -Dgtest_force_shared_crt:BOOL=ON
      )

  ExternalProject_Get_Property(googletest install_dir)

  add_library(gtest UNKNOWN IMPORTED)
  add_library(gtest_main UNKNOWN IMPORTED)
  set(GTEST_INCLUDE_DIRS ${install_dir}/include)
  file(MAKE_DIRECTORY ${GTEST_INCLUDE_DIRS})

  set(LIB_SUFFIX "${CMAKE_STATIC_LIBRARY_SUFFIX}")
  set(LIB_PREFIX "${CMAKE_STATIC_LIBRARY_PREFIX}")

  if("${GTEST_BUILD_TYPE}" STREQUAL "DEBUG")
    set(LIB_SUFFIX "d${CMAKE_STATIC_LIBRARY_SUFFIX}")
  endif()
  file(MAKE_DIRECTORY ${install_dir}/include)
  set_target_properties(gtest PROPERTIES
    IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}gtest${LIB_SUFFIX}
    INTERFACE_INCLUDE_DIRECTORIES ${install_dir}/include
  )
  set_target_properties(gtest_main PROPERTIES
    IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}gtest_main${LIB_SUFFIX}
    INTERFACE_INCLUDE_DIRECTORIES ${install_dir}/include
  )
  add_dependencies(gtest googletest)
  add_dependencies(gtest_main googletest)
  set(GTEST_BOTH_LIBRARIES gtest gtest_main)
  #set(GTEST_INCLUDE_DIRS ${install_dir}/include)

  # Use gmock_main instead of gtest_main because it initializes gtest as well.
  # Note: The libraries are listed in reverse order of their dependancies.
  foreach(LIB gtest gmock gmock_main)
    add_library(${LIB} UNKNOWN IMPORTED)
    set_target_properties(${LIB} PROPERTIES
      IMPORTED_LOCATION ${install_dir}/lib/${LIB_PREFIX}${LIB}${LIB_SUFFIX}
      INTERFACE_INCLUDE_DIRECTORIES ${GTEST_INCLUDE_DIRS}
      INTERFACE_LINK_LIBRARIES "${GTEST_BOTH_LIBRARIES}"
    )
    add_dependencies(${LIB} googletest)
    list(APPEND GTEST_BOTH_LIBRARIES ${LIB})
  endforeach()
endmacro(build_external_gtest)

if (BENCHMARK_ENABLE_GTEST_TESTS)
  if (IS_DIRECTORY ${CMAKE_SOURCE_DIR}/googletest)
  if (IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/googletest)
    set(GTEST_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/googletest")
    set(INSTALL_GTEST OFF CACHE INTERNAL "")
    set(INSTALL_GMOCK OFF CACHE INTERNAL "")
    add_subdirectory(${CMAKE_SOURCE_DIR}/googletest)
    set(GTEST_BOTH_LIBRARIES gtest gtest_main)
    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/googletest)
    set(GTEST_BOTH_LIBRARIES gtest gmock gmock_main)
    foreach(HEADER test mock)
      # CMake 2.8 and older don't respect INTERFACE_INCLUDE_DIRECTORIES, so we
      # have to add the paths ourselves.
      set(HFILE g${HEADER}/g${HEADER}.h)
      set(HPATH ${GTEST_ROOT}/google${HEADER}/include)
      find_path(HEADER_PATH_${HEADER} ${HFILE}
          NO_DEFAULT_PATHS
          HINTS ${HPATH}
      )
      if (NOT HEADER_PATH_${HEADER})
        message(FATAL_ERROR "Failed to find header ${HFILE} in ${HPATH}")
      endif()
      list(APPEND GTEST_INCLUDE_DIRS ${HEADER_PATH_${HEADER}})
    endforeach()
  elseif(BENCHMARK_DOWNLOAD_DEPENDENCIES)
    build_external_gtest()
  else()
    find_package(GTest REQUIRED)
    find_path(GMOCK_INCLUDE_DIRS gmock/gmock.h
        HINTS ${GTEST_INCLUDE_DIRS})
    if (NOT GMOCK_INCLUDE_DIRS)
      message(FATAL_ERROR "Failed to find header gmock/gmock.h with hint ${GTEST_INCLUDE_DIRS}")
    endif()
    set(GTEST_INCLUDE_DIRS ${GTEST_INCLUDE_DIRS} ${GMOCK_INCLUDE_DIRS})
    # FIXME: We don't currently require the gmock library to build the tests,
    # and it's likely we won't find it, so we don't try. As long as we've
    # found the gmock/gmock.h header and gtest_main that should be good enough.
  endif()
endif()

3
utils/google-benchmark/cmake/split_list.cmake
Normal file
@@ -0,0 +1,3 @@
macro(split_list listname)
  string(REPLACE ";" " " ${listname} "${${listname}}")
endmacro()
147
utils/google-benchmark/docs/AssemblyTests.md
Normal file
@@ -0,0 +1,147 @@
# Assembly Tests

The Benchmark library provides a number of functions whose primary
purpose is to affect assembly generation, including `DoNotOptimize`
and `ClobberMemory`. In addition there are other functions,
such as `KeepRunning`, for which generating good assembly is paramount.

For these functions it's important to have tests that verify the
correctness and quality of the implementation. This requires testing
the code generated by the compiler.

This document describes how the Benchmark library tests compiler output,
as well as how to properly write new tests.


## Anatomy of a Test

Writing a test has two steps:

* Write the code you want to generate assembly for.
* Add `// CHECK` lines to match against the verified assembly.

Example:
```c++

// CHECK-LABEL: test_add:
extern "C" int test_add() {
    extern int ExternInt;
    return ExternInt + 1;

    // CHECK: movl ExternInt(%rip), %eax
    // CHECK: addl %eax
    // CHECK: ret
}

```

#### LLVM Filecheck

[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
is used to test the generated assembly against the `// CHECK` lines
specified in the test's source file. Please see the documentation
linked above for information on how to write `CHECK` directives.

#### Tips and Tricks:

* Tests should match the minimal amount of output required to establish
  correctness. `CHECK` directives don't have to match on the exact next line
  after the previous match, so tests should omit checks for unimportant
  bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
  can be used to ensure a match occurs exactly after the previous match;
  a sketch follows this list).

* The tests are compiled with `-O3 -g0`. So we're only testing the
  optimized output.

* The assembly output is further cleaned up using `tools/strip_asm.py`.
  This removes comments, assembler directives, and unused labels before
  the test is run.

* The generated and stripped assembly file for a test is output under
  `<build-directory>/test/<test-name>.s`

* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
  to specify lines that should only match in certain situations.
  The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
  are only expected to match Clang or GCC's output respectively. Normal
  `CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
  `CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
  `CHECK` lines)

* Use `extern "C"` to disable name mangling for specific functions. This
  makes them easier to name in the `CHECK` lines.

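A hedged illustration of the `CHECK-NEXT` directive referenced in the list
above; the exact instruction sequence is compiler- and version-dependent and
is shown only as plausible `-O3` output:

```c++
// CHECK-LABEL: test_inc:
extern "C" int test_inc(int x) {
    return x + 1;

    // The `ret` must appear on the line immediately after the previous match:
    // CHECK:      leal 1(%rdi), %eax
    // CHECK-NEXT: ret
}
```
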
## Problems Writing Portable Tests

Writing tests which check the code generated by a compiler is
inherently non-portable. Different compilers and even different compiler
versions may generate entirely different code. The Benchmark tests
must tolerate this.

LLVM Filecheck provides a number of mechanisms to help write
"more portable" tests, including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).

#### Capturing Variables

For example, say GCC stores a variable in a register but Clang stores
it in memory. To write a test that tolerates both cases we "capture"
the destination of the store, and then use the captured expression
to write the remainder of the test.

```c++
// CHECK-LABEL: test_div_no_op_into_shr:
extern "C" int test_div_no_op_into_shr(int value) {
    int divisor = 2;
    benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
    return value / divisor;

    // CHECK: movl $2, [[DEST:.*]]
    // CHECK: idivl [[DEST]]
    // CHECK: ret
}
```

#### Using Regular Expressions to Match Differing Output

Often tests require testing assembly lines which may subtly differ
between compilers or compiler versions. A common example of this
is matching stack frame addresses. In this case regular expressions
can be used to match the differing bits of output. For example:

```c++
int ExternInt;
struct Point { int x, y, z; };

// CHECK-LABEL: test_store_point:
extern "C" void test_store_point() {
    Point p{ExternInt, ExternInt, ExternInt};
    benchmark::DoNotOptimize(p);

    // CHECK: movl ExternInt(%rip), %eax
    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
    // CHECK: movl %eax, -{{[0-9]+}}(%rsp)
    // CHECK: ret
}
```

## Current Requirements and Limitations

The tests require Filecheck to be installed along the `PATH` of the
build machine. Otherwise the tests will be disabled.

Additionally, as mentioned in the previous section, codegen tests are
inherently non-portable. Currently the tests are limited to:

* x86_64 targets.
* Compiled with GCC or Clang

Further work could be done, at least on a limited basis, to extend the
tests to other architectures and compilers (using `CHECK` prefixes).

Furthermore, the tests fail for builds which specify additional flags
that modify code generation, including `--coverage` or `-fsanitize=`.

@@ -1,84 +1,25 @@
# Benchmark Tools

## compare_bench.py

The `compare_bench.py` utility can be used to compare the result of benchmarks.
The program is invoked like:

``` bash
$ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]...
```

Where `<old-benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.

`[benchmark options]` will be passed to the benchmark invocations. They can be anything that binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.

The sample output using the JSON test files under `Inputs/` gives:

``` bash
$ ./compare_bench.py ./gbench/Inputs/test1_run1.json ./gbench/Inputs/test1_run2.json
Comparing ./gbench/Inputs/test1_run1.json to ./gbench/Inputs/test1_run2.json
Benchmark                    Time       CPU   Time Old   Time New   CPU Old   CPU New
-------------------------------------------------------------------------------------------------------------
BM_SameTimes              +0.0000   +0.0000         10         10        10        10
BM_2xFaster               -0.5000   -0.5000         50         25        50        25
BM_2xSlower               +1.0000   +1.0000         50        100        50       100
BM_1PercentFaster         -0.0100   -0.0100        100         99       100        99
BM_1PercentSlower         +0.0100   +0.0100        100        101       100       101
BM_10PercentFaster        -0.1000   -0.1000        100         90       100        90
BM_10PercentSlower        +0.1000   +0.1000        100        110       100       110
BM_100xSlower            +99.0000  +99.0000        100      10000       100     10000
BM_100xFaster             -0.9900   -0.9900      10000        100     10000       100
BM_10PercentCPUToTime     +0.1000   -0.1000        100        110       100        90
BM_ThirdFaster            -0.3333   -0.3334        100         67       100        67
BM_BadTimeUnit            -0.9000   +0.2000          0          0         0         1
```

As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.

When a benchmark executable is run, the raw output from the benchmark is printed in real time to stdout. The sample output using `benchmark/basic_test` for both arguments looks like:

```
./compare_bench.py test/basic_test test/basic_test --benchmark_filter=BM_empty.*
RUNNING: test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmpN7LF3a
Run on (8 X 4000 MHz CPU s)
2017-11-07 23:28:36
---------------------------------------------------------------------
Benchmark                      Time           CPU Iterations
---------------------------------------------------------------------
BM_empty                       4 ns          4 ns  170178757
BM_empty/threads:8             1 ns          7 ns  103868920
BM_empty_stop_start            0 ns          0 ns 1000000000
BM_empty_stop_start/threads:8  0 ns          0 ns 1403031720
RUNNING: /test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmplvrIp8
Run on (8 X 4000 MHz CPU s)
2017-11-07 23:28:38
---------------------------------------------------------------------
Benchmark                      Time           CPU Iterations
---------------------------------------------------------------------
BM_empty                       4 ns          4 ns  169534855
BM_empty/threads:8             1 ns          7 ns  104188776
BM_empty_stop_start            0 ns          0 ns 1000000000
BM_empty_stop_start/threads:8  0 ns          0 ns 1404159424
Comparing ../build/test/basic_test to ../build/test/basic_test
Benchmark                        Time       CPU   Time Old   Time New   CPU Old   CPU New
---------------------------------------------------------------------------------------------------------------------
BM_empty                      -0.0048   -0.0049          4          4         4         4
BM_empty/threads:8            -0.0123   -0.0054          1          1         7         7
BM_empty_stop_start           -0.0000   -0.0000          0          0         0         0
BM_empty_stop_start/threads:8 -0.0029   +0.0001          0          0         0         0

```

As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
Obviously this example doesn't give any useful output, but it's intended to show the output format when `compare_bench.py` needs to run benchmarks.

## compare.py

The `compare.py` utility can be used to compare the result of benchmarks.

**NOTE**: the utility relies on the scipy package which can be installed using [these instructions](https://www.scipy.org/install.html).

### Displaying aggregates only

The switch `-a` / `--display_aggregates_only` can be used to control the
display of the normal iterations vs the aggregates. When passed, it will
be passed through to the benchmark binaries to be run, and will be accounted for
in the tool itself; only the aggregates will be displayed, but not normal runs.
It only affects the display; the separate runs will still be used to calculate
the U test.

### Modes of operation

There are three modes of operation:

1. Just compare two benchmarks, what `compare_bench.py` did.
1. Just compare two benchmarks
The program is invoked like:

``` bash
@@ -240,3 +181,19 @@ Benchmark Time CPU Time Old Time New CPU Old CPU New
```
This is a mix of the previous two modes: two (potentially different) benchmark binaries are run, and a different filter is applied to each one.
As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`.

### U test

If there is a sufficient repetition count of the benchmarks, the tool can do
a [U Test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test), of the
null hypothesis that it is equally likely that a randomly selected value from
one sample will be less than or greater than a randomly selected value from a
second sample.

If the calculated p-value is lower than the significance
level alpha, then the result is said to be statistically significant and the
null hypothesis is rejected; in other words, the two benchmarks
aren't identical.

**WARNING**: requires a **LARGE** number of repetitions (no less than 9) to be
meaningful!

@@ -164,7 +164,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#ifndef BENCHMARK_BENCHMARK_H_
#define BENCHMARK_BENCHMARK_H_


// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer.
#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L)
#define BENCHMARK_HAS_CXX11
@@ -172,22 +171,23 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);

#include <stdint.h>

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iosfwd>
#include <string>
#include <vector>
#include <map>
#include <set>
#include <string>
#include <vector>

#if defined(BENCHMARK_HAS_CXX11)
#include <type_traits>
#include <initializer_list>
#include <type_traits>
#include <utility>
#endif

#if defined(_MSC_VER)
#include <intrin.h> // for _ReadWriteBarrier
#include <intrin.h>  // for _ReadWriteBarrier
#endif

#ifndef BENCHMARK_HAS_CXX11
@@ -226,21 +226,36 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#define BENCHMARK_INTERNAL_TOSTRING2(x) #x
#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x)

#if defined(__GNUC__)
#if defined(__GNUC__) || defined(__clang__)
#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
#else
#define BENCHMARK_BUILTIN_EXPECT(x, y) x
#define BENCHMARK_DEPRECATED_MSG(msg)
#define BENCHMARK_WARNING_MSG(msg) __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING(__LINE__) ") : warning note: " msg))
#define BENCHMARK_WARNING_MSG(msg)                           \
  __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \
      __LINE__) ") : warning note: " msg))
#endif

#if defined(__GNUC__) && !defined(__clang__)
#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#endif

#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

#if defined(__GNUC__) || __has_builtin(__builtin_unreachable)
#define BENCHMARK_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
#define BENCHMARK_UNREACHABLE() __assume(false)
#else
#define BENCHMARK_UNREACHABLE() ((void)0)
#endif

namespace benchmark {
class BenchmarkReporter;
class MemoryManager;

void Initialize(int* argc, char** argv);

@@ -253,7 +268,7 @@ bool ReportUnrecognizedArguments(int argc, char** argv);
// of each matching benchmark. Otherwise run each matching benchmark and
// report the results.
//
// The second and third overload use the specified 'console_reporter' and
// The second and third overload use the specified 'display_reporter' and
// 'file_reporter' respectively. 'file_reporter' will write to the file
// specified
// by '--benchmark_output'. If '--benchmark_output' is not given the
@@ -261,16 +276,13 @@ bool ReportUnrecognizedArguments(int argc, char** argv);
//
// RETURNS: The number of matching benchmarks.
size_t RunSpecifiedBenchmarks();
size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter);
size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter);
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              BenchmarkReporter* file_reporter);

// If this routine is called, peak memory allocation past this point in the
// benchmark is reported at the end of the benchmark report line. (It is
// computed by running the benchmark once with a single iteration and a memory
// tracer.)
// TODO(dominic)
// void MemoryUsage();
// Register a MemoryManager instance that will be used to collect and report
// allocation measurements for benchmark runs.
void RegisterMemoryManager(MemoryManager* memory_manager);

namespace internal {
class Benchmark;
@@ -289,13 +301,11 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();

}  // namespace internal


#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \
    defined(EMSCRIPTN)
# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
    defined(__EMSCRIPTEN__)
#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY
#endif


// The DoNotOptimize(...) function can be used to prevent a value or
// expression from being optimized away by the compiler. This function is
// intended to add little to no overhead.
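For illustration, a hedged usage sketch of `DoNotOptimize` as described in the
comment above; the benchmark and its loop body are hypothetical:

```c++
#include <benchmark/benchmark.h>

static void BM_Accumulate(benchmark::State& state) {
  for (auto _ : state) {
    int sum = 0;
    for (int i = 0; i < 1000; ++i) sum += i;
    benchmark::DoNotOptimize(sum);  // the compiler must materialize `sum`
  }
}
BENCHMARK(BM_Accumulate);
BENCHMARK_MAIN();
```
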
@@ -303,14 +313,18 @@ BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams();
#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY
template <class Tp>
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
  // Clang doesn't like the 'X' constraint on `value` and certain GCC versions
  // don't like the 'g' constraint. Attempt to placate them both.
  asm volatile("" : : "r,m"(value) : "memory");
}

template <class Tp>
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) {
#if defined(__clang__)
  asm volatile("" : : "g"(value) : "memory");
  asm volatile("" : "+r,m"(value) : : "memory");
#else
  asm volatile("" : : "i,r,m"(value) : "memory");
  asm volatile("" : "+m,r"(value) : : "memory");
#endif
}

// Force the compiler to flush pending writes to global memory. Acts as an
// effective read/write barrier
inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
@@ -323,9 +337,7 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
  _ReadWriteBarrier();
}

inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() {
  _ReadWriteBarrier();
}
inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); }
#else
template <class Tp>
inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
@@ -334,39 +346,63 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) {
// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers
#endif



// This class is used for user-defined counters.
class Counter {
 public:

 public:
  enum Flags {
    kDefaults = 0,
    kDefaults = 0,
    // Mark the counter as a rate. It will be presented divided
    // by the duration of the benchmark.
    kIsRate = 1,
    kIsRate = 1U << 0U,
    // Mark the counter as a thread-average quantity. It will be
    // presented divided by the number of threads.
    kAvgThreads = 2,
    kAvgThreads = 1U << 1U,
    // Mark the counter as a thread-average rate. See above.
    kAvgThreadsRate = kIsRate|kAvgThreads
    kAvgThreadsRate = kIsRate | kAvgThreads,
    // Mark the counter as a constant value, valid/same for *every* iteration.
    // When reporting, it will be *multiplied* by the iteration count.
    kIsIterationInvariant = 1U << 2U,
    // Mark the counter as a constant rate.
    // When reporting, it will be *multiplied* by the iteration count
    // and then divided by the duration of the benchmark.
    kIsIterationInvariantRate = kIsRate | kIsIterationInvariant,
    // Mark the counter as a iteration-average quantity.
    // It will be presented divided by the number of iterations.
    kAvgIterations = 1U << 3U,
    // Mark the counter as a iteration-average rate. See above.
    kAvgIterationsRate = kIsRate | kAvgIterations
  };

  enum OneK {
    // 1'000 items per 1k
    kIs1000 = 1000,
    // 1'024 items per 1k
    kIs1024 = 1024
  };

  double value;
  Flags flags;
  Flags flags;
  OneK oneK;

  BENCHMARK_ALWAYS_INLINE
  Counter(double v = 0., Flags f = kDefaults) : value(v), flags(f) {}

  BENCHMARK_ALWAYS_INLINE operator double const& () const { return value; }
  BENCHMARK_ALWAYS_INLINE operator double & () { return value; }
  Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000)
      : value(v), flags(f), oneK(k) {}

  BENCHMARK_ALWAYS_INLINE operator double const&() const { return value; }
  BENCHMARK_ALWAYS_INLINE operator double&() { return value; }
};

// A helper for user code to create unforeseen combinations of Flags, without
// having to do this cast manually each time, or providing this operator.
Counter::Flags inline operator|(const Counter::Flags& LHS,
                                const Counter::Flags& RHS) {
  return static_cast<Counter::Flags>(static_cast<int>(LHS) |
                                     static_cast<int>(RHS));
}
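A hedged usage sketch for the new flags above, assumed to run inside a
benchmark body where `max_batch` is some computed `double`:

```c++
// Report an iteration-averaged counter; kAvgIterations divides the total
// by the number of iterations when the result is printed.
state.counters["MaxBatch"] =
    benchmark::Counter(max_batch, benchmark::Counter::kAvgIterations);
```
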
// This is the container for the user-defined counters.
typedef std::map<std::string, Counter> UserCounters;


// TimeUnit is passed to a benchmark in order to specify the order of magnitude
// for the measured time.
enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
@@ -379,7 +415,7 @@ enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda };

// BigOFunc is passed to a benchmark in order to specify the asymptotic
// computational complexity for the benchmark.
typedef double(BigOFunc)(int);
typedef double(BigOFunc)(int64_t);

// StatisticsFunc is passed to a benchmark in order to compute some descriptive
// statistics over all the measurements of some type
@@ -389,24 +425,35 @@ struct Statistics {
  std::string name_;
  StatisticsFunc* compute_;

  Statistics(std::string name, StatisticsFunc* compute)
    : name_(name), compute_(compute) {}
  Statistics(const std::string& name, StatisticsFunc* compute)
      : name_(name), compute_(compute) {}
};

namespace internal {
struct BenchmarkInstance;
class ThreadTimer;
class ThreadManager;

enum ReportMode
enum AggregationReportMode
#if defined(BENCHMARK_HAS_CXX11)
    : unsigned
    : unsigned
#else
#endif
{
  RM_Unspecified,  // The mode has not been manually specified
  RM_Default,      // The mode is user-specified as default.
  RM_ReportAggregatesOnly
{
  // The mode has not been manually specified
  ARM_Unspecified = 0,
  // The mode is user-specified.
  // This may or may not be set when the following bit-flags are set.
  ARM_Default = 1U << 0U,
  // File reporter should only output aggregates.
  ARM_FileReportAggregatesOnly = 1U << 1U,
  // Display reporter should only output aggregates
  ARM_DisplayReportAggregatesOnly = 1U << 2U,
  // Both reporters should only display aggregates.
  ARM_ReportAggregatesOnly =
      ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly
};

}  // namespace internal

// State is passed to a running Benchmark and contains state for the
@@ -429,16 +476,19 @@ class State {
  // Returns true if the benchmark should continue through another iteration.
  // NOTE: A benchmark may not return from the test until KeepRunning() has
  // returned false.
  bool KeepRunning() {
    if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
      StartKeepRunning();
    }
    bool const res = (--total_iterations_ != 0);
    if (BENCHMARK_BUILTIN_EXPECT(!res, false)) {
      FinishKeepRunning();
    }
    return res;
  }
  bool KeepRunning();

  // Returns true iff the benchmark should run n more iterations.
  // REQUIRES: 'n' > 0.
  // NOTE: A benchmark must not return from the test until KeepRunningBatch()
  // has returned false.
  // NOTE: KeepRunningBatch() may overshoot by up to 'n' iterations.
  //
  // Intended usage:
  //   while (state.KeepRunningBatch(1000)) {
  //     // process 1000 elements
  //   }
  bool KeepRunningBatch(size_t n);

  // REQUIRES: timer is running and 'SkipWithError(...)' has not been called
  // by the current thread.
@@ -499,16 +549,21 @@ class State {

  // Set the number of bytes processed by the current benchmark
  // execution. This routine is typically called once at the end of a
  // throughput oriented benchmark. If this routine is called with a
  // value > 0, the report is printed in MB/sec instead of nanoseconds
  // per iteration.
  // throughput oriented benchmark.
  //
  // REQUIRES: a benchmark has exited its benchmarking loop.
  BENCHMARK_ALWAYS_INLINE
  void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; }
  void SetBytesProcessed(int64_t bytes) {
    counters["bytes_per_second"] =
        Counter(static_cast<double>(bytes), Counter::kIsRate, Counter::kIs1024);
  }

  BENCHMARK_ALWAYS_INLINE
  size_t bytes_processed() const { return bytes_processed_; }
  int64_t bytes_processed() const {
    if (counters.find("bytes_per_second") != counters.end())
      return static_cast<int64_t>(counters.at("bytes_per_second"));
    return 0;
  }
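A hedged throughput sketch using the counter-backed setter above; the
benchmark name and sizes are illustrative:

```c++
#include <benchmark/benchmark.h>
#include <algorithm>
#include <vector>

static void BM_Copy(benchmark::State& state) {
  std::vector<char> src(static_cast<size_t>(state.range(0)), 'x');
  std::vector<char> dst(src.size());
  for (auto _ : state)
    std::copy(src.begin(), src.end(), dst.begin());
  // Stored as the "bytes_per_second" counter with kIsRate and kIs1024.
  state.SetBytesProcessed(int64_t(state.iterations()) * state.range(0));
}
BENCHMARK(BM_Copy)->Arg(8 << 10);
BENCHMARK_MAIN();
```
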
  // If this routine is called with complexity_n > 0 and complexity report is
  // requested for the
@@ -516,10 +571,10 @@ class State {
  // and complexity_n will
  // represent the length of N.
  BENCHMARK_ALWAYS_INLINE
  void SetComplexityN(int complexity_n) { complexity_n_ = complexity_n; }
  void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; }

  BENCHMARK_ALWAYS_INLINE
  int complexity_length_n() { return complexity_n_; }
  int64_t complexity_length_n() { return complexity_n_; }
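A hedged sketch tying `SetComplexityN` to an asymptotic-complexity report; the
sort benchmark is illustrative:

```c++
#include <benchmark/benchmark.h>
#include <algorithm>
#include <vector>

static void BM_SortVector(benchmark::State& state) {
  std::vector<int> v(static_cast<size_t>(state.range(0)));
  for (auto _ : state) {
    // Refill in reverse order so each iteration sorts unsorted data.
    for (size_t i = 0; i < v.size(); ++i) v[i] = int(v.size() - i);
    std::sort(v.begin(), v.end());
  }
  state.SetComplexityN(state.range(0));  // N for the Big-O fit below
}
BENCHMARK(BM_SortVector)->Range(1 << 10, 1 << 16)->Complexity(benchmark::oNLogN);
BENCHMARK_MAIN();
```
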
// If this routine is called with items > 0, then an items/s
|
||||
// label is printed on the benchmark report line for the currently
|
||||
@@ -528,10 +583,17 @@ class State {
|
||||
//
|
||||
// REQUIRES: a benchmark has exited its benchmarking loop.
|
||||
BENCHMARK_ALWAYS_INLINE
|
||||
void SetItemsProcessed(size_t items) { items_processed_ = items; }
|
||||
void SetItemsProcessed(int64_t items) {
|
||||
counters["items_per_second"] =
|
||||
Counter(static_cast<double>(items), benchmark::Counter::kIsRate);
|
||||
}
|
||||
|
||||
BENCHMARK_ALWAYS_INLINE
|
||||
size_t items_processed() const { return items_processed_; }
|
||||
int64_t items_processed() const {
|
||||
if (counters.find("items_per_second") != counters.end())
|
||||
return static_cast<int64_t>(counters.at("items_per_second"));
|
||||
return 0;
|
||||
}
|
||||
|
||||
// If this routine is called, the specified label is printed at the
|
||||
// end of the benchmark report line for the currently executing
|
||||
@@ -539,7 +601,7 @@ class State {
|
||||
// static void BM_Compress(benchmark::State& state) {
|
||||
// ...
|
||||
// double compress = input_size / output_size;
|
||||
// state.SetLabel(StringPrintf("compress:%.1f%%", 100.0*compression));
|
||||
// state.SetLabel(StrFormat("compress:%.1f%%", 100.0*compression));
|
||||
// }
|
||||
// Produces output that looks like:
|
||||
// BM_Compress 50 50 14115038 compress:27.3%
|
||||
@@ -553,34 +615,49 @@ class State {
|
||||
|
||||
// Range arguments for this run. CHECKs if the argument has been set.
|
||||
BENCHMARK_ALWAYS_INLINE
|
||||
int range(std::size_t pos = 0) const {
|
||||
int64_t range(std::size_t pos = 0) const {
|
||||
assert(range_.size() > pos);
|
||||
return range_[pos];
|
||||
}
|
||||
|
||||
BENCHMARK_DEPRECATED_MSG("use 'range(0)' instead")
|
||||
int range_x() const { return range(0); }
|
||||
int64_t range_x() const { return range(0); }
|
||||
|
||||
BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead")
|
||||
int range_y() const { return range(1); }
|
||||
int64_t range_y() const { return range(1); }
|
||||
|
||||
BENCHMARK_ALWAYS_INLINE
|
||||
size_t iterations() const { return (max_iterations - total_iterations_) + 1; }
|
||||
size_t iterations() const {
|
||||
if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) {
|
||||
return 0;
|
||||
}
|
||||
return max_iterations - total_iterations_ + batch_leftover_;
|
||||
}
|
||||
|
||||
private
|
||||
: // items we expect on the first cache line (ie 64 bytes of the struct)
|
||||
// When total_iterations_ is 0, KeepRunning() and friends will return false.
|
||||
// May be larger than max_iterations.
|
||||
size_t total_iterations_;
|
||||
|
||||
// When using KeepRunningBatch(), batch_leftover_ holds the number of
|
||||
// iterations beyond max_iters that were run. Used to track
|
||||
// completed_iterations_ accurately.
|
||||
size_t batch_leftover_;
|
||||
|
||||
public:
|
||||
const size_t max_iterations;
|
||||
|
||||
private:
|
||||
bool started_;
|
||||
bool finished_;
|
||||
size_t total_iterations_;
|
||||
|
||||
std::vector<int> range_;
|
||||
|
||||
size_t bytes_processed_;
|
||||
size_t items_processed_;
|
||||
|
||||
int complexity_n_;
|
||||
|
||||
bool error_occurred_;
|
||||
|
||||
private: // items we don't need on the first cache line
|
||||
std::vector<int64_t> range_;
|
||||
|
||||
int64_t complexity_n_;
|
||||
|
||||
public:
|
||||
// Container for user-defined counters.
|
||||
UserCounters counters;
|
||||
@@ -588,27 +665,66 @@ class State {
|
||||
const int thread_index;
|
||||
// Number of threads concurrently executing the benchmark.
|
||||
const int threads;
|
||||
const size_t max_iterations;
|
||||
|
||||
// TODO(EricWF) make me private
|
||||
State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
|
||||
private:
|
||||
State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
|
||||
int n_threads, internal::ThreadTimer* timer,
|
||||
internal::ThreadManager* manager);
|
||||
|
||||
private:
|
||||
void StartKeepRunning();
|
||||
// Implementation of KeepRunning() and KeepRunningBatch().
|
||||
// is_batch must be true unless n is 1.
|
||||
bool KeepRunningInternal(size_t n, bool is_batch);
|
||||
void FinishKeepRunning();
|
||||
internal::ThreadTimer* timer_;
|
||||
internal::ThreadManager* manager_;
|
||||
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State);
|
||||
|
||||
friend struct internal::BenchmarkInstance;
|
||||
};
|
||||
|
||||
inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() {
|
||||
return KeepRunningInternal(1, /*is_batch=*/false);
|
||||
}
|
||||
|
||||
inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(size_t n) {
|
||||
return KeepRunningInternal(n, /*is_batch=*/true);
|
||||
}
|
||||
|
||||
inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(size_t n,
|
||||
bool is_batch) {
|
||||
// total_iterations_ is set to 0 by the constructor, and always set to a
|
||||
// nonzero value by StartKepRunning().
|
||||
assert(n > 0);
|
||||
// n must be 1 unless is_batch is true.
|
||||
assert(is_batch || n == 1);
|
||||
if (BENCHMARK_BUILTIN_EXPECT(total_iterations_ >= n, true)) {
|
||||
total_iterations_ -= n;
|
||||
return true;
|
||||
}
|
||||
if (!started_) {
|
||||
StartKeepRunning();
|
||||
if (!error_occurred_ && total_iterations_ >= n) {
|
||||
total_iterations_ -= n;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
// For non-batch runs, total_iterations_ must be 0 by now.
|
||||
if (is_batch && total_iterations_ != 0) {
|
||||
batch_leftover_ = n - total_iterations_;
|
||||
total_iterations_ = 0;
|
||||
return true;
|
||||
}
|
||||
FinishKeepRunning();
|
||||
return false;
|
||||
}
|
||||
|
||||
struct State::StateIterator {
|
||||
struct BENCHMARK_UNUSED Value {};
|
||||
typedef std::forward_iterator_tag iterator_category;
|
||||
typedef Value value_type;
|
||||
typedef Value reference;
|
||||
typedef Value pointer;
|
||||
typedef std::ptrdiff_t difference_type;
|
||||
|
||||
private:
|
||||
friend class State;
|
||||
@@ -670,7 +786,7 @@ class Benchmark {
|
||||
// Run this benchmark once with "x" as the extra argument passed
|
||||
// to the function.
|
||||
// REQUIRES: The function passed to the constructor must accept an arg1.
|
||||
Benchmark* Arg(int x);
|
||||
Benchmark* Arg(int64_t x);
|
||||
|
||||
// Run this benchmark with the given time unit for the generated output report
|
||||
Benchmark* Unit(TimeUnit unit);
|
||||
@@ -678,23 +794,23 @@ class Benchmark {
|
||||
// Run this benchmark once for a number of values picked from the
|
||||
// range [start..limit]. (start and limit are always picked.)
|
||||
// REQUIRES: The function passed to the constructor must accept an arg1.
|
||||
Benchmark* Range(int start, int limit);
|
||||
Benchmark* Range(int64_t start, int64_t limit);
|
||||
|
||||
// Run this benchmark once for all values in the range [start..limit] with
|
||||
// specific step
|
||||
// REQUIRES: The function passed to the constructor must accept an arg1.
|
||||
Benchmark* DenseRange(int start, int limit, int step = 1);
|
||||
Benchmark* DenseRange(int64_t start, int64_t limit, int step = 1);
|
||||
|
||||
// Run this benchmark once with "args" as the extra arguments passed
|
||||
// to the function.
|
||||
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
|
||||
Benchmark* Args(const std::vector<int>& args);
|
||||
Benchmark* Args(const std::vector<int64_t>& args);
|
||||
|
||||
// Equivalent to Args({x, y})
|
||||
// NOTE: This is a legacy C++03 interface provided for compatibility only.
|
||||
// New code should use 'Args'.
|
||||
Benchmark* ArgPair(int x, int y) {
|
||||
std::vector<int> args;
|
||||
Benchmark* ArgPair(int64_t x, int64_t y) {
|
||||
std::vector<int64_t> args;
|
||||
args.push_back(x);
|
||||
args.push_back(y);
|
||||
return Args(args);
|
||||
@@ -703,7 +819,7 @@ class Benchmark {
|
||||
// Run this benchmark once for a number of values picked from the
|
||||
// ranges [start..limit]. (starts and limits are always picked.)
|
||||
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
|
||||
Benchmark* Ranges(const std::vector<std::pair<int, int> >& ranges);
|
||||
Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
|
||||
|
||||
// Equivalent to ArgNames({name})
|
||||
Benchmark* ArgName(const std::string& name);
|
||||
@@ -715,8 +831,8 @@ class Benchmark {
|
||||
// Equivalent to Ranges({{lo1, hi1}, {lo2, hi2}}).
|
||||
// NOTE: This is a legacy C++03 interface provided for compatibility only.
|
||||
// New code should use 'Ranges'.
|
||||
Benchmark* RangePair(int lo1, int hi1, int lo2, int hi2) {
|
||||
std::vector<std::pair<int, int> > ranges;
|
||||
Benchmark* RangePair(int64_t lo1, int64_t hi1, int64_t lo2, int64_t hi2) {
|
||||
std::vector<std::pair<int64_t, int64_t> > ranges;
|
||||
ranges.push_back(std::make_pair(lo1, hi1));
|
||||
ranges.push_back(std::make_pair(lo2, hi2));
|
||||
return Ranges(ranges);
|
||||
@@ -752,8 +868,12 @@ class Benchmark {
|
||||
// Specify if each repetition of the benchmark should be reported separately
|
||||
// or if only the final statistics should be reported. If the benchmark
|
||||
// is not repeated then the single result is always reported.
|
||||
  // Applies to *ALL* reporters (display and file).
  Benchmark* ReportAggregatesOnly(bool value = true);

  // Same as ReportAggregatesOnly(), but applies to display reporter only.
  Benchmark* DisplayAggregatesOnly(bool value = true);

  // If a particular benchmark is I/O bound, runs multiple threads internally or
  // if for some reason CPU timings are not representative, call this method. If
  // called, the elapsed time will be used to control how many iterations are
@@ -813,9 +933,6 @@ class Benchmark {

  virtual void Run(State& state) = 0;

  // Used inside the benchmark implementation
  struct Instance;

 protected:
  explicit Benchmark(const char* name);
  Benchmark(Benchmark const&);
@@ -823,15 +940,13 @@ class Benchmark {

  int ArgsCnt() const;

  static void AddRange(std::vector<int>* dst, int lo, int hi, int mult);

 private:
  friend class BenchmarkFamilies;

  std::string name_;
  ReportMode report_mode_;
  std::vector<std::string> arg_names_;   // Args for all benchmark runs
  std::vector<std::vector<int> > args_;  // Args for all benchmark runs
  AggregationReportMode aggregation_report_mode_;
  std::vector<std::string> arg_names_;       // Args for all benchmark runs
  std::vector<std::vector<int64_t> > args_;  // Args for all benchmark runs
  TimeUnit time_unit_;
  int range_multiplier_;
  double min_time_;
@@ -1055,7 +1170,7 @@ class Fixture : public internal::Benchmark {
  class BaseClass##_##Method##_Benchmark : public BaseClass<a> { \
   public: \
    BaseClass##_##Method##_Benchmark() : BaseClass<a>() { \
      this->SetName(#BaseClass"<" #a ">/" #Method); \
      this->SetName(#BaseClass "<" #a ">/" #Method); \
    } \
\
   protected: \
@@ -1066,7 +1181,7 @@ class Fixture : public internal::Benchmark {
  class BaseClass##_##Method##_Benchmark : public BaseClass<a, b> { \
   public: \
    BaseClass##_##Method##_Benchmark() : BaseClass<a, b>() { \
      this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \
      this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \
    } \
\
   protected: \
@@ -1078,14 +1193,15 @@ class Fixture : public internal::Benchmark {
  class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \
   public: \
    BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \
      this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method); \
      this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \
    } \
\
   protected: \
    virtual void BenchmarkCase(::benchmark::State&); \
  };
#else
#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \
  BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a)
#endif

#define BENCHMARK_DEFINE_F(BaseClass, Method) \
@@ -1105,7 +1221,8 @@ class Fixture : public internal::Benchmark {
  BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
  void BaseClass##_##Method##_Benchmark::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
  BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
#endif

#define BENCHMARK_REGISTER_F(BaseClass, Method) \
@@ -1132,24 +1249,24 @@ class Fixture : public internal::Benchmark {
  void BaseClass##_##Method##_Benchmark::BenchmarkCase

#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
  BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
  BENCHMARK_REGISTER_F(BaseClass, Method); \
  BENCHMARK_REGISTER_F(BaseClass, Method); \
  void BaseClass##_##Method##_Benchmark::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
  BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
#endif
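For orientation, a small usage sketch of the template-fixture macros whose expansions are corrected above; the fixture and method names are illustrative only, not from this change:

#include <vector>
#include "benchmark/benchmark.h"

// Hypothetical templated fixture; BENCHMARK_TEMPLATE_F stitches the fixture
// name, template argument, and method into the benchmark name exactly as the
// SetName(...) lines above show.
template <typename T>
class VectorFixture : public benchmark::Fixture {
 public:
  std::vector<T> data;
};

BENCHMARK_TEMPLATE_F(VectorFixture, PushBack, int)(benchmark::State& state) {
  for (auto _ : state) {
    data.push_back(0);
  }
}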

// Helper macro to create a main routine in a test that runs the benchmarks
#define BENCHMARK_MAIN() \
  int main(int argc, char** argv) { \
    ::benchmark::Initialize(&argc, argv); \
#define BENCHMARK_MAIN() \
  int main(int argc, char** argv) { \
    ::benchmark::Initialize(&argc, argv); \
    if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \
    ::benchmark::RunSpecifiedBenchmarks(); \
  } \
    ::benchmark::RunSpecifiedBenchmarks(); \
  } \
  int main(int, char**)

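The change above makes the expanded main() reject unrecognized flags before running anything; a minimal sketch of a consumer (the benchmark body is illustrative):

#include "benchmark/benchmark.h"

static void BM_Noop(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_Noop);

// Expands to the main() shown above: Initialize, then exit with code 1 if
// ReportUnrecognizedArguments() found leftover flags, then
// RunSpecifiedBenchmarks().
BENCHMARK_MAIN();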
// ------------------------------------------------------
// Benchmark Reporters

@@ -1167,6 +1284,7 @@ struct CPUInfo {
  double cycles_per_second;
  std::vector<CacheInfo> caches;
  bool scaling_enabled;
  std::vector<double> load_avg;

  static const CPUInfo& Get();

@@ -1186,28 +1304,35 @@ class BenchmarkReporter {
    CPUInfo const& cpu_info;
    // The number of chars in the longest benchmark name.
    size_t name_field_width;

    static const char* executable_name;
    Context();
  };

  struct Run {
    enum RunType { RT_Iteration, RT_Aggregate };

    Run()
        : error_occurred(false),
        : run_type(RT_Iteration),
          error_occurred(false),
          iterations(1),
          time_unit(kNanosecond),
          real_accumulated_time(0),
          cpu_accumulated_time(0),
          bytes_per_second(0),
          items_per_second(0),
          max_heapbytes_used(0),
          complexity(oNone),
          complexity_lambda(),
          complexity_n(0),
          report_big_o(false),
          report_rms(false),
          counters() {}
          counters(),
          has_memory_result(false),
          allocs_per_iter(0.0),
          max_bytes_used(0) {}

    std::string benchmark_name;
    std::string benchmark_name() const;
    std::string run_name;
    RunType run_type;          // is this a measurement, or an aggregate?
    std::string aggregate_name;
    std::string report_label;  // Empty if not set by benchmark.
    bool error_occurred;
    std::string error_message;
@@ -1229,17 +1354,13 @@ class BenchmarkReporter {
    // accumulated time.
    double GetAdjustedCPUTime() const;

    // Zero if not set by benchmark.
    double bytes_per_second;
    double items_per_second;

    // This is set to 0.0 if memory tracing is not enabled.
    double max_heapbytes_used;

    // Keep track of arguments to compute asymptotic complexity
    BigO complexity;
    BigOFunc* complexity_lambda;
    int complexity_n;
    int64_t complexity_n;

    // what statistics to compute from the measurements
    const std::vector<Statistics>* statistics;
@@ -1249,6 +1370,11 @@ class BenchmarkReporter {
    bool report_rms;

    UserCounters counters;

    // Memory metrics.
    bool has_memory_result;
    double allocs_per_iter;
    int64_t max_bytes_used;
  };

  // Construct a BenchmarkReporter with the output stream set to 'std::cout'
@@ -1309,17 +1435,19 @@ class BenchmarkReporter {
// Simple reporter that outputs benchmark data to the console. This is the
// default reporter used by RunSpecifiedBenchmarks().
class ConsoleReporter : public BenchmarkReporter {
public:
 public:
  enum OutputOptions {
    OO_None = 0,
    OO_Color = 1,
    OO_Tabular = 2,
    OO_ColorTabular = OO_Color|OO_Tabular,
    OO_ColorTabular = OO_Color | OO_Tabular,
    OO_Defaults = OO_ColorTabular
  };
  explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults)
      : output_options_(opts_), name_field_width_(0),
        prev_counters_(), printed_header_(false) {}
      : output_options_(opts_),
        name_field_width_(0),
        prev_counters_(),
        printed_header_(false) {}

  virtual bool ReportContext(const Context& context);
  virtual void ReportRuns(const std::vector<Run>& reports);
@@ -1347,7 +1475,8 @@ class JSONReporter : public BenchmarkReporter {
  bool first_report_;
};

class CSVReporter : public BenchmarkReporter {
class BENCHMARK_DEPRECATED_MSG("The CSV Reporter will be removed in a future release")
    CSVReporter : public BenchmarkReporter {
 public:
  CSVReporter() : printed_header_(false) {}
  virtual bool ReportContext(const Context& context);
@@ -1357,7 +1486,30 @@ class CSVReporter : public BenchmarkReporter {
  void PrintRunData(const Run& report);

  bool printed_header_;
  std::set< std::string > user_counter_names_;
  std::set<std::string> user_counter_names_;
};

// If a MemoryManager is registered, it can be used to collect and report
// allocation metrics for a run of the benchmark.
class MemoryManager {
 public:
  struct Result {
    Result() : num_allocs(0), max_bytes_used(0) {}

    // The number of allocations made in total between Start and Stop.
    int64_t num_allocs;

    // The peak memory use between Start and Stop.
    int64_t max_bytes_used;
  };

  virtual ~MemoryManager() {}

  // Implement this to start recording allocation information.
  virtual void Start() = 0;

  // Implement this to stop recording and fill out the given Result structure.
  virtual void Stop(Result* result) = 0;
};

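A hedged sketch of wiring up the new MemoryManager interface; the allocation counter here is a hypothetical stand-in for whatever hook a real instrumented allocator provides:

#include <atomic>
#include <cstdint>
#include "benchmark/benchmark.h"

// Hypothetical global updated by an instrumented allocator (not shown).
static std::atomic<int64_t> g_num_allocs{0};

class CountingMemoryManager : public benchmark::MemoryManager {
 public:
  void Start() override { start_allocs_ = g_num_allocs.load(); }
  void Stop(Result* result) override {
    result->num_allocs = g_num_allocs.load() - start_allocs_;
    result->max_bytes_used = 0;  // peak tracking omitted in this sketch
  }

 private:
  int64_t start_allocs_ = 0;
};

int main(int argc, char** argv) {
  ::benchmark::Initialize(&argc, argv);
  static CountingMemoryManager mm;
  ::benchmark::RegisterMemoryManager(&mm);  // entry point added by this merge
  ::benchmark::RunSpecifiedBenchmarks();
}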
inline const char* GetTimeUnitString(TimeUnit unit) {
@@ -1367,9 +1519,9 @@ inline const char* GetTimeUnitString(TimeUnit unit) {
    case kMicrosecond:
      return "us";
    case kNanosecond:
    default:
      return "ns";
  }
  BENCHMARK_UNREACHABLE();
}

inline double GetTimeUnitMultiplier(TimeUnit unit) {
@@ -1379,11 +1531,11 @@ inline double GetTimeUnitMultiplier(TimeUnit unit) {
    case kMicrosecond:
      return 1e6;
    case kNanosecond:
    default:
      return 1e9;
  }
  BENCHMARK_UNREACHABLE();
}

}  // namespace benchmark
}  // namespace benchmark

#endif  // BENCHMARK_BENCHMARK_H_

320
utils/google-benchmark/mingw.py
Normal file
@@ -0,0 +1,320 @@
#! /usr/bin/env python
# encoding: utf-8

import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile

try:
    import winreg
except ImportError:
    import _winreg as winreg
try:
    import urllib.request as request
except ImportError:
    import urllib as request
try:
    import urllib.parse as parse
except ImportError:
    import urlparse as parse

class EmptyLogger(object):
    '''
    Provides an implementation that performs no logging
    '''
    def debug(self, *k, **kw):
        pass
    def info(self, *k, **kw):
        pass
    def warn(self, *k, **kw):
        pass
    def error(self, *k, **kw):
        pass
    def critical(self, *k, **kw):
        pass
    def setLevel(self, *k, **kw):
        pass

urls = (
    'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
    'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
    'repository.txt',
    'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
    'repository.txt'
)
'''
A list of mingw-build repositories
'''

def repository(urls = urls, log = EmptyLogger()):
    '''
    Downloads mingw-build repository files and parses them
    '''
    log.info('getting mingw-builds repository')
    versions = {}
    re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
    re_sub = r'http://downloads.sourceforge.net/project/\1'
    for url in urls:
        log.debug(' - requesting: %s', url)
        socket = request.urlopen(url)
        repo = socket.read()
        if not isinstance(repo, str):
            repo = repo.decode()
        socket.close()
        for entry in repo.split('\n')[:-1]:
            value = entry.split('|')
            version = tuple([int(n) for n in value[0].strip().split('.')])
            version = versions.setdefault(version, {})
            arch = value[1].strip()
            if arch == 'x32':
                arch = 'i686'
            elif arch == 'x64':
                arch = 'x86_64'
            arch = version.setdefault(arch, {})
            threading = arch.setdefault(value[2].strip(), {})
            exceptions = threading.setdefault(value[3].strip(), {})
            revision = exceptions.setdefault(int(value[4].strip()[3:]),
                re_sourceforge.sub(re_sub, value[5].strip()))
    return versions

def find_in_path(file, path=None):
    '''
    Attempts to find an executable in the path
    '''
    if platform.system() == 'Windows':
        file += '.exe'
    if path is None:
        path = os.environ.get('PATH', '')
    if type(path) is type(''):
        path = path.split(os.pathsep)
    return list(filter(os.path.exists,
        map(lambda dir, file=file: os.path.join(dir, file), path)))

def find_7zip(log = EmptyLogger()):
    '''
    Attempts to find 7zip for unpacking the mingw-build archives
    '''
    log.info('finding 7zip')
    path = find_in_path('7z')
    if not path:
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
        path, _ = winreg.QueryValueEx(key, 'Path')
        path = [os.path.join(path, '7z.exe')]
    log.debug('found \'%s\'', path[0])
    return path[0]

find_7zip()

def unpack(archive, location, log = EmptyLogger()):
    '''
    Unpacks a mingw-builds archive
    '''
    sevenzip = find_7zip(log)
    log.info('unpacking %s', os.path.basename(archive))
    cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
    log.debug(' - %r', cmd)
    with open(os.devnull, 'w') as devnull:
        subprocess.check_call(cmd, stdout = devnull)

def download(url, location, log = EmptyLogger()):
    '''
    Downloads and unpacks a mingw-builds archive
    '''
    log.info('downloading MinGW')
    log.debug(' - url: %s', url)
    log.debug(' - location: %s', location)

    re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')

    stream = request.urlopen(url)
    try:
        content = stream.getheader('Content-Disposition') or ''
    except AttributeError:
        content = stream.headers.getheader('Content-Disposition') or ''
    matches = re_content.match(content)
    if matches:
        filename = matches.group(2)
    else:
        parsed = parse.urlparse(stream.geturl())
        filename = os.path.basename(parsed.path)

    try:
        os.makedirs(location)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(location):
            pass
        else:
            raise

    archive = os.path.join(location, filename)
    with open(archive, 'wb') as out:
        while True:
            buf = stream.read(1024)
            if not buf:
                break
            out.write(buf)
    unpack(archive, location, log = log)
    os.remove(archive)

    possible = os.path.join(location, 'mingw64')
    if not os.path.exists(possible):
        possible = os.path.join(location, 'mingw32')
        if not os.path.exists(possible):
            raise ValueError('Failed to find unpacked MinGW: ' + possible)
    return possible

def root(location = None, arch = None, version = None, threading = None,
        exceptions = None, revision = None, log = EmptyLogger()):
    '''
    Returns the root folder of a specific version of the mingw-builds variant
    of gcc. Will download the compiler if needed
    '''

    # Get the repository if we don't have all the information
    if not (arch and version and threading and exceptions and revision):
        versions = repository(log = log)

    # Determine some defaults
    version = version or max(versions.keys())
    if not arch:
        arch = platform.machine().lower()
        if arch == 'x86':
            arch = 'i686'
        elif arch == 'amd64':
            arch = 'x86_64'
    if not threading:
        keys = list(versions[version][arch].keys())
        if 'posix' in keys:
            threading = 'posix'
        elif 'win32' in keys:
            threading = 'win32'
        else:
            threading = keys[0]
    if not exceptions:
        keys = list(versions[version][arch][threading].keys())
        if 'seh' in keys:
            exceptions = 'seh'
        elif 'sjlj' in keys:
            exceptions = 'sjlj'
        else:
            exceptions = keys[0]
    if revision is None:
        revision = max(versions[version][arch][threading][exceptions].keys())
    if not location:
        location = os.path.join(tempfile.gettempdir(), 'mingw-builds')

    # Get the download url
    url = versions[version][arch][threading][exceptions][revision]

    # Tell the user whatzzup
    log.info('finding MinGW %s', '.'.join(str(v) for v in version))
    log.debug(' - arch: %s', arch)
    log.debug(' - threading: %s', threading)
    log.debug(' - exceptions: %s', exceptions)
    log.debug(' - revision: %s', revision)
    log.debug(' - url: %s', url)

    # Store each specific revision differently
    slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
    slug = slug.format(
        version = '.'.join(str(v) for v in version),
        arch = arch,
        threading = threading,
        exceptions = exceptions,
        revision = revision
    )
    if arch == 'x86_64':
        root_dir = os.path.join(location, slug, 'mingw64')
    elif arch == 'i686':
        root_dir = os.path.join(location, slug, 'mingw32')
    else:
        raise ValueError('Unknown MinGW arch: ' + arch)

    # Download if needed
    if not os.path.exists(root_dir):
        downloaded = download(url, os.path.join(location, slug), log = log)
        if downloaded != root_dir:
            raise ValueError('The location of mingw did not match\n%s\n%s'
                % (downloaded, root_dir))

    return root_dir

def str2ver(string):
    '''
    Converts a version string into a tuple
    '''
    try:
        version = tuple(int(v) for v in string.split('.'))
        if len(version) != 3:
            raise ValueError()
    except ValueError:
        raise argparse.ArgumentTypeError(
            'please provide a three digit version string')
    return version

def main():
    '''
    Invoked when the script is run directly by the python interpreter
    '''
    parser = argparse.ArgumentParser(
        description = 'Downloads a specific version of MinGW',
        formatter_class = argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--location',
        help = 'the location to download the compiler to',
        default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
    parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
        help = 'the target MinGW architecture string')
    parser.add_argument('--version', type = str2ver,
        help = 'the version of GCC to download')
    parser.add_argument('--threading', choices = ['posix', 'win32'],
        help = 'the threading type of the compiler')
    parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
        help = 'the method to throw exceptions')
    parser.add_argument('--revision', type=int,
        help = 'the revision of the MinGW release')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-v', '--verbose', action='store_true',
        help='increase the script output verbosity')
    group.add_argument('-q', '--quiet', action='store_true',
        help='only print errors and warnings')
    args = parser.parse_args()

    # Create the logger
    logger = logging.getLogger('mingw')
    handler = logging.StreamHandler()
    formatter = logging.Formatter('%(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    if args.quiet:
        logger.setLevel(logging.WARN)
    if args.verbose:
        logger.setLevel(logging.DEBUG)

    # Get MinGW
    root_dir = root(location = args.location, arch = args.arch,
        version = args.version, threading = args.threading,
        exceptions = args.exceptions, revision = args.revision,
        log = logger)

    sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))

if __name__ == '__main__':
    try:
        main()
    except IOError as e:
        sys.stderr.write('IO error: %s\n' % e)
        sys.exit(1)
    except OSError as e:
        sys.stderr.write('OS error: %s\n' % e)
        sys.exit(1)
    except KeyboardInterrupt as e:
        sys.stderr.write('Killed\n')
        sys.exit(1)
16
utils/google-benchmark/releasing.md
Normal file
@@ -0,0 +1,16 @@
# How to release

* Make sure you're on master and synced to HEAD
* Ensure the project builds and tests run (sanity check only, obviously)
  * `parallel -j0 exec ::: test/*_test` can help ensure everything at least
    passes
* Prepare release notes
  * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
    commits between the last annotated tag and HEAD
  * Pick the most interesting.
* Create a release through github's interface
  * Note this will create a lightweight tag.
  * Update this to an annotated tag:
    * `git pull --tags`
    * `git tag -a -f <tag> <tag>`
    * `git push --force origin`
@@ -11,6 +11,10 @@ file(GLOB
    *.cc
    ${PROJECT_SOURCE_DIR}/include/benchmark/*.h
    ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
file(GLOB BENCHMARK_MAIN "benchmark_main.cc")
foreach(item ${BENCHMARK_MAIN})
  list(REMOVE_ITEM SOURCE_FILES "${item}")
endforeach()

add_library(benchmark ${SOURCE_FILES})
set_target_properties(benchmark PROPERTIES
@@ -34,6 +38,23 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
  target_link_libraries(benchmark Shlwapi)
endif()

# We need extra libraries on Solaris
if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
  target_link_libraries(benchmark kstat)
endif()

# Benchmark main library
add_library(benchmark_main "benchmark_main.cc")
set_target_properties(benchmark_main PROPERTIES
  OUTPUT_NAME "benchmark_main"
  VERSION ${GENERIC_LIB_VERSION}
  SOVERSION ${GENERIC_LIB_SOVERSION}
)
target_include_directories(benchmark PUBLIC
  $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
)
target_link_libraries(benchmark_main benchmark)

set(include_install_dir "include")
set(lib_install_dir "lib/")
set(bin_install_dir "bin/")
@@ -51,7 +72,7 @@ set(namespace "${PROJECT_NAME}::")

include(CMakePackageConfigHelpers)
write_basic_package_version_file(
  "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion
  "${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion
)

configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
@@ -60,7 +81,7 @@ configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ON

if (BENCHMARK_ENABLE_INSTALL)
  # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
  install(
    TARGETS benchmark
    TARGETS benchmark benchmark_main
    EXPORT ${targets_export_name}
    ARCHIVE DESTINATION ${lib_install_dir}
    LIBRARY DESTINATION ${lib_install_dir}
@@ -14,10 +14,13 @@

#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "benchmark_runner.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif
@@ -27,11 +30,12 @@
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <utility>

#include "check.h"
#include "colorprint.h"
@@ -44,7 +48,8 @@
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "timers.h"
#include "thread_manager.h"
#include "thread_timer.h"

DEFINE_bool(benchmark_list_tests, false,
            "Print a list of benchmarks. This option overrides all other "
@@ -69,10 +74,19 @@ DEFINE_int32(benchmark_repetitions, 1,
             "The number of runs of each benchmark. If greater than 1, the "
             "mean and standard deviation of the runs will be reported.");

DEFINE_bool(benchmark_report_aggregates_only, false,
            "Report the result of each benchmark repetitions. When 'true' is "
            "specified only the mean, standard deviation, and other statistics "
            "are reported for repeated benchmarks.");
DEFINE_bool(
    benchmark_report_aggregates_only, false,
    "Report the result of each benchmark repetitions. When 'true' is specified "
    "only the mean, standard deviation, and other statistics are reported for "
    "repeated benchmarks. Affects all reporters.");

DEFINE_bool(
    benchmark_display_aggregates_only, false,
    "Display the result of each benchmark repetitions. When 'true' is "
    "specified only the mean, standard deviation, and other statistics are "
    "displayed for repeated benchmarks. Unlike "
    "benchmark_report_aggregates_only, only affects the display reporter, but "
    "*NOT* file reporter, which will still contain all the output.");

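A sketch of how these two flags map onto the per-benchmark builder calls from benchmark.h; the benchmark name and body are illustrative only:

#include "benchmark/benchmark.h"

static void BM_Spin(benchmark::State& state) {
  for (auto _ : state) {
  }
}
// With repetitions, --benchmark_report_aggregates_only trims every reporter
// down to the aggregate statistics, while the new
// --benchmark_display_aggregates_only (or the DisplayAggregatesOnly() call
// here) trims only the display reporter and leaves file output complete.
BENCHMARK(BM_Spin)->Repetitions(4)->DisplayAggregatesOnly();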
DEFINE_string(benchmark_format, "console",
              "The format to use for console output. Valid values are "
@@ -82,7 +96,7 @@ DEFINE_string(benchmark_out_format, "json",
              "The format to use for file output. Valid values are "
              "'console', 'json', or 'csv'.");

DEFINE_string(benchmark_out, "", "The file to write additonal output to");
DEFINE_string(benchmark_out, "", "The file to write additional output to");

DEFINE_string(benchmark_color, "auto",
              "Whether to use colors in the output. Valid values: "
@@ -100,320 +114,55 @@ DEFINE_int32(v, 0, "The level of verbose logging to output");

namespace benchmark {

namespace {
static const size_t kMaxIterations = 1000000000;
}  // end namespace

namespace internal {

// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile*) {}

class ThreadManager {
 public:
  ThreadManager(int num_threads)
      : alive_threads_(num_threads), start_stop_barrier_(num_threads) {}

  Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
    return benchmark_mutex_;
  }

  bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
    return start_stop_barrier_.wait();
  }

  void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
    start_stop_barrier_.removeThread();
    if (--alive_threads_ == 0) {
      MutexLock lock(end_cond_mutex_);
      end_condition_.notify_all();
    }
  }

  void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
    MutexLock lock(end_cond_mutex_);
    end_condition_.wait(lock.native_handle(),
                        [this]() { return alive_threads_ == 0; });
  }

 public:
  struct Result {
    double real_time_used = 0;
    double cpu_time_used = 0;
    double manual_time_used = 0;
    int64_t bytes_processed = 0;
    int64_t items_processed = 0;
    int complexity_n = 0;
    std::string report_label_;
    std::string error_message_;
    bool has_error_ = false;
    UserCounters counters;
  };
  GUARDED_BY(GetBenchmarkMutex()) Result results;

 private:
  mutable Mutex benchmark_mutex_;
  std::atomic<int> alive_threads_;
  Barrier start_stop_barrier_;
  Mutex end_cond_mutex_;
  Condition end_condition_;
};

// Timer management class
class ThreadTimer {
 public:
  ThreadTimer() = default;

  // Called by each thread
  void StartTimer() {
    running_ = true;
    start_real_time_ = ChronoClockNow();
    start_cpu_time_ = ThreadCPUUsage();
  }

  // Called by each thread
  void StopTimer() {
    CHECK(running_);
    running_ = false;
    real_time_used_ += ChronoClockNow() - start_real_time_;
    // Floating point error can result in the subtraction producing a negative
    // time. Guard against that.
    cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
  }

  // Called by each thread
  void SetIterationTime(double seconds) { manual_time_used_ += seconds; }

  bool running() const { return running_; }

  // REQUIRES: timer is not running
  double real_time_used() {
    CHECK(!running_);
    return real_time_used_;
  }

  // REQUIRES: timer is not running
  double cpu_time_used() {
    CHECK(!running_);
    return cpu_time_used_;
  }

  // REQUIRES: timer is not running
  double manual_time_used() {
    CHECK(!running_);
    return manual_time_used_;
  }

 private:
  bool running_ = false;        // Is the timer running
  double start_real_time_ = 0;  // If running_
  double start_cpu_time_ = 0;   // If running_

  // Accumulated time so far (does not contain current slice if running_)
  double real_time_used_ = 0;
  double cpu_time_used_ = 0;
  // Manually set iteration time. User sets this with SetIterationTime(seconds).
  double manual_time_used_ = 0;
};

namespace {

BenchmarkReporter::Run CreateRunReport(
    const benchmark::internal::Benchmark::Instance& b,
    const internal::ThreadManager::Result& results, size_t iters,
    double seconds) {
  // Create report about this benchmark run.
  BenchmarkReporter::Run report;

  report.benchmark_name = b.name;
  report.error_occurred = results.has_error_;
  report.error_message = results.error_message_;
  report.report_label = results.report_label_;
  // Report the total iterations across all threads.
  report.iterations = static_cast<int64_t>(iters) * b.threads;
  report.time_unit = b.time_unit;

  if (!report.error_occurred) {
    double bytes_per_second = 0;
    if (results.bytes_processed > 0 && seconds > 0.0) {
      bytes_per_second = (results.bytes_processed / seconds);
    }
    double items_per_second = 0;
    if (results.items_processed > 0 && seconds > 0.0) {
      items_per_second = (results.items_processed / seconds);
    }

    if (b.use_manual_time) {
      report.real_accumulated_time = results.manual_time_used;
    } else {
      report.real_accumulated_time = results.real_time_used;
    }
    report.cpu_accumulated_time = results.cpu_time_used;
    report.bytes_per_second = bytes_per_second;
    report.items_per_second = items_per_second;
    report.complexity_n = results.complexity_n;
    report.complexity = b.complexity;
    report.complexity_lambda = b.complexity_lambda;
    report.statistics = b.statistics;
    report.counters = results.counters;
    internal::Finish(&report.counters, seconds, b.threads);
  }
  return report;
}

// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total.
void RunInThread(const benchmark::internal::Benchmark::Instance* b,
                 size_t iters, int thread_id,
                 internal::ThreadManager* manager) {
  internal::ThreadTimer timer;
  State st(iters, b->arg, thread_id, b->threads, &timer, manager);
  b->benchmark->Run(st);
  CHECK(st.iterations() == st.max_iterations)
      << "Benchmark returned before State::KeepRunning() returned false!";
  {
    MutexLock l(manager->GetBenchmarkMutex());
    internal::ThreadManager::Result& results = manager->results;
    results.cpu_time_used += timer.cpu_time_used();
    results.real_time_used += timer.real_time_used();
    results.manual_time_used += timer.manual_time_used();
    results.bytes_processed += st.bytes_processed();
    results.items_processed += st.items_processed();
    results.complexity_n += st.complexity_length_n();
    internal::Increment(&results.counters, st.counters);
  }
  manager->NotifyThreadComplete();
}

std::vector<BenchmarkReporter::Run> RunBenchmark(
    const benchmark::internal::Benchmark::Instance& b,
    std::vector<BenchmarkReporter::Run>* complexity_reports) {
  std::vector<BenchmarkReporter::Run> reports;  // return value

  const bool has_explicit_iteration_count = b.iterations != 0;
  size_t iters = has_explicit_iteration_count ? b.iterations : 1;
  std::unique_ptr<internal::ThreadManager> manager;
  std::vector<std::thread> pool(b.threads - 1);
  const int repeats =
      b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions;
  const bool report_aggregates_only =
      repeats != 1 &&
      (b.report_mode == internal::RM_Unspecified
           ? FLAGS_benchmark_report_aggregates_only
           : b.report_mode == internal::RM_ReportAggregatesOnly);
  for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
    for (;;) {
      // Try benchmark
      VLOG(2) << "Running " << b.name << " for " << iters << "\n";

      manager.reset(new internal::ThreadManager(b.threads));
      for (std::size_t ti = 0; ti < pool.size(); ++ti) {
        pool[ti] = std::thread(&RunInThread, &b, iters,
                               static_cast<int>(ti + 1), manager.get());
      }
      RunInThread(&b, iters, 0, manager.get());
      manager->WaitForAllThreads();
      for (std::thread& thread : pool) thread.join();
      internal::ThreadManager::Result results;
      {
        MutexLock l(manager->GetBenchmarkMutex());
        results = manager->results;
      }
      manager.reset();
      // Adjust real/manual time stats since they were reported per thread.
      results.real_time_used /= b.threads;
      results.manual_time_used /= b.threads;

      VLOG(2) << "Ran in " << results.cpu_time_used << "/"
              << results.real_time_used << "\n";

      // Base decisions off of real time if requested by this benchmark.
      double seconds = results.cpu_time_used;
      if (b.use_manual_time) {
        seconds = results.manual_time_used;
      } else if (b.use_real_time) {
        seconds = results.real_time_used;
      }

      const double min_time =
          !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time;

      // Determine if this run should be reported: either it has run for a
      // sufficient amount of time, or an error was reported.
      const bool should_report = repetition_num > 0
        || has_explicit_iteration_count  // An exact iteration count was requested
        || results.has_error_
        || iters >= kMaxIterations
        || seconds >= min_time  // the elapsed time is large enough
        // CPU time is specified but the elapsed real time greatly exceeds the
        // minimum time. Note that user provided timers are exempt from this
        // sanity check.
        || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time);

      if (should_report) {
        BenchmarkReporter::Run report =
            CreateRunReport(b, results, iters, seconds);
        if (!report.error_occurred && b.complexity != oNone)
          complexity_reports->push_back(report);
        reports.push_back(report);
        break;
      }

      // See by how much the iteration count should be increased.
      // Note: Avoid division by zero with max(seconds, 1ns).
      double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
      // If our last run was at least 10% of FLAGS_benchmark_min_time then we
      // use the multiplier directly. Otherwise we use at most 10 times
      // expansion.
      // NOTE: When the last run was at least 10% of the min time the max
      // expansion should be 14x.
      bool is_significant = (seconds / min_time) > 0.1;
      multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
      if (multiplier <= 1.0) multiplier = 2.0;
      double next_iters = std::max(multiplier * iters, iters + 1.0);
      if (next_iters > kMaxIterations) {
        next_iters = kMaxIterations;
      }
      VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
      iters = static_cast<int>(next_iters + 0.5);
    }
  }
  // Calculate additional statistics
  auto stat_reports = ComputeStats(reports);
  if ((b.complexity != oNone) && b.last_benchmark_instance) {
    auto additional_run_stats = ComputeBigO(*complexity_reports);
    stat_reports.insert(stat_reports.end(), additional_run_stats.begin(),
                        additional_run_stats.end());
    complexity_reports->clear();
  }

  if (report_aggregates_only) reports.clear();
  reports.insert(reports.end(), stat_reports.begin(), stat_reports.end());
  return reports;
}

}  // namespace
}  // namespace internal

State::State(size_t max_iters, const std::vector<int>& ranges, int thread_i,
State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i,
             int n_threads, internal::ThreadTimer* timer,
             internal::ThreadManager* manager)
    : started_(false),
    : total_iterations_(0),
      batch_leftover_(0),
      max_iterations(max_iters),
      started_(false),
      finished_(false),
      total_iterations_(max_iters + 1),
      range_(ranges),
      bytes_processed_(0),
      items_processed_(0),
      complexity_n_(0),
      error_occurred_(false),
      range_(ranges),
      complexity_n_(0),
      counters(),
      thread_index(thread_i),
      threads(n_threads),
      max_iterations(max_iters),
      timer_(timer),
      manager_(manager) {
  CHECK(max_iterations != 0) << "At least one iteration must be run";
  CHECK(total_iterations_ != 0) << "max iterations wrapped around";
  CHECK_LT(thread_index, threads) << "thread_index must be less than threads";

  // Note: The use of offsetof below is technically undefined until C++17
  // because State is not a standard layout type. However, all compilers
  // currently provide well-defined behavior as an extension (which is
  // demonstrated since constexpr evaluation must diagnose all undefined
  // behavior). However, GCC and Clang also warn about this use of offsetof,
  // which must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
#pragma warning(disable:1875)
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
  // Offset tests to ensure commonly accessed data is on the first cache line.
  const int cache_line_size = 64;
  static_assert(offsetof(State, error_occurred_) <=
                    (cache_line_size - sizeof(error_occurred_)),
                "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}

void State::PauseTiming() {
@@ -437,7 +186,7 @@ void State::SkipWithError(const char* msg) {
      manager_->results.has_error_ = true;
    }
  }
  total_iterations_ = 1;
  total_iterations_ = 0;
  if (timer_->running()) timer_->StopTimer();
}

@@ -453,6 +202,7 @@ void State::SetLabel(const char* label) {
void State::StartKeepRunning() {
  CHECK(!started_ && !finished_);
  started_ = true;
  total_iterations_ = error_occurred_ ? 0 : max_iterations;
  manager_->StartStopBarrier();
  if (!error_occurred_) ResumeTiming();
}
@@ -462,8 +212,8 @@ void State::FinishKeepRunning() {
  if (!error_occurred_) {
    PauseTiming();
  }
  // Total iterations has now wrapped around zero. Fix this.
  total_iterations_ = 1;
  // Total iterations has now wrapped around past 0. Fix this.
  total_iterations_ = 0;
  finished_ = true;
  manager_->StartStopBarrier();
}
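For reference, a sketch of the user-facing contract these internals serve: after SkipWithError() the iteration loop must not run, which is why total_iterations_ is now forced to 0. The benchmark below and its input path are hypothetical:

#include <cstdio>
#include "benchmark/benchmark.h"

static void BM_FileRead(benchmark::State& state) {
  std::FILE* f = std::fopen("/nonexistent/input", "rb");  // illustrative path
  if (f == nullptr) {
    state.SkipWithError("could not open input file");
    return;  // with the error recorded, the loop below runs zero iterations
  }
  for (auto _ : state) {
    // ... timed read work would go here ...
  }
  std::fclose(f);
}
BENCHMARK(BM_FileRead);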
@@ -471,31 +221,31 @@ void State::FinishKeepRunning() {
namespace internal {
namespace {

void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
                   BenchmarkReporter* console_reporter,
                   BenchmarkReporter* file_reporter) {
void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
                   BenchmarkReporter* display_reporter,
                   BenchmarkReporter* file_reporter) {
  // Note the file_reporter can be null.
  CHECK(console_reporter != nullptr);
  CHECK(display_reporter != nullptr);

  // Determine the width of the name field using a minimum width of 10.
  bool has_repetitions = FLAGS_benchmark_repetitions > 1;
  bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
  size_t name_field_width = 10;
  size_t stat_field_width = 0;
  for (const Benchmark::Instance& benchmark : benchmarks) {
  for (const BenchmarkInstance& benchmark : benchmarks) {
    name_field_width =
        std::max<size_t>(name_field_width, benchmark.name.size());
    has_repetitions |= benchmark.repetitions > 1;
    might_have_aggregates |= benchmark.repetitions > 1;

    for(const auto& Stat : *benchmark.statistics)
    for (const auto& Stat : *benchmark.statistics)
      stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
  }
  if (has_repetitions) name_field_width += 1 + stat_field_width;
  if (might_have_aggregates) name_field_width += 1 + stat_field_width;

  // Print header here
  BenchmarkReporter::Context context;
  context.name_field_width = name_field_width;

  // Keep track of runing times of all instances of current benchmark
  // Keep track of running times of all instances of current benchmark
  std::vector<BenchmarkReporter::Run> complexity_reports;

  // We flush streams after invoking reporter methods that write to them. This
@@ -506,22 +256,36 @@ void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
    std::flush(reporter->GetErrorStream());
  };

  if (console_reporter->ReportContext(context) &&
  if (display_reporter->ReportContext(context) &&
      (!file_reporter || file_reporter->ReportContext(context))) {
    flushStreams(console_reporter);
    flushStreams(display_reporter);
    flushStreams(file_reporter);

    for (const auto& benchmark : benchmarks) {
      std::vector<BenchmarkReporter::Run> reports =
          RunBenchmark(benchmark, &complexity_reports);
      console_reporter->ReportRuns(reports);
      if (file_reporter) file_reporter->ReportRuns(reports);
      flushStreams(console_reporter);
      RunResults run_results = RunBenchmark(benchmark, &complexity_reports);

      auto report = [&run_results](BenchmarkReporter* reporter,
                                   bool report_aggregates_only) {
        assert(reporter);
        // If there are no aggregates, do output non-aggregates.
        report_aggregates_only &= !run_results.aggregates_only.empty();
        if (!report_aggregates_only)
          reporter->ReportRuns(run_results.non_aggregates);
        if (!run_results.aggregates_only.empty())
          reporter->ReportRuns(run_results.aggregates_only);
      };

      report(display_reporter, run_results.display_report_aggregates_only);
      if (file_reporter)
        report(file_reporter, run_results.file_report_aggregates_only);

      flushStreams(display_reporter);
      flushStreams(file_reporter);
    }
  }
  console_reporter->Finalize();
  display_reporter->Finalize();
  if (file_reporter) file_reporter->Finalize();
  flushStreams(console_reporter);
  flushStreams(display_reporter);
  flushStreams(file_reporter);
}

@@ -548,21 +312,26 @@ bool IsZero(double n) {

ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) {
  int output_opts = ConsoleReporter::OO_Defaults;
  if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) ||
      IsTruthyFlagValue(FLAGS_benchmark_color)) {
  auto is_benchmark_color = [force_no_color] () -> bool {
    if (force_no_color) {
      return false;
    }
    if (FLAGS_benchmark_color == "auto") {
      return IsColorTerminal();
    }
    return IsTruthyFlagValue(FLAGS_benchmark_color);
  };
  if (is_benchmark_color()) {
    output_opts |= ConsoleReporter::OO_Color;
  } else {
    output_opts &= ~ConsoleReporter::OO_Color;
  }
  if(force_no_color) {
    output_opts &= ~ConsoleReporter::OO_Color;
  }
  if(FLAGS_benchmark_counters_tabular) {
  if (FLAGS_benchmark_counters_tabular) {
    output_opts |= ConsoleReporter::OO_Tabular;
  } else {
    output_opts &= ~ConsoleReporter::OO_Tabular;
  }
  return static_cast< ConsoleReporter::OutputOptions >(output_opts);
  return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}

}  // end namespace internal
@@ -571,11 +340,11 @@ size_t RunSpecifiedBenchmarks() {
  return RunSpecifiedBenchmarks(nullptr, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) {
  return RunSpecifiedBenchmarks(console_reporter, nullptr);
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) {
  return RunSpecifiedBenchmarks(display_reporter, nullptr);
}

size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter,
                              BenchmarkReporter* file_reporter) {
  std::string spec = FLAGS_benchmark_filter;
  if (spec.empty() || spec == "all")
@@ -583,15 +352,15 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,

  // Setup the reporters
  std::ofstream output_file;
  std::unique_ptr<BenchmarkReporter> default_console_reporter;
  std::unique_ptr<BenchmarkReporter> default_display_reporter;
  std::unique_ptr<BenchmarkReporter> default_file_reporter;
  if (!console_reporter) {
    default_console_reporter = internal::CreateReporter(
        FLAGS_benchmark_format, internal::GetOutputOptions());
    console_reporter = default_console_reporter.get();
  if (!display_reporter) {
    default_display_reporter = internal::CreateReporter(
        FLAGS_benchmark_format, internal::GetOutputOptions());
    display_reporter = default_display_reporter.get();
  }
  auto& Out = console_reporter->GetOutputStream();
  auto& Err = console_reporter->GetErrorStream();
  auto& Out = display_reporter->GetOutputStream();
  auto& Err = display_reporter->GetErrorStream();

  std::string const& fname = FLAGS_benchmark_out;
  if (fname.empty() && file_reporter) {
@@ -615,7 +384,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
    file_reporter->SetErrorStream(&output_file);
  }

  std::vector<internal::Benchmark::Instance> benchmarks;
  std::vector<internal::BenchmarkInstance> benchmarks;
  if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0;

  if (benchmarks.empty()) {
@@ -626,12 +395,16 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter,
  if (FLAGS_benchmark_list_tests) {
    for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n";
  } else {
    internal::RunBenchmarks(benchmarks, console_reporter, file_reporter);
    internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
  }

  return benchmarks.size();
}

void RegisterMemoryManager(MemoryManager* manager) {
  internal::memory_manager = manager;
}

namespace internal {

void PrintUsageAndExit() {
@@ -641,7 +414,8 @@ void PrintUsageAndExit() {
          " [--benchmark_filter=<regex>]\n"
          " [--benchmark_min_time=<min_time>]\n"
          " [--benchmark_repetitions=<num_repetitions>]\n"
          " [--benchmark_report_aggregates_only={true|false}\n"
          " [--benchmark_report_aggregates_only={true|false}]\n"
          " [--benchmark_display_aggregates_only={true|false}]\n"
          " [--benchmark_format=<console|json|csv>]\n"
          " [--benchmark_out=<filename>]\n"
          " [--benchmark_out_format=<json|console|csv>]\n"
@@ -653,6 +427,8 @@ void PrintUsageAndExit() {

void ParseCommandLineFlags(int* argc, char** argv) {
  using namespace benchmark;
  BenchmarkReporter::Context::executable_name =
      (argc && *argc > 0) ? argv[0] : "unknown";
  for (int i = 1; i < *argc; ++i) {
    if (ParseBoolFlag(argv[i], "benchmark_list_tests",
                      &FLAGS_benchmark_list_tests) ||
@@ -663,6 +439,8 @@ void ParseCommandLineFlags(int* argc, char** argv) {
                       &FLAGS_benchmark_repetitions) ||
        ParseBoolFlag(argv[i], "benchmark_report_aggregates_only",
                      &FLAGS_benchmark_report_aggregates_only) ||
        ParseBoolFlag(argv[i], "benchmark_display_aggregates_only",
                      &FLAGS_benchmark_display_aggregates_only) ||
        ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
        ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
        ParseStringFlag(argv[i], "benchmark_out_format",
@@ -672,7 +450,7 @@ void ParseCommandLineFlags(int* argc, char** argv) {
        // TODO: Remove this.
        ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
        ParseBoolFlag(argv[i], "benchmark_counters_tabular",
                        &FLAGS_benchmark_counters_tabular) ||
                      &FLAGS_benchmark_counters_tabular) ||
        ParseInt32Flag(argv[i], "v", &FLAGS_v)) {
      for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1];

@@ -706,7 +484,8 @@ void Initialize(int* argc, char** argv) {

bool ReportUnrecognizedArguments(int argc, char** argv) {
  for (int i = 1; i < argc; ++i) {
    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
    fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0],
            argv[i]);
  }
  return argc > 1;
}

15
utils/google-benchmark/src/benchmark_api_internal.cc
Normal file
@@ -0,0 +1,15 @@
#include "benchmark_api_internal.h"

namespace benchmark {
namespace internal {

State BenchmarkInstance::Run(
    size_t iters, int thread_id, internal::ThreadTimer* timer,
    internal::ThreadManager* manager) const {
  State st(iters, arg, thread_id, threads, timer, manager);
  benchmark->Run(st);
  return st;
}

}  // internal
}  // benchmark
@@ -2,10 +2,12 @@
#define BENCHMARK_API_INTERNAL_H

#include "benchmark/benchmark.h"
#include "commandlineflags.h"

#include <cmath>
#include <iosfwd>
#include <limits>
#include <memory>
#include <string>
#include <vector>

@@ -13,11 +15,11 @@ namespace benchmark {
namespace internal {

// Information kept per benchmark we may want to run
struct Benchmark::Instance {
struct BenchmarkInstance {
  std::string name;
  Benchmark* benchmark;
  ReportMode report_mode;
  std::vector<int> arg;
  AggregationReportMode aggregation_report_mode;
  std::vector<int64_t> arg;
  TimeUnit time_unit;
  int range_multiplier;
  bool use_real_time;
@@ -31,10 +33,13 @@ struct Benchmark::Instance {
  double min_time;
  size_t iterations;
  int threads;  // Number of concurrent threads to use

  State Run(size_t iters, int thread_id, internal::ThreadTimer* timer,
            internal::ThreadManager* manager) const;
};

bool FindBenchmarksInternal(const std::string& re,
                            std::vector<Benchmark::Instance>* benchmarks,
                            std::vector<BenchmarkInstance>* benchmarks,
                            std::ostream* Err);

bool IsZero(double n);

17
utils/google-benchmark/src/benchmark_main.cc
Normal file
@@ -0,0 +1,17 @@
|
||||
// Copyright 2018 Google Inc. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "benchmark/benchmark.h"
|
||||
|
||||
BENCHMARK_MAIN();
|
||||
@@ -12,12 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "benchmark/benchmark.h"
|
||||
#include "benchmark_api_internal.h"
|
||||
#include "internal_macros.h"
|
||||
#include "benchmark_register.h"
|
||||
|
||||
#ifndef BENCHMARK_OS_WINDOWS
|
||||
#ifndef BENCHMARK_OS_FUCHSIA
|
||||
#include <sys/resource.h>
|
||||
#endif
|
||||
#include <sys/time.h>
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
@@ -34,13 +34,16 @@
|
||||
#include <sstream>
|
||||
#include <thread>
|
||||
|
||||
#include "benchmark/benchmark.h"
|
||||
#include "benchmark_api_internal.h"
|
||||
#include "check.h"
|
||||
#include "commandlineflags.h"
|
||||
#include "complexity.h"
|
||||
#include "statistics.h"
|
||||
#include "internal_macros.h"
|
||||
#include "log.h"
|
||||
#include "mutex.h"
|
||||
#include "re.h"
|
||||
#include "statistics.h"
|
||||
#include "string_util.h"
|
||||
#include "timers.h"
|
||||
|
||||
@@ -74,8 +77,8 @@ class BenchmarkFamilies {
|
||||
|
||||
// Extract the list of benchmark instances that match the specified
|
||||
// regular expression.
|
||||
bool FindBenchmarks(const std::string& re,
|
||||
std::vector<Benchmark::Instance>* benchmarks,
|
||||
bool FindBenchmarks(std::string re,
|
||||
std::vector<BenchmarkInstance>* benchmarks,
|
||||
std::ostream* Err);
|
||||
|
||||
private:
|
||||
@@ -104,13 +107,18 @@ void BenchmarkFamilies::ClearBenchmarks() {
|
||||
}
|
||||
|
||||
bool BenchmarkFamilies::FindBenchmarks(
|
||||
const std::string& spec, std::vector<Benchmark::Instance>* benchmarks,
|
||||
std::string spec, std::vector<BenchmarkInstance>* benchmarks,
|
||||
std::ostream* ErrStream) {
|
||||
CHECK(ErrStream);
|
||||
auto& Err = *ErrStream;
|
||||
// Make regular expression out of command-line flag
|
||||
std::string error_msg;
|
||||
Regex re;
|
||||
bool isNegativeFilter = false;
|
||||
if (spec[0] == '-') {
|
||||
spec.replace(0, 1, "");
|
||||
isNegativeFilter = true;
|
||||
}
|
||||
if (!re.Init(spec, &error_msg)) {
|
||||
Err << "Could not compile benchmark re: " << error_msg << std::endl;
|
||||
return false;
|
||||
@@ -144,10 +152,10 @@ bool BenchmarkFamilies::FindBenchmarks(
|
||||
|
||||
for (auto const& args : family->args_) {
|
||||
for (int num_threads : *thread_counts) {
|
||||
Benchmark::Instance instance;
|
||||
BenchmarkInstance instance;
|
||||
instance.name = family->name_;
|
||||
instance.benchmark = family.get();
|
||||
instance.report_mode = family->report_mode_;
|
||||
instance.aggregation_report_mode = family->aggregation_report_mode_;
|
||||
instance.arg = args;
|
||||
instance.time_unit = family->time_unit_;
|
||||
instance.range_multiplier = family->range_multiplier_;
|
||||
@@ -170,20 +178,20 @@ bool BenchmarkFamilies::FindBenchmarks(
|
||||
const auto& arg_name = family->arg_names_[arg_i];
|
||||
if (!arg_name.empty()) {
|
||||
instance.name +=
|
||||
StringPrintF("%s:", family->arg_names_[arg_i].c_str());
|
||||
StrFormat("%s:", family->arg_names_[arg_i].c_str());
|
||||
}
|
||||
}
|
||||
|
||||
instance.name += StringPrintF("%d", arg);
|
||||
|
||||
instance.name += StrFormat("%d", arg);
|
||||
++arg_i;
|
||||
}
|
||||
|
||||
if (!IsZero(family->min_time_))
|
||||
instance.name += StringPrintF("/min_time:%0.3f", family->min_time_);
|
||||
instance.name += StrFormat("/min_time:%0.3f", family->min_time_);
|
||||
if (family->iterations_ != 0)
|
||||
instance.name += StringPrintF("/iterations:%d", family->iterations_);
|
||||
instance.name += StrFormat("/iterations:%d", family->iterations_);
|
||||
if (family->repetitions_ != 0)
|
||||
instance.name += StringPrintF("/repeats:%d", family->repetitions_);
|
||||
instance.name += StrFormat("/repeats:%d", family->repetitions_);
|
||||
|
||||
if (family->use_manual_time_) {
|
||||
instance.name += "/manual_time";
|
||||
@@ -193,10 +201,11 @@ bool BenchmarkFamilies::FindBenchmarks(
|
||||
|
||||
// Add the number of threads used to the name
|
||||
if (!family->thread_counts_.empty()) {
|
||||
instance.name += StringPrintF("/threads:%d", instance.threads);
|
||||
instance.name += StrFormat("/threads:%d", instance.threads);
|
||||
}
|
||||
|
||||
if (re.Match(instance.name)) {
|
||||
if ((re.Match(instance.name) && !isNegativeFilter) ||
|
||||
(!re.Match(instance.name) && isNegativeFilter)) {
|
||||
instance.last_benchmark_instance = (&args == &family->args_.back());
|
||||
benchmarks->push_back(std::move(instance));
|
||||
}
|
||||
@@ -216,7 +225,7 @@ Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
|
||||
// FIXME: This function is a hack so that benchmark.cc can access
|
||||
// `BenchmarkFamilies`
|
||||
bool FindBenchmarksInternal(const std::string& re,
|
||||
std::vector<Benchmark::Instance>* benchmarks,
|
||||
std::vector<BenchmarkInstance>* benchmarks,
|
||||
std::ostream* Err) {
|
||||
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
|
||||
}
@@ -227,7 +236,7 @@ bool FindBenchmarksInternal(const std::string& re,

Benchmark::Benchmark(const char* name)
    : name_(name),
      report_mode_(RM_Unspecified),
      aggregation_report_mode_(ARM_Unspecified),
      time_unit_(kNanosecond),
      range_multiplier_(kRangeMultiplier),
      min_time_(0),
@@ -244,30 +253,7 @@ Benchmark::Benchmark(const char* name)

Benchmark::~Benchmark() {}

void Benchmark::AddRange(std::vector<int>* dst, int lo, int hi, int mult) {
  CHECK_GE(lo, 0);
  CHECK_GE(hi, lo);
  CHECK_GE(mult, 2);

  // Add "lo"
  dst->push_back(lo);

  static const int kint32max = std::numeric_limits<int32_t>::max();

  // Now space out the benchmarks in multiples of "mult"
  for (int32_t i = 1; i < kint32max / mult; i *= mult) {
    if (i >= hi) break;
    if (i > lo) {
      dst->push_back(i);
    }
  }
  // Add "hi" (if different from "lo")
  if (hi != lo) {
    dst->push_back(hi);
  }
}

Benchmark* Benchmark::Arg(int x) {
Benchmark* Benchmark::Arg(int64_t x) {
  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
  args_.push_back({x});
  return this;
@@ -278,20 +264,21 @@ Benchmark* Benchmark::Unit(TimeUnit unit) {
  return this;
}

Benchmark* Benchmark::Range(int start, int limit) {
Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
  std::vector<int> arglist;
  std::vector<int64_t> arglist;
  AddRange(&arglist, start, limit, range_multiplier_);

  for (int i : arglist) {
  for (int64_t i : arglist) {
    args_.push_back({i});
  }
  return this;
}

Benchmark* Benchmark::Ranges(const std::vector<std::pair<int, int>>& ranges) {
Benchmark* Benchmark::Ranges(
    const std::vector<std::pair<int64_t, int64_t>>& ranges) {
  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
  std::vector<std::vector<int>> arglists(ranges.size());
  std::vector<std::vector<int64_t>> arglists(ranges.size());
  std::size_t total = 1;
  for (std::size_t i = 0; i < ranges.size(); i++) {
    AddRange(&arglists[i], ranges[i].first, ranges[i].second,
@@ -302,7 +289,7 @@ Benchmark* Benchmark::Ranges(const std::vector<std::pair<int, int>>& ranges) {
  std::vector<std::size_t> ctr(arglists.size(), 0);

  for (std::size_t i = 0; i < total; i++) {
    std::vector<int> tmp;
    std::vector<int64_t> tmp;
    tmp.reserve(arglists.size());

    for (std::size_t j = 0; j < arglists.size(); j++) {
@@ -334,17 +321,17 @@ Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
  return this;
}

Benchmark* Benchmark::DenseRange(int start, int limit, int step) {
Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
  CHECK_GE(start, 0);
  CHECK_LE(start, limit);
  for (int arg = start; arg <= limit; arg += step) {
  for (int64_t arg = start; arg <= limit; arg += step) {
    args_.push_back({arg});
  }
  return this;
}

Benchmark* Benchmark::Args(const std::vector<int>& args) {
Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
  args_.push_back(args);
  return this;
@@ -361,7 +348,6 @@ Benchmark* Benchmark::RangeMultiplier(int multiplier) {
  return this;
}


Benchmark* Benchmark::MinTime(double t) {
  CHECK(t > 0.0);
  CHECK(iterations_ == 0);
@@ -369,7 +355,6 @@ Benchmark* Benchmark::MinTime(double t) {
  return this;
}


Benchmark* Benchmark::Iterations(size_t n) {
  CHECK(n > 0);
  CHECK(IsZero(min_time_));
@@ -384,7 +369,23 @@ Benchmark* Benchmark::Repetitions(int n) {
}

Benchmark* Benchmark::ReportAggregatesOnly(bool value) {
  report_mode_ = value ? RM_ReportAggregatesOnly : RM_Default;
  aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
  return this;
}

Benchmark* Benchmark::DisplayAggregatesOnly(bool value) {
  // If we were called, the report mode is no longer 'unspecified', in any case.
  aggregation_report_mode_ = static_cast<AggregationReportMode>(
      aggregation_report_mode_ | ARM_Default);

  if (value) {
    aggregation_report_mode_ = static_cast<AggregationReportMode>(
        aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
  } else {
    aggregation_report_mode_ = static_cast<AggregationReportMode>(
        aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
  }

  return this;
}
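The rewritten filter condition in the FindBenchmarks hunk above is an exclusive-or of the match result and the filter sign: a spec beginning with '-' selects exactly the benchmarks the remaining pattern does not match. A minimal standalone sketch of the same semantics, using plain std::regex rather than the library's Regex wrapper (the helper name is hypothetical, not part of this commit):

#include <regex>
#include <string>

// A spec starting with '-' keeps the names the remaining pattern does NOT match.
bool PassesFilter(std::string spec, const std::string& name) {
  bool is_negative = false;
  if (!spec.empty() && spec[0] == '-') {
    spec.erase(0, 1);  // same effect as spec.replace(0, 1, "")
    is_negative = true;
  }
  const bool matched =
      std::regex_search(name, std::regex(spec, std::regex::extended));
  return matched != is_negative;  // match XOR negative
}

Note this evaluates the pattern once, whereas the patched condition calls re.Match(instance.name) twice; the result is the same.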
33
utils/google-benchmark/src/benchmark_register.h
Normal file
@@ -0,0 +1,33 @@
#ifndef BENCHMARK_REGISTER_H
#define BENCHMARK_REGISTER_H

#include <vector>

#include "check.h"

template <typename T>
void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
  CHECK_GE(lo, 0);
  CHECK_GE(hi, lo);
  CHECK_GE(mult, 2);

  // Add "lo"
  dst->push_back(lo);

  static const T kmax = std::numeric_limits<T>::max();

  // Now space out the benchmarks in multiples of "mult"
  for (T i = 1; i < kmax / mult; i *= mult) {
    if (i >= hi) break;
    if (i > lo) {
      dst->push_back(i);
    }
  }

  // Add "hi" (if different from "lo")
  if (hi != lo) {
    dst->push_back(hi);
  }
}

#endif  // BENCHMARK_REGISTER_H
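A quick illustration of what the template above generates. This driver is not part of the commit; it also pulls in <limits> explicitly, which the header itself relies on picking up transitively:

#include <cstdint>
#include <iostream>
#include <limits>  // AddRange uses std::numeric_limits
#include <vector>

#include "benchmark_register.h"

int main() {
  std::vector<int64_t> dst;
  // lo=8, hi=1024, mult=8: emits lo, then the powers of 8 strictly between
  // lo and hi, then hi itself.
  AddRange<int64_t>(&dst, 8, 1024, 8);
  for (int64_t v : dst) std::cout << v << " ";  // prints: 8 64 512 1024
  std::cout << "\n";
}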
350
utils/google-benchmark/src/benchmark_runner.cc
Normal file
@@ -0,0 +1,350 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "benchmark_runner.h"
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"

#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <utility>

#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"

namespace benchmark {

namespace internal {

MemoryManager* memory_manager = nullptr;

namespace {

static const size_t kMaxIterations = 1000000000;

BenchmarkReporter::Run CreateRunReport(
    const benchmark::internal::BenchmarkInstance& b,
    const internal::ThreadManager::Result& results, size_t memory_iterations,
    const MemoryManager::Result& memory_result, double seconds) {
  // Create report about this benchmark run.
  BenchmarkReporter::Run report;

  report.run_name = b.name;
  report.error_occurred = results.has_error_;
  report.error_message = results.error_message_;
  report.report_label = results.report_label_;
  // This is the total iterations across all threads.
  report.iterations = results.iterations;
  report.time_unit = b.time_unit;

  if (!report.error_occurred) {
    if (b.use_manual_time) {
      report.real_accumulated_time = results.manual_time_used;
    } else {
      report.real_accumulated_time = results.real_time_used;
    }
    report.cpu_accumulated_time = results.cpu_time_used;
    report.complexity_n = results.complexity_n;
    report.complexity = b.complexity;
    report.complexity_lambda = b.complexity_lambda;
    report.statistics = b.statistics;
    report.counters = results.counters;

    if (memory_iterations > 0) {
      report.has_memory_result = true;
      report.allocs_per_iter =
          memory_iterations ? static_cast<double>(memory_result.num_allocs) /
                                  memory_iterations
                            : 0;
      report.max_bytes_used = memory_result.max_bytes_used;
    }

    internal::Finish(&report.counters, results.iterations, seconds, b.threads);
  }
  return report;
}

// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total.
void RunInThread(const BenchmarkInstance* b, size_t iters, int thread_id,
                 ThreadManager* manager) {
  internal::ThreadTimer timer;
  State st = b->Run(iters, thread_id, &timer, manager);
  CHECK(st.iterations() >= st.max_iterations)
      << "Benchmark returned before State::KeepRunning() returned false!";
  {
    MutexLock l(manager->GetBenchmarkMutex());
    internal::ThreadManager::Result& results = manager->results;
    results.iterations += st.iterations();
    results.cpu_time_used += timer.cpu_time_used();
    results.real_time_used += timer.real_time_used();
    results.manual_time_used += timer.manual_time_used();
    results.complexity_n += st.complexity_length_n();
    internal::Increment(&results.counters, st.counters);
  }
  manager->NotifyThreadComplete();
}

class BenchmarkRunner {
 public:
  BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
                  std::vector<BenchmarkReporter::Run>* complexity_reports_)
      : b(b_),
        complexity_reports(*complexity_reports_),
        min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
        repeats(b.repetitions != 0 ? b.repetitions
                                   : FLAGS_benchmark_repetitions),
        has_explicit_iteration_count(b.iterations != 0),
        pool(b.threads - 1),
        iters(has_explicit_iteration_count ? b.iterations : 1) {
    run_results.display_report_aggregates_only =
        (FLAGS_benchmark_report_aggregates_only ||
         FLAGS_benchmark_display_aggregates_only);
    run_results.file_report_aggregates_only =
        FLAGS_benchmark_report_aggregates_only;
    if (b.aggregation_report_mode != internal::ARM_Unspecified) {
      run_results.display_report_aggregates_only =
          (b.aggregation_report_mode &
           internal::ARM_DisplayReportAggregatesOnly);
      run_results.file_report_aggregates_only =
          (b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
    }

    for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
      const bool is_the_first_repetition = repetition_num == 0;
      DoOneRepetition(is_the_first_repetition);
    }

    // Calculate additional statistics
    run_results.aggregates_only = ComputeStats(run_results.non_aggregates);

    // Maybe calculate complexity report
    if ((b.complexity != oNone) && b.last_benchmark_instance) {
      auto additional_run_stats = ComputeBigO(complexity_reports);
      run_results.aggregates_only.insert(run_results.aggregates_only.end(),
                                         additional_run_stats.begin(),
                                         additional_run_stats.end());
      complexity_reports.clear();
    }
  }

  RunResults&& get_results() { return std::move(run_results); }

 private:
  RunResults run_results;

  const benchmark::internal::BenchmarkInstance& b;
  std::vector<BenchmarkReporter::Run>& complexity_reports;

  const double min_time;
  const int repeats;
  const bool has_explicit_iteration_count;

  std::vector<std::thread> pool;

  size_t iters;  // preserved between repetitions!
  // So only the first repetition has to find/calculate it,
  // the other repetitions will just use that precomputed iteration count.

  struct IterationResults {
    internal::ThreadManager::Result results;
    size_t iters;
    double seconds;
  };
  IterationResults DoNIterations() {
    VLOG(2) << "Running " << b.name << " for " << iters << "\n";

    std::unique_ptr<internal::ThreadManager> manager;
    manager.reset(new internal::ThreadManager(b.threads));

    // Run all but one thread in separate threads
    for (std::size_t ti = 0; ti < pool.size(); ++ti) {
      pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
                             manager.get());
    }
    // And run one thread here directly.
    // (If we were asked to run just one thread, we don't create new threads.)
    // Yes, we need to do this here *after* we start the separate threads.
    RunInThread(&b, iters, 0, manager.get());

    // The main thread has finished. Now let's wait for the other threads.
    manager->WaitForAllThreads();
    for (std::thread& thread : pool) thread.join();

    IterationResults i;
    // Acquire the measurements/counters from the manager, UNDER THE LOCK!
    {
      MutexLock l(manager->GetBenchmarkMutex());
      i.results = manager->results;
    }

    // And get rid of the manager.
    manager.reset();

    // Adjust real/manual time stats since they were reported per thread.
    i.results.real_time_used /= b.threads;
    i.results.manual_time_used /= b.threads;

    VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
            << i.results.real_time_used << "\n";

    // So for how long were we running?
    i.iters = iters;
    // Base decisions off of real time if requested by this benchmark.
    i.seconds = i.results.cpu_time_used;
    if (b.use_manual_time) {
      i.seconds = i.results.manual_time_used;
    } else if (b.use_real_time) {
      i.seconds = i.results.real_time_used;
    }

    return i;
  }

  size_t PredictNumItersNeeded(const IterationResults& i) const {
    // See by how much the iteration count should be increased.
    // Note: Avoid division by zero with max(seconds, 1ns).
    double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
    // If our last run was at least 10% of FLAGS_benchmark_min_time then we
    // use the multiplier directly.
    // Otherwise we use at most 10 times expansion.
    // NOTE: When the last run was at least 10% of the min time the max
    // expansion should be 14x.
    bool is_significant = (i.seconds / min_time) > 0.1;
    multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
    if (multiplier <= 1.0) multiplier = 2.0;

    // So what seems to be the sufficiently-large iteration count? Round up.
    const size_t max_next_iters =
        0.5 + std::max(multiplier * i.iters, i.iters + 1.0);
    // But we do have *some* sanity limits though..
    const size_t next_iters = std::min(max_next_iters, kMaxIterations);

    VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
    return next_iters;  // round up before conversion to integer.
  }

  bool ShouldReportIterationResults(const IterationResults& i) const {
    // Determine if this run should be reported;
    // either it has run for a sufficient amount of time
    // or because an error was reported.
    return i.results.has_error_ ||
           i.iters >= kMaxIterations ||  // Too many iterations already.
           i.seconds >= min_time ||      // The elapsed time is large enough.
           // CPU time is specified but the elapsed real time greatly exceeds
           // the minimum time.
           // Note that user-provided timers are exempt from this sanity check.
           ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
  }

  void DoOneRepetition(bool is_the_first_repetition) {
    IterationResults i;

    // We *may* be gradually increasing the length (iteration count)
    // of the benchmark until we decide the results are significant.
    // And once we do, we report those last results and exit.
    // Please do note that if there are repetitions, the iteration count
    // is *only* calculated for the *first* repetition, and other repetitions
    // simply use that precomputed iteration count.
    for (;;) {
      i = DoNIterations();

      // Do we consider the results to be significant?
      // If we are doing repetitions, and the first repetition was already done,
      // it has calculated the correct iteration time, so we have run that very
      // iteration count just now. No need to calculate anything. Just report.
      // Else, the normal rules apply.
      const bool results_are_significant = !is_the_first_repetition ||
                                           has_explicit_iteration_count ||
                                           ShouldReportIterationResults(i);

      if (results_are_significant) break;  // Good, let's report them!

      // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
      // iteration count, and run the benchmark again...

      iters = PredictNumItersNeeded(i);
      assert(iters > i.iters &&
             "if we did more iterations than we want to do the next time, "
             "then we should have accepted the current iteration run.");
    }

    // Oh, one last thing, we need to also produce the 'memory measurements'..
    MemoryManager::Result memory_result;
    size_t memory_iterations = 0;
    if (memory_manager != nullptr) {
      // Only run a few iterations to reduce the impact of one-time
      // allocations in benchmarks that are not properly managed.
      memory_iterations = std::min<size_t>(16, iters);
      memory_manager->Start();
      std::unique_ptr<internal::ThreadManager> manager;
      manager.reset(new internal::ThreadManager(1));
      RunInThread(&b, memory_iterations, 0, manager.get());
      manager->WaitForAllThreads();
      manager.reset();

      memory_manager->Stop(&memory_result);
    }

    // Ok, now actually report.
    BenchmarkReporter::Run report = CreateRunReport(
        b, i.results, memory_iterations, memory_result, i.seconds);

    if (!report.error_occurred && b.complexity != oNone)
      complexity_reports.push_back(report);

    run_results.non_aggregates.push_back(report);
  }
};

}  // end namespace

RunResults RunBenchmark(
    const benchmark::internal::BenchmarkInstance& b,
    std::vector<BenchmarkReporter::Run>* complexity_reports) {
  internal::BenchmarkRunner r(b, complexity_reports);
  return r.get_results();
}

}  // end namespace internal

}  // end namespace benchmark
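The growth policy in PredictNumItersNeeded above is simple enough to trace by hand. A minimal sketch of just the arithmetic, as a hypothetical free function rather than the class member, with kMaxIterations and the 1.4 fudge factor copied from the file:

#include <algorithm>
#include <cstddef>

// Mirrors PredictNumItersNeeded: scale the last iteration count so the next
// run should land ~40% past min_time; clamp insignificant samples to 10x.
size_t NextIters(double min_time, double seconds, size_t last_iters) {
  double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
  const bool is_significant = (seconds / min_time) > 0.1;
  if (!is_significant) multiplier = std::min(10.0, multiplier);
  if (multiplier <= 1.0) multiplier = 2.0;
  const size_t max_next =
      0.5 + std::max(multiplier * last_iters, last_iters + 1.0);
  return std::min<size_t>(max_next, 1000000000 /* kMaxIterations */);
}

For example, with min_time = 0.5s and a last run of 1000 iterations in 0.1s, the multiplier is 7 (and the run is significant, 0.1/0.5 > 0.1), so the next repetition tries 7000 iterations.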
51
utils/google-benchmark/src/benchmark_runner.h
Normal file
@@ -0,0 +1,51 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef BENCHMARK_RUNNER_H_
#define BENCHMARK_RUNNER_H_

#include "benchmark_api_internal.h"
#include "internal_macros.h"

DECLARE_double(benchmark_min_time);

DECLARE_int32(benchmark_repetitions);

DECLARE_bool(benchmark_report_aggregates_only);

DECLARE_bool(benchmark_display_aggregates_only);

namespace benchmark {

namespace internal {

extern MemoryManager* memory_manager;

struct RunResults {
  std::vector<BenchmarkReporter::Run> non_aggregates;
  std::vector<BenchmarkReporter::Run> aggregates_only;

  bool display_report_aggregates_only = false;
  bool file_report_aggregates_only = false;
};

RunResults RunBenchmark(
    const benchmark::internal::BenchmarkInstance& b,
    std::vector<BenchmarkReporter::Run>* complexity_reports);

}  // namespace internal

}  // end namespace benchmark

#endif  // BENCHMARK_RUNNER_H_
@@ -1,9 +1,9 @@
#ifndef CHECK_H_
#define CHECK_H_

#include <cmath>
#include <cstdlib>
#include <ostream>
#include <cmath>

#include "internal_macros.h"
#include "log.h"
@@ -62,6 +62,8 @@ class CheckHandler {
#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif

// clang-format off
// preserve whitespacing between operators for alignment
#define CHECK_EQ(a, b) CHECK((a) == (b))
#define CHECK_NE(a, b) CHECK((a) != (b))
#define CHECK_GE(a, b) CHECK((a) >= (b))
@@ -75,5 +77,6 @@ class CheckHandler {
#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps))
#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps))
//clang-format on

#endif  // CHECK_H_

@@ -25,7 +25,7 @@
#include "internal_macros.h"

#ifdef BENCHMARK_OS_WINDOWS
#include <Windows.h>
#include <windows.h>
#include <io.h>
#else
#include <unistd.h>

@@ -45,7 +45,7 @@ bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
      // LONG_MAX or LONG_MIN when the input overflows.)
      result != long_value
      // The parsed value overflows as an Int32.
      ) {
  ) {
    std::cerr << src_text << " is expected to be a 32-bit integer, "
              << "but actually has value \"" << str << "\", "
              << "which overflows.\n";

@@ -26,20 +26,23 @@ namespace benchmark {

// Internal function to calculate the different scalability forms
BigOFunc* FittingCurve(BigO complexity) {
  static const double kLog2E = 1.44269504088896340736;
  switch (complexity) {
    case oN:
      return [](int n) -> double { return n; };
      return [](int64_t n) -> double { return static_cast<double>(n); };
    case oNSquared:
      return [](int n) -> double { return std::pow(n, 2); };
      return [](int64_t n) -> double { return std::pow(n, 2); };
    case oNCubed:
      return [](int n) -> double { return std::pow(n, 3); };
      return [](int64_t n) -> double { return std::pow(n, 3); };
    case oLogN:
      return [](int n) { return log2(n); };
      /* Note: can't use log2 because Android's GNU STL lacks it */
      return [](int64_t n) { return kLog2E * log(static_cast<double>(n)); };
    case oNLogN:
      return [](int n) { return n * log2(n); };
      /* Note: can't use log2 because Android's GNU STL lacks it */
      return [](int64_t n) { return kLog2E * n * log(static_cast<double>(n)); };
    case o1:
    default:
      return [](int) { return 1.0; };
      return [](int64_t) { return 1.0; };
  }
}
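The kLog2E constant works because of the change-of-base identity, so the rewritten lambdas compute the same value log2 did:

\log_2 n \;=\; \frac{\ln n}{\ln 2} \;=\; (\log_2 e)\,\ln n \;\approx\; 1.44269504088896340736 \cdot \ln n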
@@ -65,15 +68,15 @@ std::string GetBigOString(BigO complexity) {

// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error, for the fitting curve
// given by the lambda expresion.
// given by the lambda expression.
//   - n             : Vector containing the size of the benchmark tests.
//   - time          : Vector containing the times for the benchmark tests.
//   - fitting_curve : lambda expresion (e.g. [](int n) {return n; };).
//   - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).

// For a deeper explanation of the algorithm logic, see the README file at
// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit

LeastSq MinimalLeastSq(const std::vector<int>& n,
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                       const std::vector<double>& time,
                       BigOFunc* fitting_curve) {
  double sigma_gn = 0.0;
@@ -117,7 +120,7 @@ LeastSq MinimalLeastSq(const std::vector<int>& n,
//   - complexity : If different than oAuto, the fitting curve will stick to
//                  this one. If it is oAuto, it will be calculated the best
//                  fitting curve.
LeastSq MinimalLeastSq(const std::vector<int>& n,
LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                       const std::vector<double>& time, const BigO complexity) {
  CHECK_EQ(n.size(), time.size());
  CHECK_GE(n.size(), 2);  // Do not compute fitting curve if less than two
@@ -157,7 +160,7 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
  if (reports.size() < 2) return results;

  // Accumulators.
  std::vector<int> n;
  std::vector<int64_t> n;
  std::vector<double> real_time;
  std::vector<double> cpu_time;

@@ -179,12 +182,15 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
    result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
    result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
  }
  std::string benchmark_name =
      reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));

  std::string run_name = reports[0].benchmark_name().substr(
      0, reports[0].benchmark_name().find('/'));

  // Get the data from the accumulator to BenchmarkReporter::Run's.
  Run big_o;
  big_o.benchmark_name = benchmark_name + "_BigO";
  big_o.run_name = run_name;
  big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
  big_o.aggregate_name = "BigO";
  big_o.iterations = 0;
  big_o.real_accumulated_time = result_real.coef;
  big_o.cpu_accumulated_time = result_cpu.coef;
@@ -200,8 +206,10 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(

  // Only add label to mean/stddev if it is same for all runs
  Run rms;
  rms.run_name = run_name;
  big_o.report_label = reports[0].report_label;
  rms.benchmark_name = benchmark_name + "_RMS";
  rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
  rms.aggregate_name = "RMS";
  rms.report_label = big_o.report_label;
  rms.iterations = 0;
  rms.real_accumulated_time = result_real.rms / multiplier;
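For reference, the closed form behind a fit of t ≈ c·g(n). This is the ordinary least-squares solution; the relative-error variant the comment above describes weights each term by the measured time, but the shape is the same:

c \;=\; \frac{\sum_i g(n_i)\, t_i}{\sum_i g(n_i)^2},
\qquad
\mathrm{rms} \;=\; \sqrt{\frac{1}{N}\sum_i \bigl(t_i - c\,g(n_i)\bigr)^2}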
@@ -106,7 +106,7 @@ void ConsoleReporter::PrintRunData(const Run& result) {
  auto name_color =
      (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
  printer(Out, name_color, "%-*s ", name_field_width_,
          result.benchmark_name.c_str());
          result.benchmark_name().c_str());

  if (result.error_occurred) {
    printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
@@ -114,18 +114,6 @@ void ConsoleReporter::PrintRunData(const Run& result) {
    printer(Out, COLOR_DEFAULT, "\n");
    return;
  }
  // Format bytes per second
  std::string rate;
  if (result.bytes_per_second > 0) {
    rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s");
  }

  // Format items per second
  std::string items;
  if (result.items_per_second > 0) {
    items =
        StrCat(" ", HumanReadableNumber(result.items_per_second), " items/s");
  }

  const double real_time = result.GetAdjustedRealTime();
  const double cpu_time = result.GetAdjustedCPUTime();
@@ -150,7 +138,7 @@ void ConsoleReporter::PrintRunData(const Run& result) {
  for (auto& c : result.counters) {
    const std::size_t cNameLen = std::max(std::string::size_type(10),
                                          c.first.length());
    auto const& s = HumanReadableNumber(c.second.value, 1000);
    auto const& s = HumanReadableNumber(c.second.value, c.second.oneK);
    if (output_options_ & OO_Tabular) {
      if (c.second.flags & Counter::kIsRate) {
        printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str());
@@ -164,14 +152,6 @@ void ConsoleReporter::PrintRunData(const Run& result) {
    }
  }

  if (!rate.empty()) {
    printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str());
  }

  if (!items.empty()) {
    printer(Out, COLOR_DEFAULT, " %*s", 18, items.c_str());
  }

  if (!result.report_label.empty()) {
    printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
  }

@@ -17,7 +17,8 @@
namespace benchmark {
namespace internal {

double Finish(Counter const& c, double cpu_time, double num_threads) {
double Finish(Counter const& c, int64_t iterations, double cpu_time,
              double num_threads) {
  double v = c.value;
  if (c.flags & Counter::kIsRate) {
    v /= cpu_time;
@@ -25,25 +26,31 @@ double Finish(Counter const& c, double cpu_time, double num_threads) {
  if (c.flags & Counter::kAvgThreads) {
    v /= num_threads;
  }
  if (c.flags & Counter::kIsIterationInvariant) {
    v *= iterations;
  }
  if (c.flags & Counter::kAvgIterations) {
    v /= iterations;
  }
  return v;
}

void Finish(UserCounters *l, double cpu_time, double num_threads) {
  for (auto &c : *l) {
    c.second.value = Finish(c.second, cpu_time, num_threads);
void Finish(UserCounters* l, int64_t iterations, double cpu_time, double num_threads) {
  for (auto& c : *l) {
    c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
  }
}

void Increment(UserCounters *l, UserCounters const& r) {
void Increment(UserCounters* l, UserCounters const& r) {
  // add counters present in both or just in *l
  for (auto &c : *l) {
  for (auto& c : *l) {
    auto it = r.find(c.first);
    if (it != r.end()) {
      c.second.value = c.second + it->second;
    }
  }
  // add counters present in r, but not in *l
  for (auto const &tc : r) {
  for (auto const& tc : r) {
    auto it = l->find(tc.first);
    if (it == l->end()) {
      (*l)[tc.first] = tc.second;
@@ -64,5 +71,5 @@ bool SameNames(UserCounters const& l, UserCounters const& r) {
  return true;
}

}  // end namespace internal
}  // end namespace benchmark
}  // end namespace internal
}  // end namespace benchmark

@@ -18,9 +18,9 @@ namespace benchmark {

// these counter-related functions are hidden to reduce API surface.
namespace internal {
void Finish(UserCounters *l, double time, double num_threads);
void Increment(UserCounters *l, UserCounters const& r);
void Finish(UserCounters* l, int64_t iterations, double time, double num_threads);
void Increment(UserCounters* l, UserCounters const& r);
bool SameNames(UserCounters const& l, UserCounters const& r);
}  // end namespace internal
}  // end namespace internal

} //end namespace benchmark
}  // end namespace benchmark
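The new iterations parameter gives Finish two more normalizations on top of the rate and per-thread ones. A minimal standalone sketch of the four flag effects, with a pared-down flag enum rather than benchmark::Counter:

#include <cstdint>

// Pared-down illustration of the flag handling in Finish above.
enum Flags : unsigned {
  kIsRate = 1,
  kAvgThreads = 2,
  kIsIterationInvariant = 4,
  kAvgIterations = 8
};

double FinishValue(double v, unsigned flags, int64_t iterations,
                   double cpu_time, double num_threads) {
  if (flags & kIsRate) v /= cpu_time;                  // report per second
  if (flags & kAvgThreads) v /= num_threads;           // report per thread
  if (flags & kIsIterationInvariant) v *= iterations;  // counted once per run
  if (flags & kAvgIterations) v /= iterations;         // report per iteration
  return v;
}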
@@ -22,9 +22,9 @@
#include <tuple>
#include <vector>

#include "check.h"
#include "string_util.h"
#include "timers.h"
#include "check.h"

// File format reference: http://edoceo.com/utilitas/csv-file-format.

@@ -42,13 +42,15 @@ bool CSVReporter::ReportContext(const Context& context) {
  return true;
}

void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
  std::ostream& Out = GetOutputStream();

  if (!printed_header_) {
    // save the names of all the user counters
    for (const auto& run : reports) {
      for (const auto& cnt : run.counters) {
        if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
          continue;
        user_counter_names_.insert(cnt.first);
      }
    }
@@ -58,7 +60,8 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
      Out << *B++;
      if (B != elements.end()) Out << ",";
    }
    for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) {
    for (auto B = user_counter_names_.begin();
         B != user_counter_names_.end();) {
      Out << ",\"" << *B++ << "\"";
    }
    Out << "\n";
@@ -68,10 +71,12 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
    // check that all the current counters are saved in the name set
    for (const auto& run : reports) {
      for (const auto& cnt : run.counters) {
        if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
          continue;
        CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
            << "All counters must be present in each run. "
            << "Counter named \"" << cnt.first
            << "\" was not in a run after being added to the header";
            << "All counters must be present in each run. "
            << "Counter named \"" << cnt.first
            << "\" was not in a run after being added to the header";
      }
    }
  }
@@ -80,15 +85,14 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
  for (const auto& run : reports) {
    PrintRunData(run);
  }

}

void CSVReporter::PrintRunData(const Run & run) {
void CSVReporter::PrintRunData(const Run& run) {
  std::ostream& Out = GetOutputStream();

  // Field with embedded double-quote characters must be doubled and the field
  // delimited with double-quotes.
  std::string name = run.benchmark_name;
  std::string name = run.benchmark_name();
  ReplaceAll(&name, "\"", "\"\"");
  Out << '"' << name << "\",";
  if (run.error_occurred) {
@@ -117,12 +121,12 @@ void CSVReporter::PrintRunData(const Run & run) {
  }
  Out << ",";

  if (run.bytes_per_second > 0.0) {
    Out << run.bytes_per_second;
  if (run.counters.find("bytes_per_second") != run.counters.end()) {
    Out << run.counters.at("bytes_per_second");
  }
  Out << ",";
  if (run.items_per_second > 0.0) {
    Out << run.items_per_second;
  if (run.counters.find("items_per_second") != run.counters.end()) {
    Out << run.counters.at("items_per_second");
  }
  Out << ",";
  if (!run.report_label.empty()) {
@@ -135,9 +139,9 @@ void CSVReporter::PrintRunData(const Run & run) {
  Out << ",,";  // for error_occurred and error_message

  // Print user counters
  for (const auto &ucn : user_counter_names_) {
  for (const auto& ucn : user_counter_names_) {
    auto it = run.counters.find(ucn);
    if(it == run.counters.end()) {
    if (it == run.counters.end()) {
      Out << ",";
    } else {
      Out << "," << it->second;
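The ReplaceAll call in PrintRunData implements the standard CSV quoting rule: double every embedded quote, then wrap the whole field in quotes. A tiny self-contained sketch of the same transformation:

#include <iostream>
#include <string>

int main() {
  std::string name = "BM_Compare<\"abc\">";  // a name with embedded quotes
  // Same effect as ReplaceAll(&name, "\"", "\"\""): double each quote.
  for (std::size_t p = 0; (p = name.find('"', p)) != std::string::npos; p += 2)
    name.insert(p, 1, '"');
  std::cout << '"' << name << "\",\n";  // prints: "BM_Compare<""abc"">",
}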
@@ -41,7 +41,7 @@ extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif

#ifndef BENCHMARK_OS_WINDOWS
#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
#include <sys/time.h>
#include <time.h>
#endif
@@ -121,7 +121,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
  // because it provides nanosecond resolution (which is noticeable at
  // least for PNaCl modules running on x86 Mac & Linux).
  // Initialize to always return 0 if clock_gettime fails.
  struct timespec ts = { 0, 0 };
  struct timespec ts = {0, 0};
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
@@ -159,6 +159,11 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__)  // Covers both s390 and s390x.
  // Return the CPU clock.
  uint64_t tsc;
  asm("stck %0" : "=Q"(tsc) : : "cc");
  return tsc;
#else
  // The soft failover to a generic implementation is automatic only for ARM.
  // For other platforms the developer is expected to make an attempt to create

@@ -3,12 +3,14 @@

#include "benchmark/benchmark.h"

/* Needed to detect STL */
#include <cstdlib>

// clang-format off

#ifndef __has_feature
#define __has_feature(x) 0
#endif
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

#if defined(__clang__)
#if !defined(COMPILER_CLANG)
@@ -38,7 +40,11 @@
#define BENCHMARK_OS_CYGWIN 1
#elif defined(_WIN32)
#define BENCHMARK_OS_WINDOWS 1
#if defined(__MINGW32__)
#define BENCHMARK_OS_MINGW 1
#endif
#elif defined(__APPLE__)
#define BENCHMARK_OS_APPLE 1
#include "TargetConditionals.h"
#if defined(TARGET_OS_MAC)
#define BENCHMARK_OS_MACOSX 1
@@ -50,14 +56,24 @@
#define BENCHMARK_OS_FREEBSD 1
#elif defined(__NetBSD__)
#define BENCHMARK_OS_NETBSD 1
#elif defined(__OpenBSD__)
#define BENCHMARK_OS_OPENBSD 1
#elif defined(__linux__)
#define BENCHMARK_OS_LINUX 1
#elif defined(__native_client__)
#define BENCHMARK_OS_NACL 1
#elif defined(EMSCRIPTEN)
#elif defined(__EMSCRIPTEN__)
#define BENCHMARK_OS_EMSCRIPTEN 1
#elif defined(__rtems__)
#define BENCHMARK_OS_RTEMS 1
#elif defined(__Fuchsia__)
#define BENCHMARK_OS_FUCHSIA 1
#elif defined (__SVR4) && defined (__sun)
#define BENCHMARK_OS_SOLARIS 1
#endif

#if defined(__ANDROID__) && defined(__GLIBCXX__)
#define BENCHMARK_STL_ANDROID_GNUSTL 1
#endif

#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
@@ -71,12 +87,6 @@
#define BENCHMARK_MAYBE_UNUSED
#endif

#if defined(COMPILER_GCC) || __has_builtin(__builtin_unreachable)
#define BENCHMARK_UNREACHABLE() __builtin_unreachable()
#elif defined(COMPILER_MSVC)
#define BENCHMARK_UNREACHABLE() __assume(false)
#else
#define BENCHMARK_UNREACHABLE() ((void)0)
#endif
// clang-format on

#endif  // BENCHMARK_INTERNAL_MACROS_H_
@@ -17,12 +17,12 @@

#include <algorithm>
#include <cstdint>
#include <iomanip>  // for setprecision
#include <iostream>
#include <limits>
#include <string>
#include <tuple>
#include <vector>
#include <iomanip>  // for setprecision
#include <limits>

#include "string_util.h"
#include "timers.h"
@@ -32,15 +32,15 @@ namespace benchmark {
namespace {

std::string FormatKV(std::string const& key, std::string const& value) {
  return StringPrintF("\"%s\": \"%s\"", key.c_str(), value.c_str());
  return StrFormat("\"%s\": \"%s\"", key.c_str(), value.c_str());
}

std::string FormatKV(std::string const& key, const char* value) {
  return StringPrintF("\"%s\": \"%s\"", key.c_str(), value);
  return StrFormat("\"%s\": \"%s\"", key.c_str(), value);
}

std::string FormatKV(std::string const& key, bool value) {
  return StringPrintF("\"%s\": %s", key.c_str(), value ? "true" : "false");
  return StrFormat("\"%s\": %s", key.c_str(), value ? "true" : "false");
}

std::string FormatKV(std::string const& key, int64_t value) {
@@ -53,7 +53,7 @@ std::string FormatKV(std::string const& key, double value) {
  std::stringstream ss;
  ss << '"' << key << "\": ";

  const auto max_digits10 = std::numeric_limits<decltype (value)>::max_digits10;
  const auto max_digits10 = std::numeric_limits<decltype(value)>::max_digits10;
  const auto max_fractional_digits10 = max_digits10 - 1;

  ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
@@ -77,6 +77,15 @@ bool JSONReporter::ReportContext(const Context& context) {
  std::string walltime_value = LocalDateTimeString();
  out << indent << FormatKV("date", walltime_value) << ",\n";

  if (Context::executable_name) {
    // windows uses backslash for its path separator,
    // which must be escaped in JSON otherwise it blows up conforming JSON
    // decoders
    std::string executable_name = Context::executable_name;
    ReplaceAll(&executable_name, "\\", "\\\\");
    out << indent << FormatKV("executable", executable_name) << ",\n";
  }

  CPUInfo const& info = context.cpu_info;
  out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus))
      << ",\n";
@@ -107,6 +116,12 @@ bool JSONReporter::ReportContext(const Context& context) {
  }
  indent = std::string(4, ' ');
  out << indent << "],\n";
  out << indent << "\"load_avg\": [";
  for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
    out << *it++;
    if (it != info.load_avg.end()) out << ",";
  }
  out << "],\n";

#if defined(NDEBUG)
  const char build_type[] = "release";
@@ -150,52 +165,54 @@ void JSONReporter::Finalize() {
void JSONReporter::PrintRunData(Run const& run) {
  std::string indent(6, ' ');
  std::ostream& out = GetOutputStream();
  out << indent << FormatKV("name", run.benchmark_name) << ",\n";
  out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
  out << indent << FormatKV("run_name", run.run_name) << ",\n";
  out << indent << FormatKV("run_type", [&run]() -> const char* {
    switch (run.run_type) {
      case BenchmarkReporter::Run::RT_Iteration:
        return "iteration";
      case BenchmarkReporter::Run::RT_Aggregate:
        return "aggregate";
    }
    BENCHMARK_UNREACHABLE();
  }()) << ",\n";
  if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
    out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
  }
  if (run.error_occurred) {
    out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
    out << indent << FormatKV("error_message", run.error_message) << ",\n";
  }
  if (!run.report_big_o && !run.report_rms) {
    out << indent << FormatKV("iterations", run.iterations) << ",\n";
    out << indent
        << FormatKV("real_time", run.GetAdjustedRealTime())
        << ",\n";
    out << indent
        << FormatKV("cpu_time", run.GetAdjustedCPUTime());
    out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
    out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
    out << ",\n"
        << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
  } else if (run.report_big_o) {
    out << indent
        << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
    out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime())
        << ",\n";
    out << indent
        << FormatKV("real_coefficient", run.GetAdjustedRealTime())
    out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime())
        << ",\n";
    out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
    out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
  } else if (run.report_rms) {
    out << indent
        << FormatKV("rms", run.GetAdjustedCPUTime());
    out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
  }
  if (run.bytes_per_second > 0.0) {
    out << ",\n"
        << indent
        << FormatKV("bytes_per_second", run.bytes_per_second);

  for (auto& c : run.counters) {
    out << ",\n" << indent << FormatKV(c.first, c.second);
  }
  if (run.items_per_second > 0.0) {
    out << ",\n"
        << indent
        << FormatKV("items_per_second", run.items_per_second);
  }
  for(auto &c : run.counters) {
    out << ",\n"
        << indent
        << FormatKV(c.first, c.second);

  if (run.has_memory_result) {
    out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
    out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
  }

  if (!run.report_label.empty()) {
    out << ",\n" << indent << FormatKV("label", run.report_label);
  }
  out << '\n';
}

}  // end namespace benchmark
}  // end namespace benchmark
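Putting the new fields together, a non-aggregate run record written by PrintRunData would now look roughly like this. The keys come from the FormatKV calls above; the benchmark name and values are hypothetical:

{
  "name": "BM_memcpy/64",
  "run_name": "BM_memcpy/64",
  "run_type": "iteration",
  "iterations": 1000000,
  "real_time": 2.4e+01,
  "cpu_time": 2.4e+01,
  "time_unit": "ns"
}

An aggregate record would additionally carry "aggregate_name" and use "run_type": "aggregate".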
@@ -66,8 +66,9 @@ inline LogType& GetLogInstanceForLevel(int level) {
}  // end namespace internal
}  // end namespace benchmark

// clang-format off
#define VLOG(x)                                                               \
  (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
                                                       " ")

// clang-format on
#endif

@@ -17,22 +17,39 @@

#include "internal_macros.h"

// clang-format off

#if !defined(HAVE_STD_REGEX) && \
    !defined(HAVE_GNU_POSIX_REGEX) && \
    !defined(HAVE_POSIX_REGEX)
  // No explicit regex selection; detect based on builtin hints.
  #if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE)
    #define HAVE_POSIX_REGEX 1
  #elif __cplusplus >= 199711L
    #define HAVE_STD_REGEX 1
  #endif
#endif

// Prefer C regex libraries when compiling w/o exceptions so that we can
// correctly report errors.
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && defined(HAVE_STD_REGEX) && \
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \
    defined(BENCHMARK_HAVE_STD_REGEX) && \
    (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
#undef HAVE_STD_REGEX
  #undef HAVE_STD_REGEX
#endif

#if defined(HAVE_STD_REGEX)
#include <regex>
  #include <regex>
#elif defined(HAVE_GNU_POSIX_REGEX)
#include <gnuregex.h>
  #include <gnuregex.h>
#elif defined(HAVE_POSIX_REGEX)
#include <regex.h>
  #include <regex.h>
#else
#error No regular expression backend was found!
#endif

// clang-format on

#include <string>

#include "check.h"
@@ -72,20 +89,21 @@ class Regex {

inline bool Regex::Init(const std::string& spec, std::string* error) {
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
  ((void)error);  // suppress unused warning
  ((void)error);  // suppress unused warning
#else
  try {
#endif
  re_ = std::regex(spec, std::regex_constants::extended);
  init_ = true;
    re_ = std::regex(spec, std::regex_constants::extended);
    init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
} catch (const std::regex_error& e) {
  if (error) {
    *error = e.what();
  }
}
  } catch (const std::regex_error& e) {
    if (error) {
      *error = e.what();
    }
  }
#endif
return init_;
  return init_;
}

inline Regex::~Regex() {}
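Usage follows the two-step shape visible in FindBenchmarks earlier in the patch: Init compiles the pattern, reporting failure through the out-parameter rather than letting an exception escape, and Match is then queried repeatedly. A minimal sketch, assuming the class lives in namespace benchmark as the rest of the library does:

#include <iostream>
#include <string>

#include "re.h"

int main() {
  benchmark::Regex re;
  std::string err;
  if (!re.Init("BM_.*", &err)) {  // POSIX extended syntax, as above
    std::cerr << "Could not compile re: " << err << "\n";
    return 1;
  }
  std::cout << re.Match("BM_memcpy/64") << "\n";  // prints 1
}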
@@ -22,6 +22,7 @@
#include <vector>

#include "check.h"
#include "string_util.h"

namespace benchmark {

@@ -37,6 +38,9 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,

  Out << LocalDateTimeString() << "\n";

  if (context.executable_name)
    Out << "Running " << context.executable_name << "\n";

  const CPUInfo &info = context.cpu_info;
  Out << "Run on (" << info.num_cpus << " X "
      << (info.cycles_per_second / 1000000.0) << " MHz CPU "
@@ -51,6 +55,14 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
      Out << "\n";
    }
  }
  if (!info.load_avg.empty()) {
    Out << "Load Average: ";
    for (auto It = info.load_avg.begin(); It != info.load_avg.end();) {
      Out << StrFormat("%.2f", *It++);
      if (It != info.load_avg.end()) Out << ", ";
    }
    Out << "\n";
  }

  if (info.scaling_enabled) {
    Out << "***WARNING*** CPU scaling is enabled, the benchmark "
@@ -64,8 +76,19 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
#endif
}

// No initializer because it's already initialized to NULL.
const char *BenchmarkReporter::Context::executable_name;

BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {}

std::string BenchmarkReporter::Run::benchmark_name() const {
  std::string name = run_name;
  if (run_type == RT_Aggregate) {
    name += "_" + aggregate_name;
  }
  return name;
}

double BenchmarkReporter::Run::GetAdjustedRealTime() const {
  double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
  if (iterations != 0) new_time /= static_cast<double>(iterations);
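The new accessor makes the reported name a pure function of run_name, run_type, and aggregate_name, which is what lets every reporter in this patch switch from the stored benchmark_name field to benchmark_name(). A small sketch with hypothetical values, assuming Run remains default-constructible as the reporters above rely on:

#include <cassert>

#include "benchmark/benchmark.h"

void Demo() {
  benchmark::BenchmarkReporter::Run run;  // fields as reworked in this patch
  run.run_name = "BM_memcpy/64";
  run.run_type = benchmark::BenchmarkReporter::Run::RT_Aggregate;
  run.aggregate_name = "mean";
  assert(run.benchmark_name() == "BM_memcpy/64_mean");
}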
@@ -21,7 +21,7 @@
#include "internal_macros.h"

#ifdef BENCHMARK_OS_WINDOWS
#include <Windows.h>
#include <windows.h>
#endif

namespace benchmark {

@@ -17,9 +17,9 @@

#include <algorithm>
#include <cmath>
#include <numeric>
#include <string>
#include <vector>
#include <numeric>

#include "check.h"
#include "statistics.h"

@@ -30,22 +30,25 @@ auto StatisticsSum = [](const std::vector<double>& v) {
};

double StatisticsMean(const std::vector<double>& v) {
  if (v.size() == 0) return 0.0;
  if (v.empty()) return 0.0;
  return StatisticsSum(v) * (1.0 / v.size());
}

double StatisticsMedian(const std::vector<double>& v) {
  if (v.size() < 3) return StatisticsMean(v);
  std::vector<double> partial;
  // we need roundDown(count/2)+1 slots
  partial.resize(1 + (v.size() / 2));
  std::partial_sort_copy(v.begin(), v.end(), partial.begin(), partial.end());
  // did we have an odd number of samples?
  // if yes, then the last element of the partially-sorted vector is the median
  // if no, then the average of the last two elements is the median
  if(v.size() % 2 == 1)
    return partial.back();
  return (partial[partial.size() - 2] + partial[partial.size() - 1]) / 2.0;
  std::vector<double> copy(v);

  auto center = copy.begin() + v.size() / 2;
  std::nth_element(copy.begin(), center, copy.end());

  // did we have an odd number of samples?
  // if yes, then center is the median
  // if no, then we are looking for the average between center and the value
  // before
  if (v.size() % 2 == 1) return *center;
  auto center2 = copy.begin() + v.size() / 2 - 1;
  std::nth_element(copy.begin(), center2, copy.end());
  return (*center + *center2) / 2.0;
}

// Return the sum of the squares of this sample set
@@ -62,11 +65,10 @@ auto Sqrt = [](const double dat) {

double StatisticsStdDev(const std::vector<double>& v) {
  const auto mean = StatisticsMean(v);
  if (v.size() == 0) return mean;
  if (v.empty()) return mean;

  // Sample standard deviation is undefined for n = 1
  if (v.size() == 1)
    return 0.0;
  if (v.size() == 1) return 0.0;

  const double avg_squares = SumSquares(v) * (1.0 / v.size());
  return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
@@ -89,13 +91,9 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
  // Accumulators.
  std::vector<double> real_accumulated_time_stat;
  std::vector<double> cpu_accumulated_time_stat;
  std::vector<double> bytes_per_second_stat;
  std::vector<double> items_per_second_stat;

  real_accumulated_time_stat.reserve(reports.size());
  cpu_accumulated_time_stat.reserve(reports.size());
  bytes_per_second_stat.reserve(reports.size());
  items_per_second_stat.reserve(reports.size());

  // All repetitions should be run with the same number of iterations so we
  // can take this information from the first benchmark.
@@ -105,11 +103,11 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
    Counter c;
    std::vector<double> s;
  };
  std::map< std::string, CounterStat > counter_stats;
  for(Run const& r : reports) {
    for(auto const& cnt : r.counters) {
  std::map<std::string, CounterStat> counter_stats;
  for (Run const& r : reports) {
    for (auto const& cnt : r.counters) {
      auto it = counter_stats.find(cnt.first);
      if(it == counter_stats.end()) {
      if (it == counter_stats.end()) {
        counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
        it = counter_stats.find(cnt.first);
        it->second.s.reserve(reports.size());
@@ -121,15 +119,13 @@ std::vector<BenchmarkReporter::Run> ComputeStats(

  // Populate the accumulators.
  for (Run const& run : reports) {
    CHECK_EQ(reports[0].benchmark_name, run.benchmark_name);
    CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
    CHECK_EQ(run_iterations, run.iterations);
    if (run.error_occurred) continue;
    real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
    cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
    items_per_second_stat.emplace_back(run.items_per_second);
    bytes_per_second_stat.emplace_back(run.bytes_per_second);
    // user counters
    for(auto const& cnt : run.counters) {
    for (auto const& cnt : run.counters) {
      auto it = counter_stats.find(cnt.first);
      CHECK_NE(it, counter_stats.end());
      it->second.s.emplace_back(cnt.second);
@@ -145,24 +141,43 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
    }
  }

  for(const auto& Stat : *reports[0].statistics) {
  const double iteration_rescale_factor =
      double(reports.size()) / double(run_iterations);

  for (const auto& Stat : *reports[0].statistics) {
    // Get the data from the accumulator to BenchmarkReporter::Run's.
    Run data;
    data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_;
    data.run_name = reports[0].benchmark_name();
    data.run_type = BenchmarkReporter::Run::RT_Aggregate;
    data.aggregate_name = Stat.name_;
    data.report_label = report_label;
    data.iterations = run_iterations;

    // It is incorrect to say that an aggregate is computed over
    // run's iterations, because those iterations already got averaged.
    // Similarly, if there are N repetitions with 1 iteration each,
    // an aggregate will be computed over N measurements, not 1.
    // Thus it is best to simply use the count of separate reports.
    data.iterations = reports.size();

    data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
    data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
    data.bytes_per_second = Stat.compute_(bytes_per_second_stat);
    data.items_per_second = Stat.compute_(items_per_second_stat);

    // We will divide these times by data.iterations when reporting, but the
    // data.iterations is not necessarily the scale of these measurements,
    // because in each repetition, these timers are summed over all the
    // iterations. And if we want to say that the stats are over N repetitions
    // and not M iterations, we need to multiply these by (N/M).
    data.real_accumulated_time *= iteration_rescale_factor;
    data.cpu_accumulated_time *= iteration_rescale_factor;

    data.time_unit = reports[0].time_unit;

    // user counters
    for(auto const& kv : counter_stats) {
    for (auto const& kv : counter_stats) {
      // Do *NOT* rescale the custom counters. They are already properly scaled.
      const auto uc_stat = Stat.compute_(kv.second.s);
      auto c = Counter(uc_stat, counter_stats[kv.first].c.flags);
      auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
                       counter_stats[kv.first].c.oneK);
      data.counters[kv.first] = c;
    }
||||
|
||||
|
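
The iteration_rescale_factor arithmetic is easiest to see with made-up numbers. With N = 3 repetitions of M = 1000 iterations each, every repetition's accumulated time is a sum over 1000 iterations, while the aggregate claims only N = 3 iterations; scaling by N/M keeps the reported per-iteration time honest:

// Worked example, illustrative numbers only: N = 3 repetitions,
// each run for M = 1000 iterations at a true cost of 2.0 ns each.
double per_iteration_ns = 2.0;
double accumulated_ns = 1000 * per_iteration_ns;  // one repetition's sum over M
double rescale = 3.0 / 1000.0;                    // N / M
double aggregate = accumulated_ns * rescale;      // e.g. the mean, rescaled
double reported = aggregate / 3.0;                // reporter divides by N
// reported == 2.0 ns: the per-iteration time survives the rescaling
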
@@ -122,7 +122,7 @@ std::string HumanReadableNumber(double n, double one_k) {
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
}

std::string StringPrintFImp(const char* msg, va_list args) {
std::string StrFormatImp(const char* msg, va_list args) {
// we might need a second shot at this, so pre-emptivly make a copy
va_list args_cp;
va_copy(args_cp, args);
@@ -152,10 +152,10 @@ std::string StringPrintFImp(const char* msg, va_list args) {
return std::string(buff_ptr.get());
}

std::string StringPrintF(const char* format, ...) {
std::string StrFormat(const char* format, ...) {
va_list args;
va_start(args, format);
std::string tmp = StringPrintFImp(format, args);
std::string tmp = StrFormatImp(format, args);
va_end(args);
return tmp;
}
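
The "second shot" comment refers to the standard two-pass vsnprintf pattern: format into a small stack buffer first, and if the output did not fit, allocate exactly the size vsnprintf reported and format again from the saved va_list copy. A self-contained sketch of that pattern (not the library's exact code):

#include <cstdarg>
#include <cstdio>
#include <memory>
#include <string>

std::string FormatSketch(const char* msg, va_list args) {
  va_list args_cp;
  va_copy(args_cp, args);  // vsnprintf consumes the va_list, so keep a copy

  char local_buff[256];
  const int ret = std::vsnprintf(local_buff, sizeof(local_buff), msg, args_cp);
  va_end(args_cp);

  if (ret < 0) return "";  // formatting error
  if (ret < static_cast<int>(sizeof(local_buff))) return local_buff;

  // Second shot: ret is the required length, excluding the terminator.
  const size_t size = static_cast<size_t>(ret) + 1;
  std::unique_ptr<char[]> buff(new char[size]);
  std::vsnprintf(buff.get(), size, msg, args);
  return std::string(buff.get());
}
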
@@ -169,4 +169,93 @@ void ReplaceAll(std::string* str, const std::string& from,
}
}

#ifdef BENCHMARK_STL_ANDROID_GNUSTL
/*
* GNU STL in Android NDK lacks support for some C++11 functions, including
* stoul, stoi, stod. We reimplement them here using C functions strtoul,
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string& str, size_t* pos, int base) {
/* Record previous errno */
const int oldErrno = errno;
errno = 0;

const char* strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart);
const unsigned long result = strtoul(strStart, &strEnd, base);

const int strtoulErrno = errno;
/* Restore previous errno */
errno = oldErrno;

/* Check for errors and return */
if (strtoulErrno == ERANGE) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of unsigned long");
} else if (strEnd == strStart || strtoulErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
}
if (pos != nullptr) {
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
}

int stoi(const std::string& str, size_t* pos, int base) {
/* Record previous errno */
const int oldErrno = errno;
errno = 0;

const char* strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart);
const long result = strtol(strStart, &strEnd, base);

const int strtolErrno = errno;
/* Restore previous errno */
errno = oldErrno;

/* Check for errors and return */
if (strtolErrno == ERANGE || long(int(result)) != result) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtolErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
}
if (pos != nullptr) {
*pos = static_cast<size_t>(strEnd - strStart);
}
return int(result);
}

double stod(const std::string& str, size_t* pos) {
/* Record previous errno */
const int oldErrno = errno;
errno = 0;

const char* strStart = str.c_str();
char* strEnd = const_cast<char*>(strStart);
const double result = strtod(strStart, &strEnd);

/* Restore previous errno */
const int strtodErrno = errno;
errno = oldErrno;

/* Check for errors and return */
if (strtodErrno == ERANGE) {
throw std::out_of_range(
"stoul failed: " + str + " is outside of range of int");
} else if (strEnd == strStart || strtodErrno != 0) {
throw std::invalid_argument(
"stoul failed: " + str + " is not an integer");
}
if (pos != nullptr) {
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
}
#endif

} // end namespace benchmark
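
These fallbacks emulate the std::sto* contract: parse a numeric prefix, report its length through *pos, and throw on bad input. (Note the exception messages in stoi and stod still say "stoul failed", an upstream copy-paste artifact.) A quick usage sketch; since the non-Android branch is a set of using-declarations, benchmark::stoul resolves on every platform:

#include <iostream>
#include <stdexcept>
#include <string>

void ParseDemo() {
  size_t pos = 0;
  // Parses the "123" prefix; pos receives the number of characters consumed.
  unsigned long v = benchmark::stoul("123abc", &pos, 10);
  std::cout << v << " parsed, " << pos << " chars consumed\n";  // 123, 3

  try {
    benchmark::stoi("not-a-number", nullptr, 10);  // no digits at all
  } catch (const std::invalid_argument& e) {
    std::cout << "rejected: " << e.what() << "\n";
  }
}
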

@@ -12,29 +12,45 @@ void AppendHumanReadable(int n, std::string* str);

std::string HumanReadableNumber(double n, double one_k = 1024.0);

std::string StringPrintF(const char* format, ...);
std::string StrFormat(const char* format, ...);

inline std::ostream& StringCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
return out;
}

template <class First, class... Rest>
inline std::ostream& StringCatImp(std::ostream& out, First&& f,
Rest&&... rest) {
inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) {
out << std::forward<First>(f);
return StringCatImp(out, std::forward<Rest>(rest)...);
return StrCatImp(out, std::forward<Rest>(rest)...);
}

template <class... Args>
inline std::string StrCat(Args&&... args) {
std::ostringstream ss;
StringCatImp(ss, std::forward<Args>(args)...);
StrCatImp(ss, std::forward<Args>(args)...);
return ss.str();
}
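
StrCat peels one argument per recursive StrCatImp call, streaming each into a single ostringstream; the zero-argument overload terminates the recursion. Usage is ad hoc, with any mix of streamable types:

// Each argument is forwarded to operator<< exactly once.
std::string s = benchmark::StrCat("cache level ", 2, " size ", 256, "KiB");
// s == "cache level 2 size 256KiB"
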

void ReplaceAll(std::string* str, const std::string& from,
const std::string& to);

#ifdef BENCHMARK_STL_ANDROID_GNUSTL
/*
* GNU STL in Android NDK lacks support for some C++11 functions, including
* stoul, stoi, stod. We reimplement them here using C functions strtoul,
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string& str, size_t* pos = nullptr,
int base = 10);
int stoi(const std::string& str, size_t* pos = nullptr, int base = 10);
double stod(const std::string& str, size_t* pos = nullptr);
#else
using std::stoul;
using std::stoi;
using std::stod;
#endif

} // end namespace benchmark

#endif // BENCHMARK_STRING_UTIL_H_

@@ -15,21 +15,27 @@
#include "internal_macros.h"

#ifdef BENCHMARK_OS_WINDOWS
#include <Shlwapi.h>
#include <VersionHelpers.h>
#include <Windows.h>
#include <shlwapi.h>
#undef StrCat  // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <versionhelpers.h>
#include <windows.h>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
defined BENCHMARK_OS_NETBSD
defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD
#define BENCHMARK_HAS_SYSCTL
#include <sys/sysctl.h>
#endif
#endif
#if defined(BENCHMARK_OS_SOLARIS)
#include <kstat.h>
#endif

#include <algorithm>
#include <array>
@@ -130,6 +136,26 @@ struct ValueUnion {
};

ValueUnion GetSysctlImp(std::string const& Name) {
#if defined BENCHMARK_OS_OPENBSD
int mib[2];

mib[0] = CTL_HW;
if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){
ValueUnion buff(sizeof(int));

if (Name == "hw.ncpu") {
mib[1] = HW_NCPU;
} else {
mib[1] = HW_CPUSPEED;
}

if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1) {
return ValueUnion();
}
return buff;
}
return ValueUnion();
#else
size_t CurBuffSize = 0;
if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
return ValueUnion();
@@ -138,6 +164,7 @@ ValueUnion GetSysctlImp(std::string const& Name) {
if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
return buff;
return ValueUnion();
#endif
}
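
On the sysctlbyname path, the first call with a null buffer only asks the kernel how large the value is; the second call fills a buffer of exactly that size. A standalone sketch of the same two-call pattern (BSD/macOS only; the function name here is illustrative):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <vector>

// Read an arbitrary sysctl value by name into a byte buffer.
// Returns an empty vector on failure. BSD/macOS-only sketch.
std::vector<char> ReadSysctl(const char* name) {
  size_t size = 0;
  // First call: query the required buffer size.
  if (sysctlbyname(name, nullptr, &size, nullptr, 0) == -1) return {};
  std::vector<char> buff(size);
  // Second call: fetch the value itself.
  if (sysctlbyname(name, buff.data(), &size, nullptr, 0) == -1) return {};
  buff.resize(size);
  return buff;
}
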

BENCHMARK_MAYBE_UNUSED
@@ -198,7 +225,7 @@ int CountSetBitsInCPUMap(std::string Val) {
auto CountBits = [](std::string Part) {
using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
Part = "0x" + Part;
CPUMask Mask(std::stoul(Part, nullptr, 16));
CPUMask Mask(benchmark::stoul(Part, nullptr, 16));
return static_cast<int>(Mask.count());
};
size_t Pos;
@@ -261,7 +288,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX() {
std::string name;
std::string type;
int level;
size_t num_sharing;
uint64_t num_sharing;
} Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
{"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
{"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
@@ -303,7 +330,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
if (!B.test(0)) continue;
CInfo* Cache = &it->Cache;
CPUInfo::CacheInfo C;
C.num_sharing = B.count();
C.num_sharing = static_cast<int>(B.count());
C.level = Cache->Level;
C.size = Cache->Size;
switch (Cache->Type) {
@@ -354,6 +381,15 @@ int GetNumCPUs() {
return sysinfo.dwNumberOfProcessors; // number of logical
// processors in the current
// group
#elif defined(BENCHMARK_OS_SOLARIS)
// Returns -1 in case of a failure.
int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
if (NumCPU < 0) {
fprintf(stderr,
"sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n",
strerror(errno));
}
return NumCPU;
#else
int NumCPUs = 0;
int MaxID = -1;
@@ -368,11 +404,17 @@ int GetNumCPUs() {
if (ln.empty()) continue;
size_t SplitIdx = ln.find(':');
std::string value;
#if defined(__s390__)
// s390 has another format in /proc/cpuinfo
// it needs to be parsed differently
if (SplitIdx != std::string::npos) value = ln.substr(Key.size()+1,SplitIdx-Key.size()-1);
#else
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
#endif
if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) {
NumCPUs++;
if (!value.empty()) {
int CurID = std::stoi(value);
int CurID = benchmark::stoi(value);
MaxID = std::max(CurID, MaxID);
}
}
@@ -441,16 +483,16 @@ double GetCPUCyclesPerSecond() {
std::string value;
if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1);
// When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
// accept postive values. Some environments (virtual machines) report zero,
// accept positive values. Some environments (virtual machines) report zero,
// which would cause infinite looping in WallTime_Init.
if (startsWithKey(ln, "cpu MHz")) {
if (!value.empty()) {
double cycles_per_second = std::stod(value) * 1000000.0;
double cycles_per_second = benchmark::stod(value) * 1000000.0;
if (cycles_per_second > 0) return cycles_per_second;
}
} else if (startsWithKey(ln, "bogomips")) {
if (!value.empty()) {
bogo_clock = std::stod(value) * 1000000.0;
bogo_clock = benchmark::stod(value) * 1000000.0;
if (bogo_clock < 0.0) bogo_clock = error_value;
}
}
@@ -473,12 +515,17 @@ double GetCPUCyclesPerSecond() {
constexpr auto* FreqStr =
#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
"machdep.tsc_freq";
#elif defined BENCHMARK_OS_OPENBSD
"hw.cpuspeed";
#else
"hw.cpufrequency";
#endif
unsigned long long hz = 0;
#if defined BENCHMARK_OS_OPENBSD
if (GetSysctl(FreqStr, &hz)) return hz * 1000000;
#else
if (GetSysctl(FreqStr, &hz)) return hz;

#endif
fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
FreqStr, strerror(errno));

@@ -493,6 +540,35 @@ double GetCPUCyclesPerSecond() {
"~MHz", nullptr, &data, &data_size)))
return static_cast<double>((int64_t)data *
(int64_t)(1000 * 1000)); // was mhz
#elif defined (BENCHMARK_OS_SOLARIS)
kstat_ctl_t *kc = kstat_open();
if (!kc) {
std::cerr << "failed to open /dev/kstat\n";
return -1;
}
kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0");
if (!ksp) {
std::cerr << "failed to lookup in /dev/kstat\n";
return -1;
}
if (kstat_read(kc, ksp, NULL) < 0) {
std::cerr << "failed to read from /dev/kstat\n";
return -1;
}
kstat_named_t *knp =
(kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz");
if (!knp) {
std::cerr << "failed to lookup data in /dev/kstat\n";
return -1;
}
if (knp->data_type != KSTAT_DATA_UINT64) {
std::cerr << "current_clock_Hz is of unexpected data type: "
<< knp->data_type << "\n";
return -1;
}
double clock_hz = knp->value.ui64;
kstat_close(kc);
return clock_hz;
#endif
// If we've fallen through, attempt to roughly estimate the CPU clock rate.
const int estimate_time_ms = 1000;
@@ -501,6 +577,24 @@ double GetCPUCyclesPerSecond() {
return static_cast<double>(cycleclock::Now() - start_ticks);
}

std::vector<double> GetLoadAvg() {
#if defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
defined BENCHMARK_OS_OPENBSD
constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples);
if (nelem < 1) {
res.clear();
} else {
res.resize(nelem);
}
return res;
#else
return {};
#endif
}
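
getloadavg() fills up to three samples (the 1-, 5-, and 15-minute averages) and returns how many it actually wrote, so the vector is truncated to the valid prefix, or cleared on failure. A hedged sketch of consuming the resulting field; on unsupported platforms load_avg is simply empty:

#include <cstdio>

void PrintLoadAvg() {
  const benchmark::CPUInfo& info = benchmark::CPUInfo::Get();
  const char* window[] = {"1min", "5min", "15min"};
  // Prints however many samples the platform provided (0 to 3).
  for (size_t i = 0; i < info.load_avg.size() && i < 3; ++i)
    std::printf("load %s: %.2f\n", window[i], info.load_avg[i]);
}
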

} // end namespace

const CPUInfo& CPUInfo::Get() {
@@ -512,6 +606,7 @@ CPUInfo::CPUInfo()
: num_cpus(GetNumCPUs()),
cycles_per_second(GetCPUCyclesPerSecond()),
caches(GetCacheSizes()),
scaling_enabled(CpuScalingEnabled(num_cpus)) {}
scaling_enabled(CpuScalingEnabled(num_cpus)),
load_avg(GetLoadAvg()) {}

} // end namespace benchmark

64
utils/google-benchmark/src/thread_manager.h
Normal file
@@ -0,0 +1,64 @@
#ifndef BENCHMARK_THREAD_MANAGER_H
#define BENCHMARK_THREAD_MANAGER_H

#include <atomic>

#include "benchmark/benchmark.h"
#include "mutex.h"

namespace benchmark {
namespace internal {

class ThreadManager {
public:
ThreadManager(int num_threads)
: alive_threads_(num_threads), start_stop_barrier_(num_threads) {}

Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) {
return benchmark_mutex_;
}

bool StartStopBarrier() EXCLUDES(end_cond_mutex_) {
return start_stop_barrier_.wait();
}

void NotifyThreadComplete() EXCLUDES(end_cond_mutex_) {
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0) {
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
}
}

void WaitForAllThreads() EXCLUDES(end_cond_mutex_) {
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(),
[this]() { return alive_threads_ == 0; });
}

public:
struct Result {
int64_t iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;

private:
mutable Mutex benchmark_mutex_;
std::atomic<int> alive_threads_;
Barrier start_stop_barrier_;
Mutex end_cond_mutex_;
Condition end_condition_;
};

} // namespace internal
} // namespace benchmark

#endif // BENCHMARK_THREAD_MANAGER_H
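
The manager pairs a start/stop barrier with a countdown of live threads: each worker synchronizes on StartStopBarrier() around the timed region, merges its results under GetBenchmarkMutex(), then calls NotifyThreadComplete(), while the main thread blocks in WaitForAllThreads(). A simplified sketch of that protocol, illustrative only and not the library's actual runner code:

void RunWorker(benchmark::internal::ThreadManager* manager) {
  manager->StartStopBarrier();  // wait until every thread is ready
  // ... run the timed benchmark loop ...
  manager->StartStopBarrier();  // wait until every thread is done timing
  {
    // Merge this thread's tallies into the shared Result.
    benchmark::MutexLock guard(manager->GetBenchmarkMutex());
    manager->results.iterations += 1;  // placeholder for real counts
  }
  manager->NotifyThreadComplete();     // last thread wakes the main thread
}
// Main thread: spawn one RunWorker per thread, then
// manager.WaitForAllThreads() before reading manager.results.
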
69
utils/google-benchmark/src/thread_timer.h
Normal file
@@ -0,0 +1,69 @@
#ifndef BENCHMARK_THREAD_TIMER_H
#define BENCHMARK_THREAD_TIMER_H

#include "check.h"
#include "timers.h"

namespace benchmark {
namespace internal {

class ThreadTimer {
public:
ThreadTimer() = default;

// Called by each thread
void StartTimer() {
running_ = true;
start_real_time_ = ChronoClockNow();
start_cpu_time_ = ThreadCPUUsage();
}

// Called by each thread
void StopTimer() {
CHECK(running_);
running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
cpu_time_used_ += std::max<double>(ThreadCPUUsage() - start_cpu_time_, 0);
}

// Called by each thread
void SetIterationTime(double seconds) { manual_time_used_ += seconds; }

bool running() const { return running_; }

// REQUIRES: timer is not running
double real_time_used() {
CHECK(!running_);
return real_time_used_;
}

// REQUIRES: timer is not running
double cpu_time_used() {
CHECK(!running_);
return cpu_time_used_;
}

// REQUIRES: timer is not running
double manual_time_used() {
CHECK(!running_);
return manual_time_used_;
}

private:
bool running_ = false;  // Is the timer running
double start_real_time_ = 0;  // If running_
double start_cpu_time_ = 0;  // If running_

// Accumulated time so far (does not contain current slice if running_)
double real_time_used_ = 0;
double cpu_time_used_ = 0;
// Manually set iteration time. User sets this with SetIterationTime(seconds).
double manual_time_used_ = 0;
};

} // namespace internal
} // namespace benchmark

#endif // BENCHMARK_THREAD_TIMER_H
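
ThreadTimer accumulates across Start/Stop pairs, so pausing and resuming measurement within one run is just a matter of bracketing the interesting region; the getters CHECK that the timer is stopped. A small illustrative sequence:

// Illustrative only: three timed slices, with untimed work in between.
benchmark::internal::ThreadTimer timer;
for (int i = 0; i < 3; ++i) {
  timer.StartTimer();
  // ... timed work ...
  timer.StopTimer();  // accumulates this slice into the running totals
  // ... untimed bookkeeping ...
}
double real = timer.real_time_used();  // sum of the slices; CHECKs !running()
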
@@ -16,12 +16,15 @@
#include "internal_macros.h"

#ifdef BENCHMARK_OS_WINDOWS
#include <Shlwapi.h>
#include <VersionHelpers.h>
#include <Windows.h>
#include <shlwapi.h>
#undef StrCat  // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <versionhelpers.h>
#include <windows.h>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
@@ -74,7 +77,7 @@ double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) {
static_cast<double>(user.QuadPart)) *
1e-7;
}
#else
#elif !defined(BENCHMARK_OS_FUCHSIA)
double MakeTime(struct rusage const& ru) {
return (static_cast<double>(ru.ru_utime.tv_sec) +
static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
@@ -162,6 +165,10 @@ double ThreadCPUUsage() {
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_SOLARIS)
struct rusage ru;
if (getrusage(RUSAGE_LWP, &ru) == 0) return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
#elif defined(CLOCK_THREAD_CPUTIME_ID)
struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts);
@@ -186,7 +193,6 @@ std::string DateTimeString(bool local) {
std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else
std::tm timeinfo;
std::memset(&timeinfo, 0, sizeof(std::tm));
::localtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
@@ -195,7 +201,6 @@ std::string DateTimeString(bool local) {
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else
std::tm timeinfo;
std::memset(&timeinfo, 0, sizeof(std::tm));
::gmtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif

46
utils/google-benchmark/test/AssemblyTests.cmake
Normal file
@@ -0,0 +1,46 @@

include(split_list)

set(ASM_TEST_FLAGS "")
check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
if (BENCHMARK_HAS_O3_FLAG)
list(APPEND ASM_TEST_FLAGS -O3)
endif()

check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG)
if (BENCHMARK_HAS_G0_FLAG)
list(APPEND ASM_TEST_FLAGS -g0)
endif()

check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
list(APPEND ASM_TEST_FLAGS -fno-stack-protector)
endif()

split_list(ASM_TEST_FLAGS)
string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER)

macro(add_filecheck_test name)
cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV})
add_library(${name} OBJECT ${name}.cc)
set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}")
set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s")
add_custom_target(copy_${name} ALL
COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py
$<TARGET_OBJECTS:${name}>
${ASM_OUTPUT_FILE}
BYPRODUCTS ${ASM_OUTPUT_FILE})
add_dependencies(copy_${name} ${name})
if (NOT ARG_CHECK_PREFIXES)
set(ARG_CHECK_PREFIXES "CHECK")
endif()
foreach(prefix ${ARG_CHECK_PREFIXES})
add_test(NAME run_${name}_${prefix}
COMMAND
${LLVM_FILECHECK_EXE} ${name}.cc
--input-file=${ASM_OUTPUT_FILE}
--check-prefixes=CHECK,CHECK-${ASM_TEST_COMPILER}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
endforeach()
endmacro()

@@ -22,6 +22,12 @@ if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
endforeach()
endif()

check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
set(BENCHMARK_O3_FLAG "")
if (BENCHMARK_HAS_O3_FLAG)
set(BENCHMARK_O3_FLAG "-O3")
endif()

# NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise
# they will break the configuration check.
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
@@ -35,6 +41,10 @@ macro(compile_benchmark_test name)
target_link_libraries(${name} benchmark ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_benchmark_test)

macro(compile_benchmark_test_with_main name)
add_executable(${name} "${name}.cc")
target_link_libraries(${name} benchmark_main)
endmacro(compile_benchmark_test_with_main)

macro(compile_output_test name)
add_executable(${name} "${name}.cc" output_test.h)
@@ -53,14 +63,23 @@ macro(add_filter_test name filter expect)
endmacro(add_filter_test)

add_filter_test(filter_simple "Foo" 3)
add_filter_test(filter_simple_negative "-Foo" 2)
add_filter_test(filter_suffix "BM_.*" 4)
add_filter_test(filter_suffix_negative "-BM_.*" 1)
add_filter_test(filter_regex_all ".*" 5)
add_filter_test(filter_regex_all_negative "-.*" 0)
add_filter_test(filter_regex_blank "" 5)
add_filter_test(filter_regex_blank_negative "-" 0)
add_filter_test(filter_regex_none "monkey" 0)
add_filter_test(filter_regex_none_negative "-monkey" 5)
add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
add_filter_test(filter_regex_wildcard_negative "-.*Foo.*" 2)
add_filter_test(filter_regex_begin "^BM_.*" 4)
add_filter_test(filter_regex_begin_negative "-^BM_.*" 1)
add_filter_test(filter_regex_begin2 "^N" 1)
add_filter_test(filter_regex_begin2_negative "-^N" 4)
add_filter_test(filter_regex_end ".*Ba$" 1)
add_filter_test(filter_regex_end_negative "-.*Ba$" 4)

compile_benchmark_test(options_test)
add_test(options_benchmarks options_test --benchmark_min_time=0.01)
@@ -94,6 +113,9 @@ add_test(map_test map_test --benchmark_min_time=0.01)
compile_benchmark_test(multiple_ranges_test)
add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01)

compile_benchmark_test_with_main(link_main_test)
add_test(link_main_test link_main_test --benchmark_min_time=0.01)

compile_output_test(reporter_output_test)
add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01)

@@ -103,9 +125,21 @@ add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01
compile_output_test(user_counters_test)
add_test(user_counters_test user_counters_test --benchmark_min_time=0.01)

compile_output_test(report_aggregates_only_test)
add_test(report_aggregates_only_test report_aggregates_only_test --benchmark_min_time=0.01)

compile_output_test(display_aggregates_only_test)
add_test(display_aggregates_only_test display_aggregates_only_test --benchmark_min_time=0.01)

compile_output_test(user_counters_tabular_test)
add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)

compile_output_test(user_counters_thousands_test)
add_test(user_counters_thousands_test user_counters_thousands_test --benchmark_min_time=0.01)

compile_output_test(memory_manager_test)
add_test(memory_manager_test memory_manager_test --benchmark_min_time=0.01)

check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
if (BENCHMARK_HAS_CXX03_FLAG)
compile_benchmark_test(cxx03_test)
@@ -144,8 +178,11 @@ if (BENCHMARK_ENABLE_GTEST_TESTS)
if (TARGET googletest)
add_dependencies(${name} googletest)
endif()
if (GTEST_INCLUDE_DIRS)
target_include_directories(${name} PRIVATE ${GTEST_INCLUDE_DIRS})
endif()
target_link_libraries(${name} benchmark
"${GTEST_BOTH_LIBRARIES}" ${CMAKE_THREAD_LIBS_INIT})
${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_gtest)

macro(add_gtest name)
@@ -153,9 +190,30 @@ if (BENCHMARK_ENABLE_GTEST_TESTS)
add_test(${name} ${name})
endmacro()

add_gtest(statistics_test)
add_gtest(benchmark_gtest)
add_gtest(statistics_gtest)
add_gtest(string_util_gtest)
endif(BENCHMARK_ENABLE_GTEST_TESTS)

###############################################################################
# Assembly Unit Tests
###############################################################################

if (BENCHMARK_ENABLE_ASSEMBLY_TESTS)
if (NOT LLVM_FILECHECK_EXE)
message(FATAL_ERROR "LLVM FileCheck is required when including this file")
endif()
include(AssemblyTests.cmake)
add_filecheck_test(donotoptimize_assembly_test)
add_filecheck_test(state_assembly_test)
add_filecheck_test(clobber_memory_assembly_test)
endif()


###############################################################################
# Code Coverage Configuration
###############################################################################

# Add the coverage command(s)
if(CMAKE_BUILD_TYPE)

@@ -99,13 +99,25 @@ BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();

void BM_KeepRunning(benchmark::State& state) {
size_t iter_count = 0;
assert(iter_count == state.iterations());
while (state.KeepRunning()) {
++iter_count;
}
assert(iter_count == state.max_iterations);
assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);

void BM_KeepRunningBatch(benchmark::State& state) {
// Choose a prime batch size to avoid evenly dividing max_iterations.
const size_t batch_size = 101;
size_t iter_count = 0;
while (state.KeepRunningBatch(batch_size)) {
iter_count += batch_size;
}
assert(state.iterations() == iter_count);
}
BENCHMARK(BM_KeepRunningBatch);
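
KeepRunningBatch(n) claims n iterations per call, and because 101 is prime it cannot divide max_iterations evenly; the point of the test is that the final batch may overshoot max_iterations, and state.iterations() must still equal the total actually claimed. A hedged sketch of the accounting with illustrative numbers:

// Not the library internals, just the arithmetic: with max_iterations = 1000
// and batch_size = 101, KeepRunningBatch returns true 10 times.
size_t claimed = 0;
const size_t batch_size = 101, max_iterations = 1000;
while (claimed < max_iterations) claimed += batch_size;
// claimed == 1010, so state.iterations() reports 1010, not 1000.
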

void BM_RangedFor(benchmark::State& state) {
size_t iter_count = 0;
for (auto _ : state) {
@@ -115,4 +127,10 @@ void BM_RangedFor(benchmark::State& state) {
}
BENCHMARK(BM_RangedFor);

// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(std::is_same<
typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
typename benchmark::State::StateIterator::value_type>::value, "");

BENCHMARK_MAIN();

33
utils/google-benchmark/test/benchmark_gtest.cc
Normal file
@@ -0,0 +1,33 @@
#include <vector>

#include "../src/benchmark_register.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

namespace {

TEST(AddRangeTest, Simple) {
std::vector<int> dst;
AddRange(&dst, 1, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}

TEST(AddRangeTest, Simple64) {
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}

TEST(AddRangeTest, Advanced) {
std::vector<int> dst;
AddRange(&dst, 5, 15, 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}

TEST(AddRangeTest, Advanced64) {
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}

} // end namespace
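
The Advanced cases pin down AddRange's observable contract: it emits lo, then every power of the multiplier strictly between lo and hi, then hi itself. A hedged reimplementation of just that contract, matching the expectations above (not the library's exact code):

#include <cstdint>
#include <vector>

std::vector<int64_t> AddRangeSketch(int64_t lo, int64_t hi, int mult) {
  std::vector<int64_t> out{lo};
  // Powers of `mult` that fall strictly inside (lo, hi).
  for (int64_t p = 1; p < hi; p *= mult)
    if (p > lo && p < hi) out.push_back(p);
  if (hi > lo) out.push_back(hi);
  return out;
}
// AddRangeSketch(5, 15, 2) -> {5, 8, 15}; AddRangeSketch(1, 2, 2) -> {1, 2}
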
@@ -40,8 +40,8 @@ double CalculatePi(int depth) {
return (pi - 1.0) * 4;
}

std::set<int> ConstructRandomSet(int size) {
std::set<int> s;
std::set<int64_t> ConstructRandomSet(int64_t size) {
std::set<int64_t> s;
for (int i = 0; i < size; ++i) s.insert(s.end(), i);
return s;
}
@@ -64,7 +64,7 @@ BENCHMARK(BM_Factorial)->UseRealTime();

static void BM_CalculatePiRange(benchmark::State& state) {
double pi = 0.0;
for (auto _ : state) pi = CalculatePi(state.range(0));
for (auto _ : state) pi = CalculatePi(static_cast<int>(state.range(0)));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
@@ -74,7 +74,7 @@ BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State& state) {
static const int depth = 1024;
for (auto _ : state) {
benchmark::DoNotOptimize(CalculatePi(depth));
benchmark::DoNotOptimize(CalculatePi(static_cast<int>(depth)));
}
}
BENCHMARK(BM_CalculatePi)->Threads(8);
@@ -82,7 +82,7 @@ BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();

static void BM_SetInsert(benchmark::State& state) {
std::set<int> data;
std::set<int64_t> data;
for (auto _ : state) {
state.PauseTiming();
data = ConstructRandomSet(state.range(0));
@@ -103,9 +103,9 @@ static void BM_Sequential(benchmark::State& state) {
ValueType v = 42;
for (auto _ : state) {
Container c;
for (int i = state.range(0); --i;) c.push_back(v);
for (int64_t i = state.range(0); --i;) c.push_back(v);
}
const size_t items_processed = state.iterations() * state.range(0);
const int64_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
}
@@ -118,8 +118,9 @@ BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif

static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
size_t len = static_cast<size_t>(state.range(0));
std::string s1(len, '-');
std::string s2(len, '-');
for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
@@ -154,13 +155,13 @@ static void BM_LongTest(benchmark::State& state) {
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);

static void BM_ParallelMemset(benchmark::State& state) {
int size = state.range(0) / static_cast<int>(sizeof(int));
int thread_size = size / state.threads;
int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
int thread_size = static_cast<int>(size) / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;

if (state.thread_index == 0) {
test_vector = new std::vector<int>(size);
test_vector = new std::vector<int>(static_cast<size_t>(size));
}

for (auto _ : state) {
@@ -178,8 +179,8 @@ static void BM_ParallelMemset(benchmark::State& state) {
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);

static void BM_ManualTiming(benchmark::State& state) {
size_t slept_for = 0;
int microseconds = state.range(0);
int64_t slept_for = 0;
int64_t microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{
static_cast<double>(microseconds)};

64
utils/google-benchmark/test/clobber_memory_assembly_test.cc
Normal file
@@ -0,0 +1,64 @@
#include <benchmark/benchmark.h>

#ifdef __clang__
#pragma clang diagnostic ignored "-Wreturn-type"
#endif

extern "C" {

extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;

}

// CHECK-LABEL: test_basic:
extern "C" void test_basic() {
int x;
benchmark::DoNotOptimize(&x);
x = 101;
benchmark::ClobberMemory();
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: movl $101, [[DEST]]
// CHECK: ret
}

// CHECK-LABEL: test_redundant_store:
extern "C" void test_redundant_store() {
ExternInt = 3;
benchmark::ClobberMemory();
ExternInt = 51;
// CHECK-DAG: ExternInt
// CHECK-DAG: movl $3
// CHECK: movl $51
}

// CHECK-LABEL: test_redundant_read:
extern "C" void test_redundant_read() {
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;
benchmark::ClobberMemory();
x = ExternInt2;
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK-NOT: ExternInt2
// CHECK: ret
}

// CHECK-LABEL: test_redundant_read2:
extern "C" void test_redundant_read2() {
int x;
benchmark::DoNotOptimize(&x);
x = ExternInt;
benchmark::ClobberMemory();
x = ExternInt2;
benchmark::ClobberMemory();
// CHECK: leaq [[DEST:[^,]+]], %rax
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK: ExternInt2(%rip)
// CHECK: movl %eax, [[DEST]]
// CHECK: ret
}
@@ -12,9 +12,10 @@ namespace {
#define ADD_COMPLEXITY_CASES(...) \
int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)

int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
std::string big_o) {
SetSubstitutions({{"%bigo_name", big_o_test_name},
int AddComplexityTest(std::string test_name, std::string big_o_test_name,
std::string rms_test_name, std::string big_o) {
SetSubstitutions({{"%name", test_name},
{"%bigo_name", big_o_test_name},
{"%rms_name", rms_test_name},
{"%bigo_str", "[ ]* %float " + big_o},
{"%bigo", big_o},
@@ -25,12 +26,18 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
{"^%bigo_name", MR_Not},  // Assert we we didn't only matched a name.
{"^%rms_name %rms %rms[ ]*$", MR_Next}});
AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"},
{"\"run_name\": \"%name\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"BigO\",$", MR_Next},
{"\"cpu_coefficient\": %float,$", MR_Next},
{"\"real_coefficient\": %float,$", MR_Next},
{"\"big_o\": \"%bigo\",$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next},
{"\"name\": \"%rms_name\",$"},
{"\"run_name\": \"%name\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"RMS\",$", MR_Next},
{"\"rms\": %float$", MR_Next},
{"}", MR_Next}});
AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
@@ -55,10 +62,11 @@ void BM_Complexity_O1(benchmark::State& state) {
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int) {
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int64_t) {
return 1.0;
});

const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
const char *enum_big_o_1 = "\\([0-9]+\\)";
@@ -69,31 +77,34 @@ const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, enum_big_o_1);
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
enum_big_o_1);

// Add auto enum tests
ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, auto_big_o_1);
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
auto_big_o_1);

// Add lambda tests
ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1);
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
lambda_big_o_1);

// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //

std::vector<int> ConstructRandomVector(int size) {
std::vector<int> ConstructRandomVector(int64_t size) {
std::vector<int> v;
v.reserve(size);
v.reserve(static_cast<int>(size));
for (int i = 0; i < size; ++i) {
v.push_back(std::rand() % size);
v.push_back(static_cast<int>(std::rand() % size));
}
return v;
}

void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
const int item_not_in_vector =
state.range(0) * 2;  // Test worst case scenario (item not in vector)
// Test worst case scenario (item not in vector)
const int64_t item_not_in_vector = state.range(0) * 2;
for (auto _ : state) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
@@ -106,22 +117,25 @@ BENCHMARK(BM_Complexity_O_N)
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](int n) -> double { return n; });
->Complexity([](int64_t n) -> double { return static_cast<double>(n); });
BENCHMARK(BM_Complexity_O_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();

const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n);
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
enum_auto_big_o_n);

// Add lambda tests
ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
lambda_big_o_n);

// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
@@ -134,6 +148,7 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) {
}
state.SetComplexityN(state.range(0));
}
static const double kLog2E = 1.44269504088896340736;
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
@@ -141,24 +156,25 @@ BENCHMARK(BM_Complexity_O_N_log_N)
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity([](int n) { return n * log2(n); });
->Complexity([](int64_t n) { return kLog2E * n * log(static_cast<double>(n)); });
BENCHMARK(BM_Complexity_O_N_log_N)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16)
->Complexity();

const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";

// Add enum tests
ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name,
enum_auto_big_o_n_lg_n);
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n);

// Add lambda tests
ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name,
lambda_big_o_n_lg_n);
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n);

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
43
utils/google-benchmark/test/display_aggregates_only_test.cc
Normal file
@@ -0,0 +1,43 @@

#undef NDEBUG
#include <cstdio>
#include <string>

#include "benchmark/benchmark.h"
#include "output_test.h"

// Ok this test is super ugly. We want to check what happens with the file
// reporter in the presence of DisplayAggregatesOnly().
// We do not care about console output, the normal tests check that already.

void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly();

int main(int argc, char* argv[]) {
const std::string output = GetFileReporterOutput(argc, argv);

if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 6 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
1) {
std::cout << "Precondition mismatch. Expected to only find 6 "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
"output:\n";
std::cout << output;
return 1;
}

return 0;
}
163
utils/google-benchmark/test/donotoptimize_assembly_test.cc
Normal file
@@ -0,0 +1,163 @@
#include <benchmark/benchmark.h>

#ifdef __clang__
#pragma clang diagnostic ignored "-Wreturn-type"
#endif

extern "C" {

extern int ExternInt;
extern int ExternInt2;
extern int ExternInt3;

inline int Add42(int x) { return x + 42; }

struct NotTriviallyCopyable {
NotTriviallyCopyable();
explicit NotTriviallyCopyable(int x) : value(x) {}
NotTriviallyCopyable(NotTriviallyCopyable const&);
int value;
};

struct Large {
int value;
int data[2];
};

}
// CHECK-LABEL: test_with_rvalue:
extern "C" void test_with_rvalue() {
benchmark::DoNotOptimize(Add42(0));
// CHECK: movl $42, %eax
// CHECK: ret
}

// CHECK-LABEL: test_with_large_rvalue:
extern "C" void test_with_large_rvalue() {
benchmark::DoNotOptimize(Large{ExternInt, {ExternInt, ExternInt}});
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]]
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
}

// CHECK-LABEL: test_with_non_trivial_rvalue:
extern "C" void test_with_non_trivial_rvalue() {
benchmark::DoNotOptimize(NotTriviallyCopyable(ExternInt));
// CHECK: mov{{l|q}} ExternInt(%rip)
// CHECK: ret
}

// CHECK-LABEL: test_with_lvalue:
extern "C" void test_with_lvalue() {
int x = 101;
benchmark::DoNotOptimize(x);
// CHECK-GNU: movl $101, %eax
// CHECK-CLANG: movl $101, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
}

// CHECK-LABEL: test_with_large_lvalue:
extern "C" void test_with_large_lvalue() {
Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
}

// CHECK-LABEL: test_with_non_trivial_lvalue:
extern "C" void test_with_non_trivial_lvalue() {
NotTriviallyCopyable NTC(ExternInt);
benchmark::DoNotOptimize(NTC);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
}

// CHECK-LABEL: test_with_const_lvalue:
extern "C" void test_with_const_lvalue() {
const int x = 123;
benchmark::DoNotOptimize(x);
// CHECK: movl $123, %eax
// CHECK: ret
}

// CHECK-LABEL: test_with_large_const_lvalue:
extern "C" void test_with_large_const_lvalue() {
const Large L{ExternInt, {ExternInt, ExternInt}};
benchmark::DoNotOptimize(L);
// CHECK: ExternInt(%rip)
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: movl %eax, -{{[0-9]+}}(%[[REG]])
// CHECK: ret
}

// CHECK-LABEL: test_with_non_trivial_const_lvalue:
extern "C" void test_with_non_trivial_const_lvalue() {
const NotTriviallyCopyable Obj(ExternInt);
benchmark::DoNotOptimize(Obj);
// CHECK: mov{{q|l}} ExternInt(%rip)
// CHECK: ret
}

// CHECK-LABEL: test_div_by_two:
extern "C" int test_div_by_two(int input) {
int divisor = 2;
benchmark::DoNotOptimize(divisor);
return input / divisor;
// CHECK: movl $2, [[DEST:.*]]
// CHECK: idivl [[DEST]]
// CHECK: ret
}

// CHECK-LABEL: test_inc_integer:
extern "C" int test_inc_integer() {
int x = 0;
for (int i=0; i < 5; ++i)
benchmark::DoNotOptimize(++x);
// CHECK: movl $1, [[DEST:.*]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK: {{(addl \$1,|incl)}} [[DEST]]
// CHECK-CLANG: movl [[DEST]], %eax
// CHECK: ret
return x;
}

// CHECK-LABEL: test_pointer_rvalue
extern "C" void test_pointer_rvalue() {
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
int x = 42;
benchmark::DoNotOptimize(&x);
}

// CHECK-LABEL: test_pointer_const_lvalue:
extern "C" void test_pointer_const_lvalue() {
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]])
// CHECK: ret
int x = 42;
int * const xp = &x;
benchmark::DoNotOptimize(xp);
}

// CHECK-LABEL: test_pointer_lvalue:
extern "C" void test_pointer_lvalue() {
// CHECK: movl $42, [[DEST:.*]]
// CHECK: leaq [[DEST]], %rax
// CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z+]+]])
// CHECK: ret
int x = 42;
int *xp = &x;
benchmark::DoNotOptimize(xp);
}
@@ -28,13 +28,13 @@ private:
int main(int, char*[]) {
// this test verifies compilation of DoNotOptimize() for some types

char buffer8[8];
char buffer8[8] = "";
benchmark::DoNotOptimize(buffer8);

char buffer20[20];
char buffer20[20] = "";
benchmark::DoNotOptimize(buffer20);

char buffer1024[1024];
char buffer1024[1024] = "";
benchmark::DoNotOptimize(buffer1024);
benchmark::DoNotOptimize(&buffer1024[0]);

8
utils/google-benchmark/test/link_main_test.cc
Normal file
@@ -0,0 +1,8 @@
#include "benchmark/benchmark.h"

void BM_empty(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
@@ -8,7 +8,7 @@ namespace {
std::map<int, int> ConstructRandomMap(int size) {
std::map<int, int> m;
for (int i = 0; i < size; ++i) {
m.insert(std::make_pair(rand() % size, rand() % size));
m.insert(std::make_pair(std::rand() % size, std::rand() % size));
}
return m;
}
@@ -17,14 +17,14 @@ std::map<int, int> ConstructRandomMap(int size) {

// Basic version.
static void BM_MapLookup(benchmark::State& state) {
const int size = state.range(0);
const int size = static_cast<int>(state.range(0));
std::map<int, int> m;
for (auto _ : state) {
state.PauseTiming();
m = ConstructRandomMap(size);
state.ResumeTiming();
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);
@@ -35,7 +35,7 @@ BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12);
class MapFixture : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& st) {
m = ConstructRandomMap(st.range(0));
m = ConstructRandomMap(static_cast<int>(st.range(0)));
}

void TearDown(const ::benchmark::State&) { m.clear(); }
@@ -44,10 +44,10 @@ class MapFixture : public ::benchmark::Fixture {
};

BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
const int size = state.range(0);
const int size = static_cast<int>(state.range(0));
for (auto _ : state) {
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
benchmark::DoNotOptimize(m.find(std::rand() % size));
}
}
state.SetItemsProcessed(state.iterations() * size);

42
utils/google-benchmark/test/memory_manager_test.cc
Normal file
42
utils/google-benchmark/test/memory_manager_test.cc
Normal file
@@ -0,0 +1,42 @@
#include <memory>

#include "../src/check.h"
#include "benchmark/benchmark.h"
#include "output_test.h"

class TestMemoryManager : public benchmark::MemoryManager {
void Start() {}
void Stop(Result* result) {
result->num_allocs = 42;
result->max_bytes_used = 42000;
}
};

void BM_empty(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);

ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"},
{"\"run_name\": \"BM_empty\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"allocs_per_iter\": %float,$", MR_Next},
{"\"max_bytes_used\": 42000$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}});


int main(int argc, char *argv[]) {
std::unique_ptr<benchmark::MemoryManager> mm(new TestMemoryManager());

benchmark::RegisterMemoryManager(mm.get());
RunOutputTests(argc, argv);
benchmark::RegisterMemoryManager(nullptr);
}
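
The MemoryManager hooks shown above are the whole interface: Start() runs
before the benchmark, Stop(Result*) after it, and the JSON reporter derives
allocs_per_iter from Result::num_allocs. A sketch of a manager with real
bookkeeping (CountingMemoryManager and its counters are assumptions for
illustration, not part of this change):

class CountingMemoryManager : public benchmark::MemoryManager {
 public:
  void Start() { num_allocs_ = 0; }
  void Stop(Result* result) {
    result->num_allocs = num_allocs_;      // hypothetical allocation counter
    result->max_bytes_used = peak_bytes_;  // hypothetical high-water mark
  }

 private:
  int64_t num_allocs_ = 0;
  int64_t peak_bytes_ = 0;
};
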
@@ -1,7 +1,9 @@
#include "benchmark/benchmark.h"

#include <cassert>
#include <iostream>
#include <set>
#include <vector>

class MultipleRangesFixture : public ::benchmark::Fixture {
public:
@@ -27,25 +29,46 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
{7, 6, 3}}) {}

void SetUp(const ::benchmark::State& state) {
std::vector<int> ranges = {state.range(0), state.range(1), state.range(2)};
std::vector<int64_t> ranges = {state.range(0), state.range(1),
state.range(2)};

assert(expectedValues.find(ranges) != expectedValues.end());

actualValues.insert(ranges);
}

// NOTE: This is not TearDown as we want to check after _all_ runs are
// complete.
virtual ~MultipleRangesFixture() {
assert(actualValues.size() == expectedValues.size());
if (actualValues.size() != expectedValues.size()) {
std::cout << "EXPECTED\n";
for (auto v : expectedValues) {
std::cout << "{";
for (int64_t iv : v) {
std::cout << iv << ", ";
}
std::cout << "}\n";
}
std::cout << "ACTUAL\n";
for (auto v : actualValues) {
std::cout << "{";
for (int64_t iv : v) {
std::cout << iv << ", ";
}
std::cout << "}\n";
}
}
}

std::set<std::vector<int>> expectedValues;
std::set<std::vector<int>> actualValues;
std::set<std::vector<int64_t>> expectedValues;
std::set<std::vector<int64_t>> actualValues;
};

BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
for (auto _ : state) {
int product = state.range(0) * state.range(1) * state.range(2);
for (int x = 0; x < product; x++) {
int64_t product = state.range(0) * state.range(1) * state.range(2);
for (int64_t x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
}
}

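
The fixture above records each observed triple of range values so that its
destructor can verify that every expected argument combination was actually
run; that set is what moved to int64_t. A hedged sketch of how such
combinations are typically registered (the concrete ranges here are
illustrative, not copied from this test):

BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty)
    ->RangeMultiplier(2)
    ->Ranges({{1, 8}, {128, 512}, {2, 8}})  // cartesian product of ranges
    ->Args({7, 6, 3});                      // plus one explicit triple
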
@@ -2,13 +2,13 @@
#define TEST_OUTPUT_TEST_H

#undef NDEBUG
#include <functional>
#include <initializer_list>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include <functional>
#include <sstream>

#include "../src/re.h"
#include "benchmark/benchmark.h"
@@ -60,6 +60,13 @@ int SetSubstitutions(
// Run all output tests.
void RunOutputTests(int argc, char* argv[]);

// Count the number of 'pat' substrings in the 'haystack' string.
int SubstrCnt(const std::string& haystack, const std::string& pat);

// Run registered benchmarks with file reporter enabled, and return the content
// outputted by the file reporter.
std::string GetFileReporterOutput(int argc, char* argv[]);

// ========================================================================= //
// ------------------------- Results checking ------------------------------ //
// ========================================================================= //
@@ -73,26 +80,27 @@ void RunOutputTests(int argc, char* argv[]);
// will be the subject of a call to checker_function
// checker_function: should be of type ResultsCheckFn (see below)
#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \
size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)
size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function)

struct Results;
typedef std::function< void(Results const&) > ResultsCheckFn;
typedef std::function<void(Results const&)> ResultsCheckFn;

size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn);

// Class holding the results of a benchmark.
// It is passed in calls to checker functions.
struct Results {

// the benchmark name
std::string name;
// the benchmark fields
std::map< std::string, std::string > values;
std::map<std::string, std::string> values;

Results(const std::string& n) : name(n) {}

int NumThreads() const;

double NumIterations() const;

typedef enum { kCpuTime, kRealTime } BenchmarkTime;

// get cpu_time or real_time in seconds
@@ -102,18 +110,18 @@ struct Results {
// it is better to use fuzzy float checks for this, as the float
// ASCII formatting is lossy.
double DurationRealTime() const {
return GetAs< double >("iterations") * GetTime(kRealTime);
return NumIterations() * GetTime(kRealTime);
}
// get the cpu_time duration of the benchmark in seconds
double DurationCPUTime() const {
return GetAs< double >("iterations") * GetTime(kCpuTime);
return NumIterations() * GetTime(kCpuTime);
}

// get the string for a result by name, or nullptr if the name
// is not found
const std::string* Get(const char* entry_name) const {
auto it = values.find(entry_name);
if(it == values.end()) return nullptr;
if (it == values.end()) return nullptr;
return &it->second;
}

@@ -126,15 +134,15 @@ struct Results {
// as a double, and only then converted to the asked type.
template <class T>
T GetCounterAs(const char* entry_name) const {
double dval = GetAs< double >(entry_name);
T tval = static_cast< T >(dval);
double dval = GetAs<double>(entry_name);
T tval = static_cast<T>(dval);
return tval;
}
};

template <class T>
T Results::GetAs(const char* entry_name) const {
auto *sv = Get(entry_name);
auto* sv = Get(entry_name);
CHECK(sv != nullptr && !sv->empty());
std::stringstream ss;
ss << *sv;
@@ -148,6 +156,8 @@ T Results::GetAs(const char* entry_name) const {
// Macros to help in result checking. Do not use them with arguments causing
// side-effects.

// clang-format off

#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \
CONCAT(CHECK_, relationship) \
(entry.getfn< var_type >(var_name), (value)) << "\n" \
@@ -188,6 +198,8 @@ T Results::GetAs(const char* entry_name) const {
#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \
_CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor)

// clang-format on

// ========================================================================= //
// --------------------------- Misc Utilities ------------------------------ //
// ========================================================================= //

@@ -1,13 +1,16 @@
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include <cstring>
#include <streambuf>

#include "../src/benchmark_api_internal.h"
#include "../src/check.h" // NOTE: check.h is for internal use only!
#include "../src/re.h" // NOTE: re.h is for internal use only
#include "output_test.h"
#include "../src/benchmark_api_internal.h"

// ========================================================================= //
// ------------------------------ Internals -------------------------------- //
@@ -33,6 +36,7 @@ TestCaseList& GetTestCaseList(TestCaseID ID) {

SubMap& GetSubstitutions() {
// Don't use 'dec_re' from header because it may not yet be initialized.
// clang-format off
static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
static SubMap map = {
{"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"},
@@ -40,9 +44,11 @@ SubMap& GetSubstitutions() {
{"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"},
{"%int", "[ ]*[0-9]+"},
{" %s ", "[ ]+"},
{"%time", "[ ]*[0-9]{1,5} ns"},
{"%console_report", "[ ]*[0-9]{1,5} ns [ ]*[0-9]{1,5} ns [ ]*[0-9]+"},
{"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"},
{"%time", "[ ]*[0-9]+ ns"},
{"%console_report", "[ ]*[0-9]+ ns [ ]*[0-9]+ ns [ ]*[0-9]+"},
{"%console_time_only_report", "[ ]*[0-9]+ ns [ ]*[0-9]+ ns"},
{"%console_us_report", "[ ]*[0-9]+ us [ ]*[0-9]+ us [ ]*[0-9]+"},
{"%console_us_time_only_report", "[ ]*[0-9]+ us [ ]*[0-9]+ us"},
{"%csv_header",
"name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
"items_per_second,label,error_occurred,error_message"},
@@ -57,6 +63,7 @@ SubMap& GetSubstitutions() {
"," + safe_dec_re + ",,,"},
{"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"},
{"%csv_label_report_end", ",,"}};
// clang-format on
return map;
}

@@ -147,9 +154,9 @@ class TestReporter : public benchmark::BenchmarkReporter {
}

private:
std::vector<benchmark::BenchmarkReporter *> reporters_;
std::vector<benchmark::BenchmarkReporter*> reporters_;
};
}
} // namespace

} // end namespace internal

@@ -163,28 +170,25 @@ namespace internal {
// It works by parsing the CSV output to read the results.
class ResultsChecker {
public:

struct PatternAndFn : public TestCase { // reusing TestCase for its regexes
struct PatternAndFn : public TestCase { // reusing TestCase for its regexes
PatternAndFn(const std::string& rx, ResultsCheckFn fn_)
: TestCase(rx), fn(fn_) {}
: TestCase(rx), fn(fn_) {}
ResultsCheckFn fn;
};

std::vector< PatternAndFn > check_patterns;
std::vector< Results > results;
std::vector< std::string > field_names;
std::vector<PatternAndFn> check_patterns;
std::vector<Results> results;
std::vector<std::string> field_names;

void Add(const std::string& entry_pattern, ResultsCheckFn fn);

void CheckResults(std::stringstream& output);

private:

void SetHeader_(const std::string& csv_header);
void SetValues_(const std::string& entry_csv_line);

std::vector< std::string > SplitCsv_(const std::string& line);

std::vector<std::string> SplitCsv_(const std::string& line);
};

// store the static ResultsChecker in a function to prevent initialization
@@ -207,7 +211,7 @@ void ResultsChecker::CheckResults(std::stringstream& output) {
// clear before calling tellg()
output.clear();
// seek to zero only when needed
if(output.tellg() > start) output.seekg(start);
if (output.tellg() > start) output.seekg(start);
// and just in case
output.clear();
}
@@ -218,18 +222,18 @@ void ResultsChecker::CheckResults(std::stringstream& output) {
CHECK(output.good());
std::getline(output, line);
if (on_first) {
SetHeader_(line); // this is important
SetHeader_(line); // this is important
on_first = false;
continue;
}
SetValues_(line);
}
// finally we can call the subscribed check functions
for(const auto& p : check_patterns) {
for (const auto& p : check_patterns) {
VLOG(2) << "--------------------------------\n";
VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
for(const auto& r : results) {
if(!p.regex->Match(r.name)) {
for (const auto& r : results) {
if (!p.regex->Match(r.name)) {
VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
continue;
} else {
@@ -249,51 +253,50 @@ void ResultsChecker::SetHeader_(const std::string& csv_header) {

// set the values for a benchmark
void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
if(entry_csv_line.empty()) return; // some lines are empty
if (entry_csv_line.empty()) return; // some lines are empty
CHECK(!field_names.empty());
auto vals = SplitCsv_(entry_csv_line);
CHECK_EQ(vals.size(), field_names.size());
results.emplace_back(vals[0]); // vals[0] is the benchmark name
auto &entry = results.back();
results.emplace_back(vals[0]); // vals[0] is the benchmark name
auto& entry = results.back();
for (size_t i = 1, e = vals.size(); i < e; ++i) {
entry.values[field_names[i]] = vals[i];
}
}

// a quick'n'dirty csv splitter (eliminating quotes)
std::vector< std::string > ResultsChecker::SplitCsv_(const std::string& line) {
std::vector< std::string > out;
if(line.empty()) return out;
if(!field_names.empty()) out.reserve(field_names.size());
std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) {
std::vector<std::string> out;
if (line.empty()) return out;
if (!field_names.empty()) out.reserve(field_names.size());
size_t prev = 0, pos = line.find_first_of(','), curr = pos;
while(pos != line.npos) {
while (pos != line.npos) {
CHECK(curr > 0);
if(line[prev] == '"') ++prev;
if(line[curr-1] == '"') --curr;
out.push_back(line.substr(prev, curr-prev));
if (line[prev] == '"') ++prev;
if (line[curr - 1] == '"') --curr;
out.push_back(line.substr(prev, curr - prev));
prev = pos + 1;
pos = line.find_first_of(',', pos + 1);
curr = pos;
}
curr = line.size();
if(line[prev] == '"') ++prev;
if(line[curr-1] == '"') --curr;
out.push_back(line.substr(prev, curr-prev));
if (line[prev] == '"') ++prev;
if (line[curr - 1] == '"') --curr;
out.push_back(line.substr(prev, curr - prev));
return out;
}

} // end namespace internal

size_t AddChecker(const char* bm_name, ResultsCheckFn fn)
{
auto &rc = internal::GetResultsChecker();
size_t AddChecker(const char* bm_name, ResultsCheckFn fn) {
auto& rc = internal::GetResultsChecker();
rc.Add(bm_name, fn);
return rc.results.size();
}

int Results::NumThreads() const {
auto pos = name.find("/threads:");
if(pos == name.npos) return 1;
if (pos == name.npos) return 1;
auto end = name.find('/', pos + 9);
std::stringstream ss;
ss << name.substr(pos + 9, end);
@@ -303,19 +306,23 @@ int Results::NumThreads() const {
return num;
}

double Results::NumIterations() const {
return GetAs<double>("iterations");
}

double Results::GetTime(BenchmarkTime which) const {
CHECK(which == kCpuTime || which == kRealTime);
const char *which_str = which == kCpuTime ? "cpu_time" : "real_time";
double val = GetAs< double >(which_str);
const char* which_str = which == kCpuTime ? "cpu_time" : "real_time";
double val = GetAs<double>(which_str);
auto unit = Get("time_unit");
CHECK(unit);
if(*unit == "ns") {
if (*unit == "ns") {
return val * 1.e-9;
} else if(*unit == "us") {
} else if (*unit == "us") {
return val * 1.e-6;
} else if(*unit == "ms") {
} else if (*unit == "ms") {
return val * 1.e-3;
} else if(*unit == "s") {
} else if (*unit == "s") {
return val;
} else {
CHECK(1 == 0) << "unknown time unit: " << *unit;
@@ -333,7 +340,7 @@ TestCase::TestCase(std::string re, int rule)
substituted_regex(internal::PerformSubstitutions(regex_str)),
regex(std::make_shared<benchmark::Regex>()) {
std::string err_str;
regex->Init(substituted_regex,& err_str);
regex->Init(substituted_regex, &err_str);
CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex
<< "\""
<< "\n originally \"" << regex_str << "\""
@@ -367,7 +374,7 @@ int SetSubstitutions(
void RunOutputTests(int argc, char* argv[]) {
using internal::GetTestCaseList;
benchmark::Initialize(&argc, argv);
auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/true);
auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true);
benchmark::ConsoleReporter CR(options);
benchmark::JSONReporter JR;
benchmark::CSVReporter CSVR;
@@ -416,8 +423,42 @@ void RunOutputTests(int argc, char* argv[]) {

// now that we know the output is as expected, we can dispatch
// the checks to subscribees.
auto &csv = TestCases[2];
auto& csv = TestCases[2];
// would use == but gcc spits a warning
CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
internal::GetResultsChecker().CheckResults(csv.out_stream);
}

int SubstrCnt(const std::string& haystack, const std::string& pat) {
if (pat.length() == 0) return 0;
int count = 0;
for (size_t offset = haystack.find(pat); offset != std::string::npos;
offset = haystack.find(pat, offset + pat.length()))
++count;
return count;
}

std::string GetFileReporterOutput(int argc, char* argv[]) {
std::vector<char*> new_argv(argv, argv + argc);
assert(static_cast<decltype(new_argv)::size_type>(argc) == new_argv.size());

std::string tmp_file_name = std::tmpnam(nullptr);
std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n';

std::string tmp = "--benchmark_out=";
tmp += tmp_file_name;
new_argv.emplace_back(const_cast<char*>(tmp.c_str()));

argc = int(new_argv.size());

benchmark::Initialize(&argc, new_argv.data());
benchmark::RunSpecifiedBenchmarks();

// Read the output back from the file, and delete the file.
std::ifstream tmp_stream(tmp_file_name);
std::string output = std::string((std::istreambuf_iterator<char>(tmp_stream)),
std::istreambuf_iterator<char>());
std::remove(tmp_file_name.c_str());

return output;
}

@@ -29,14 +29,16 @@ struct TestCase {
typedef benchmark::BenchmarkReporter::Run Run;

void CheckRun(Run const& run) const {
CHECK(name == run.benchmark_name) << "expected " << name << " got "
<< run.benchmark_name;
// clang-format off
CHECK(name == run.benchmark_name()) << "expected " << name << " got "
<< run.benchmark_name();
if (label) {
CHECK(run.report_label == label) << "expected " << label << " got "
<< run.report_label;
} else {
CHECK(run.report_label == "");
}
// clang-format on
}
};
39
utils/google-benchmark/test/report_aggregates_only_test.cc
Normal file
@@ -0,0 +1,39 @@

#undef NDEBUG
#include <cstdio>
#include <string>

#include "benchmark/benchmark.h"
#include "output_test.h"

// Ok this test is super ugly. We want to check what happens with the file
// reporter in the presence of ReportAggregatesOnly().
// We do not care about console output, the normal tests check that already.

void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();

int main(int argc, char* argv[]) {
const std::string output = GetFileReporterOutput(argc, argv);

if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 3 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") !=
1 ||
SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") !=
1) {
std::cout << "Precondition mismatch. Expected to only find three "
"occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n"
"\"name\": \"BM_SummaryRepeat/repeats:3_mean\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_median\", "
"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"\nThe entire "
"output:\n";
std::cout << output;
return 1;
}

return 0;
}
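
GetFileReporterOutput(), declared in output_test.h above, reruns the
registered benchmarks with --benchmark_out pointing at a temporary file and
returns that file's contents, so this test can count JSON entries with
SubstrCnt(). With ReportAggregatesOnly() the file reporter sees only the
three aggregate entries; DisplayAggregatesOnly(), exercised later in
reporter_output_test.cc, is meant to trim the console display instead. A
hedged usage sketch mirroring the check above:

int main(int argc, char* argv[]) {
  const std::string output = GetFileReporterOutput(argc, argv);
  // Exactly one mean entry should survive ReportAggregatesOnly().
  if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1)
    return 1;
  return 0;
}
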
@@ -9,24 +9,28 @@
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //

ADD_CASES(TC_ConsoleOut,
{{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
{"^[-]+$", MR_Next}});
ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
{"^[-]+$", MR_Next}});
static int AddContextCases() {
AddCases(TC_ConsoleErr,
{
{"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
{"Run on \\(%int X %float MHz CPU s\\)", MR_Next},
{"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
{"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
});
AddCases(TC_JSONOut, {{"^\\{", MR_Default},
{"\"context\":", MR_Next},
{"\"date\": \"", MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
{"\"cpu_scaling_enabled\": ", MR_Next},
{"\"caches\": \\[$", MR_Next}});
auto const& Caches = benchmark::CPUInfo::Get().caches;
AddCases(TC_JSONOut,
{{"^\\{", MR_Default},
{"\"context\":", MR_Next},
{"\"date\": \"", MR_Next},
{"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
{"\"cpu_scaling_enabled\": ", MR_Next},
{"\"caches\": \\[$", MR_Next}});
auto const& Info = benchmark::CPUInfo::Get();
auto const& Caches = Info.caches;
if (!Caches.empty()) {
AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
}
@@ -43,8 +47,13 @@ static int AddContextCases() {
{"\"num_sharing\": %int$", MR_Next},
{"}[,]{0,1}$", MR_Next}});
}

AddCases(TC_JSONOut, {{"],$"}});
auto const& LoadAvg = Info.load_avg;
if (!LoadAvg.empty()) {
AddCases(TC_ConsoleErr,
{{"Load Average: (%float, ){0,2}%float$", MR_Next}});
}
AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
return 0;
}
int dummy_register = AddContextCases();
@@ -62,6 +71,8 @@ BENCHMARK(BM_basic);

ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
{"\"run_name\": \"BM_basic\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -80,9 +91,11 @@ void BM_bytes_per_second(benchmark::State& state) {
}
BENCHMARK(BM_bytes_per_second);

ADD_CASES(TC_ConsoleOut,
{{"^BM_bytes_per_second %console_report +%float[kM]{0,1}B/s$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report "
"bytes_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
{"\"run_name\": \"BM_bytes_per_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -102,9 +115,11 @@ void BM_items_per_second(benchmark::State& state) {
}
BENCHMARK(BM_items_per_second);

ADD_CASES(TC_ConsoleOut,
{{"^BM_items_per_second %console_report +%float[kM]{0,1} items/s$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report "
"items_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
{"\"run_name\": \"BM_items_per_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -126,6 +141,8 @@ BENCHMARK(BM_label);

ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
{"\"run_name\": \"BM_label\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
@@ -147,6 +164,8 @@ void BM_error(benchmark::State& state) {
BENCHMARK(BM_error);
ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"},
{"\"run_name\": \"BM_error\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"error_occurred\": true,$", MR_Next},
{"\"error_message\": \"message\",$", MR_Next}});

@@ -163,7 +182,9 @@ void BM_no_arg_name(benchmark::State& state) {
}
BENCHMARK(BM_no_arg_name)->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"},
{"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});

// ========================================================================= //
@@ -176,7 +197,9 @@ void BM_arg_name(benchmark::State& state) {
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"},
{"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});

// ========================================================================= //
@@ -190,7 +213,10 @@ void BM_arg_names(benchmark::State& state) {
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
ADD_CASES(TC_ConsoleOut,
{{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
{"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});

// ========================================================================= //
@@ -219,16 +245,33 @@ void BM_Repeat(benchmark::State& state) {
}
// need two repetitions min to be able to output any aggregate output
BENCHMARK(BM_Repeat)->Repetitions(2);
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2_mean %console_report$"},
{"^BM_Repeat/repeats:2_median %console_report$"},
{"^BM_Repeat/repeats:2_stddev %console_report$"}});
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
{"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2\",$"},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_mean\",$"},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_median\",$"},
{"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}});
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_stddev\",$"},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_mean\",%csv_report$"},
@@ -236,18 +279,37 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
// but for two repetitions, mean and median is the same, so let's repeat..
BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_report$"},
{"^BM_Repeat/repeats:3_median %console_report$"},
{"^BM_Repeat/repeats:3_stddev %console_report$"}});
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_median\",$"},
{"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}});
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_stddev\",$"},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
@@ -256,20 +318,41 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// median differs between even/odd number of repetitions, so just to be sure
BENCHMARK(BM_Repeat)->Repetitions(4);
ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4_mean %console_report$"},
{"^BM_Repeat/repeats:4_median %console_report$"},
{"^BM_Repeat/repeats:4_stddev %console_report$"}});
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_mean\",$"},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_median\",$"},
{"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}});
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_stddev\",$"},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
@@ -286,7 +369,9 @@ void BM_RepeatOnce(benchmark::State& state) {
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
{"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});

// Test that non-aggregate data is not reported
@@ -295,20 +380,72 @@ void BM_SummaryRepeat(benchmark::State& state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut,
ADD_CASES(
TC_ConsoleOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_report$"},
{"^BM_SummaryRepeat/repeats:3_median %console_report$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_report$"}});
ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
{"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}});
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});

// Test that non-aggregate data is not displayed.
// NOTE: this test is kinda bad. we are only testing the display output.
// But we don't check that the file output still contains everything...
void BM_SummaryDisplay(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
ADD_CASES(
TC_ConsoleOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});

// Test repeats with custom time unit.
void BM_RepeatTimeUnit(benchmark::State& state) {
for (auto _ : state) {
}
@@ -317,18 +454,34 @@ BENCHMARK(BM_RepeatTimeUnit)
->Repetitions(3)
->ReportAggregatesOnly()
->Unit(benchmark::kMicrosecond);
ADD_CASES(TC_ConsoleOut,
ADD_CASES(
TC_ConsoleOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
"]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
"]*3$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_report$"},
{"^BM_RepeatTimeUnit/repeats:3_median %console_us_report$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_report$"}});
ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
{"\"time_unit\": \"us\",?$"}});
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
@@ -344,34 +497,92 @@ const auto UserStatistics = [](const std::vector<double>& v) {
};
void BM_UserStats(benchmark::State& state) {
for (auto _ : state) {
state.SetIterationTime(150 / 10e8);
}
}
// clang-format off
BENCHMARK(BM_UserStats)
->Repetitions(3)
->ComputeStatistics("", UserStatistics);
->Repetitions(3)
->Iterations(5)
->UseManualTime()
->ComputeStatistics("", UserStatistics);
// clang-format on

// check that user-provided stats is calculated, and is after the default-ones
// empty string as name is intentional, it would sort before anything else
ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/repeats:3 %console_report$"},
{"^BM_UserStats/repeats:3 %console_report$"},
{"^BM_UserStats/repeats:3 %console_report$"},
{"^BM_UserStats/repeats:3_mean %console_report$"},
{"^BM_UserStats/repeats:3_median %console_report$"},
{"^BM_UserStats/repeats:3_stddev %console_report$"},
{"^BM_UserStats/repeats:3_ %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/repeats:3\",$"},
{"\"name\": \"BM_UserStats/repeats:3\",$"},
{"\"name\": \"BM_UserStats/repeats:3\",$"},
{"\"name\": \"BM_UserStats/repeats:3_mean\",$"},
{"\"name\": \"BM_UserStats/repeats:3_median\",$"},
{"\"name\": \"BM_UserStats/repeats:3_stddev\",$"},
{"\"name\": \"BM_UserStats/repeats:3_\",$"}});
ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/repeats:3\",%csv_report$"},
{"^\"BM_UserStats/repeats:3\",%csv_report$"},
{"^\"BM_UserStats/repeats:3\",%csv_report$"},
{"^\"BM_UserStats/repeats:3_mean\",%csv_report$"},
{"^\"BM_UserStats/repeats:3_median\",%csv_report$"},
{"^\"BM_UserStats/repeats:3_stddev\",%csv_report$"},
{"^\"BM_UserStats/repeats:3_\",%csv_report$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserStats/iterations:5/repeats:3/"
"manual_time_mean [ ]* 150 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/"
"manual_time_median [ ]* 150 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/"
"manual_time_stddev [ ]* 0 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
"[ ]* 150 ns %time [ ]*3$"}});
ADD_CASES(
TC_JSONOut,
{{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"aggregate_name\": \"\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_median\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_stddev\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //

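
The BM_UserStats changes above pin the iteration count and switch to manual
timing so the custom statistic's output is deterministic (150 ns per
iteration, stddev 0). A sketch of the ComputeStatistics API as exercised here
(the "max" statistic name is illustrative; the empty-string name in the test
is intentional, as its comment notes):

const auto UserMax = [](const std::vector<double>& v) {
  return *std::max_element(v.begin(), v.end());  // requires <algorithm>
};
void BM_manual_sketch(benchmark::State& state) {
  for (auto _ : state) {
    state.SetIterationTime(150 / 10e8);  // report 150 ns per iteration
  }
}
BENCHMARK(BM_manual_sketch)
    ->Repetitions(3)
    ->Iterations(5)
    ->UseManualTime()
    ->ComputeStatistics("max", UserMax);
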
@@ -33,8 +33,8 @@ struct TestCase {
typedef benchmark::BenchmarkReporter::Run Run;

void CheckRun(Run const& run) const {
CHECK(name == run.benchmark_name) << "expected " << name << " got "
<< run.benchmark_name;
CHECK(name == run.benchmark_name())
<< "expected " << name << " got " << run.benchmark_name();
CHECK(error_occurred == run.error_occurred);
CHECK(error_message == run.error_message);
if (error_occurred) {
@@ -70,6 +70,15 @@ void BM_error_before_running(benchmark::State& state) {
BENCHMARK(BM_error_before_running);
ADD_CASES("BM_error_before_running", {{"", true, "error message"}});

void BM_error_before_running_batch(benchmark::State& state) {
state.SkipWithError("error message");
while (state.KeepRunningBatch(17)) {
assert(false);
}
}
BENCHMARK(BM_error_before_running_batch);
ADD_CASES("BM_error_before_running_batch", {{"", true, "error message"}});

void BM_error_before_running_range_for(benchmark::State& state) {
state.SkipWithError("error message");
for (auto _ : state) {
@@ -114,7 +123,7 @@ void BM_error_during_running_ranged_for(benchmark::State& state) {
// Test the unfortunate but documented behavior that the ranged-for loop
// doesn't automatically terminate when SkipWithError is set.
assert(++It != End);
break; // Required behavior
break; // Required behavior
}
}
}
@@ -123,8 +132,6 @@ ADD_CASES("BM_error_during_running_ranged_for",
{{"/1/iterations:5", true, "error message"},
{"/2/iterations:5", false, ""}});


void BM_error_after_running(benchmark::State& state) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());

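
The new BM_error_before_running_batch case covers KeepRunningBatch(), the
third looping style alongside KeepRunning() and the range-for loop: when
SkipWithError() is called before the loop starts, each construct must run
zero iterations, which is what the assert(false) body checks. Only the
mid-run range-for case needs the manual break noted above. A hedged sketch
(the batch size 17 mirrors the test):

void BM_skip_sketch(benchmark::State& state) {
  state.SkipWithError("error message");
  // Returns false immediately because an error is already set,
  // so the loop body never executes.
  while (state.KeepRunningBatch(17)) {
    assert(false);
  }
}
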
68
utils/google-benchmark/test/state_assembly_test.cc
Normal file
@@ -0,0 +1,68 @@
#include <benchmark/benchmark.h>

#ifdef __clang__
#pragma clang diagnostic ignored "-Wreturn-type"
#endif

// clang-format off
extern "C" {
extern int ExternInt;
benchmark::State& GetState();
void Fn();
}
// clang-format on

using benchmark::State;

// CHECK-LABEL: test_for_auto_loop:
extern "C" int test_for_auto_loop() {
State& S = GetState();
int x = 42;
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv
// CHECK-NEXT: testq %rbx, %rbx
// CHECK-NEXT: je [[LOOP_END:.*]]

for (auto _ : S) {
// CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]:
// CHECK-GNU-NEXT: subq $1, %rbx
// CHECK-CLANG-NEXT: {{(addq \$1,|incq)}} %rax
// CHECK-NEXT: jne .L[[LOOP_HEAD]]
benchmark::DoNotOptimize(x);
}
// CHECK: [[LOOP_END]]:
// CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv

// CHECK: movl $101, %eax
// CHECK: ret
return 101;
}

// CHECK-LABEL: test_while_loop:
extern "C" int test_while_loop() {
State& S = GetState();
int x = 42;

// CHECK: j{{(e|mp)}} .L[[LOOP_HEADER:[a-zA-Z0-9_]+]]
// CHECK-NEXT: .L[[LOOP_BODY:[a-zA-Z0-9_]+]]:
while (S.KeepRunning()) {
// CHECK-GNU-NEXT: subq $1, %[[IREG:[a-z]+]]
// CHECK-CLANG-NEXT: {{(addq \$-1,|decq)}} %[[IREG:[a-z]+]]
// CHECK: movq %[[IREG]], [[DEST:.*]]
benchmark::DoNotOptimize(x);
}
// CHECK-DAG: movq [[DEST]], %[[IREG]]
// CHECK-DAG: testq %[[IREG]], %[[IREG]]
// CHECK-DAG: jne .L[[LOOP_BODY]]
// CHECK-DAG: .L[[LOOP_HEADER]]:

// CHECK: cmpb $0
// CHECK-NEXT: jne .L[[LOOP_END:[a-zA-Z0-9_]+]]
// CHECK: [[CALL:call(q)*]] _ZN9benchmark5State16StartKeepRunningEv

// CHECK: .L[[LOOP_END]]:
// CHECK: [[CALL]] _ZN9benchmark5State17FinishKeepRunningEv

// CHECK: movl $101, %eax
// CHECK: ret
return 101;
}
28
utils/google-benchmark/test/statistics_gtest.cc
Normal file
@@ -0,0 +1,28 @@
//===---------------------------------------------------------------------===//
// statistics_test - Unit tests for src/statistics.cc
//===---------------------------------------------------------------------===//

#include "../src/statistics.h"
#include "gtest/gtest.h"

namespace {
TEST(StatisticsTest, Mean) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0);
}

TEST(StatisticsTest, Median) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5);
EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0);
}

TEST(StatisticsTest, StdDev) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
EXPECT_FLOAT_EQ(benchmark::StatisticsStdDev({1.5, 2.4, 3.3, 4.2, 5.1}),
1.42302495);
}

} // end namespace
@@ -1,61 +0,0 @@
//===---------------------------------------------------------------------===//
// statistics_test - Unit tests for src/statistics.cc
//===---------------------------------------------------------------------===//

#include "../src/statistics.h"
#include "gtest/gtest.h"

namespace {
TEST(StatisticsTest, Mean) {
std::vector<double> Inputs;
{
Inputs = {42, 42, 42, 42};
double Res = benchmark::StatisticsMean(Inputs);
EXPECT_DOUBLE_EQ(Res, 42.0);
}
{
Inputs = {1, 2, 3, 4};
double Res = benchmark::StatisticsMean(Inputs);
EXPECT_DOUBLE_EQ(Res, 2.5);
}
{
Inputs = {1, 2, 5, 10, 10, 14};
double Res = benchmark::StatisticsMean(Inputs);
EXPECT_DOUBLE_EQ(Res, 7.0);
}
}

TEST(StatisticsTest, Median) {
std::vector<double> Inputs;
{
Inputs = {42, 42, 42, 42};
double Res = benchmark::StatisticsMedian(Inputs);
EXPECT_DOUBLE_EQ(Res, 42.0);
}
{
Inputs = {1, 2, 3, 4};
double Res = benchmark::StatisticsMedian(Inputs);
EXPECT_DOUBLE_EQ(Res, 2.5);
}
{
Inputs = {1, 2, 5, 10, 10};
double Res = benchmark::StatisticsMedian(Inputs);
EXPECT_DOUBLE_EQ(Res, 5.0);
}
}

TEST(StatisticsTest, StdDev) {
std::vector<double> Inputs;
{
Inputs = {101, 101, 101, 101};
double Res = benchmark::StatisticsStdDev(Inputs);
EXPECT_DOUBLE_EQ(Res, 0.0);
}
{
Inputs = {1, 2, 3};
double Res = benchmark::StatisticsStdDev(Inputs);
EXPECT_DOUBLE_EQ(Res, 1.0);
}
}

} // end namespace
146
utils/google-benchmark/test/string_util_gtest.cc
Normal file
@@ -0,0 +1,146 @@
|
||||
//===---------------------------------------------------------------------===//
|
||||
// statistics_test - Unit tests for src/statistics.cc
|
||||
//===---------------------------------------------------------------------===//

#include "../src/string_util.h"
#include "gtest/gtest.h"

namespace {
TEST(StringUtilTest, stoul) {
  {
    size_t pos = 0;
    EXPECT_EQ(0, benchmark::stoul("0", &pos));
    EXPECT_EQ(1, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(7, benchmark::stoul("7", &pos));
    EXPECT_EQ(1, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(135, benchmark::stoul("135", &pos));
    EXPECT_EQ(3, pos);
  }
#if ULONG_MAX == 0xFFFFFFFFul
  {
    size_t pos = 0;
    EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos));
    EXPECT_EQ(10, pos);
  }
#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul
  {
    size_t pos = 0;
    EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, benchmark::stoul("18446744073709551615", &pos));
    EXPECT_EQ(20, pos);
  }
#endif
  {
    size_t pos = 0;
    EXPECT_EQ(10, benchmark::stoul("1010", &pos, 2));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(520, benchmark::stoul("1010", &pos, 8));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(1010, benchmark::stoul("1010", &pos, 10));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(4112, benchmark::stoul("1010", &pos, 16));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(0xBEEF, benchmark::stoul("BEEF", &pos, 16));
    EXPECT_EQ(4, pos);
  }
  {
    ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument);
  }
}

TEST(StringUtilTest, stoi) {
  {
    size_t pos = 0;
    EXPECT_EQ(0, benchmark::stoi("0", &pos));
    EXPECT_EQ(1, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(-17, benchmark::stoi("-17", &pos));
    EXPECT_EQ(3, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(1357, benchmark::stoi("1357", &pos));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16));
    EXPECT_EQ(4, pos);
  }
  {
    ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument);
  }
}

TEST(StringUtilTest, stod) {
  {
    size_t pos = 0;
    EXPECT_EQ(0.0, benchmark::stod("0", &pos));
    EXPECT_EQ(1, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(-84.0, benchmark::stod("-84", &pos));
    EXPECT_EQ(3, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(1234.0, benchmark::stod("1234", &pos));
    EXPECT_EQ(4, pos);
  }
  {
    size_t pos = 0;
    EXPECT_EQ(1.5, benchmark::stod("1.5", &pos));
    EXPECT_EQ(3, pos);
  }
  {
    size_t pos = 0;
    /* Note: exactly representable as double */
    EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos));
    EXPECT_EQ(8, pos);
  }
  {
    ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument);
  }
}

} // end namespace
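
(Editor's note: these wrappers behave like the std::sto* functions, returning the parsed value and reporting the number of characters consumed through *pos. A plausible shape for stoul, assuming it simply delegates to strtoul; this is a sketch with out-of-range handling omitted, not the library's actual implementation:

#include <cstdlib>
#include <stdexcept>
#include <string>

// Sketch: parse an unsigned long in the given base, report the number of
// consumed characters via *pos, and throw like std::stoul on no conversion.
unsigned long stoul_sketch(const std::string& str, size_t* pos = nullptr,
                           int base = 10) {
  char* end = nullptr;
  unsigned long val = std::strtoul(str.c_str(), &end, base);
  if (end == str.c_str())
    throw std::invalid_argument("stoul_sketch: no conversion");
  if (pos) *pos = static_cast<size_t>(end - str.c_str());
  return val;
}

With this shape, stoul_sketch("BEEF", &pos, 16) yields 0xBEEF with pos == 4, matching the expectations above.)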

@@ -4,15 +4,15 @@
#include <cassert>
#include <memory>

template<typename T>
template <typename T>
class MyFixture : public ::benchmark::Fixture {
 public:
  MyFixture() : data(0) {}

  T data;
};

BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st) {
BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) {
  for (auto _ : st) {
    data += 1;
  }
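
(Editor's note: the hunk above is a pure clang-format change, as shown by the paired old/new lines. For context, BENCHMARK_TEMPLATE_F instantiates a templated fixture for the given type argument and registers the method body as a benchmark; a minimal sketch of the pattern, with a hypothetical VecFixture that is not part of this diff:

#include <vector>
#include "benchmark/benchmark.h"

template <typename T>
class VecFixture : public ::benchmark::Fixture {
 public:
  std::vector<T> v;
};

// Defines and registers the benchmark body for T = int; fixture members
// such as v are accessed directly inside the body.
BENCHMARK_TEMPLATE_F(VecFixture, Push, int)(benchmark::State& st) {
  for (auto _ : st) v.push_back(1);
}
BENCHMARK_MAIN();
)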

@@ -7,9 +7,11 @@
// @todo: <jpmag> this checks the full output at once; the rule for
// CounterSet1 was failing because it was not matching "^[-]+$".
// @todo: <jpmag> check that the counters are vertically aligned.
ADD_CASES(TC_ConsoleOut, {
ADD_CASES(
    TC_ConsoleOut,
    {
        // keeping these lines long improves readability, so:
        // clang-format off
    {"^[-]+$", MR_Next},
    {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
    {"^[-]+$", MR_Next},
@@ -44,8 +46,8 @@ ADD_CASES(TC_ConsoleOut, {
    {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
    {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
    {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
    // clang-format on
});
ADD_CASES(TC_CSVOut, {{"%csv_header,"
                       "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});

@@ -58,27 +60,30 @@ void BM_Counters_Tabular(benchmark::State& state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", { 1, bm::Counter::kAvgThreads}},
      {"Bar", { 2, bm::Counter::kAvgThreads}},
      {"Baz", { 4, bm::Counter::kAvgThreads}},
      {"Bat", { 8, bm::Counter::kAvgThreads}},
      {"Frob", {16, bm::Counter::kAvgThreads}},
      {"Lob", {32, bm::Counter::kAvgThreads}},
      {"Foo", {1, bm::Counter::kAvgThreads}},
      {"Bar", {2, bm::Counter::kAvgThreads}},
      {"Baz", {4, bm::Counter::kAvgThreads}},
      {"Bat", {8, bm::Counter::kAvgThreads}},
      {"Frob", {16, bm::Counter::kAvgThreads}},
      {"Lob", {32, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bar\": %float,$", MR_Next},
                       {"\"Bat\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float,$", MR_Next},
                       {"\"Frob\": %float,$", MR_Next},
                       {"\"Lob\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
           {"\"run_name\": \"BM_Counters_Tabular/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
                       "%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument

@@ -102,39 +107,43 @@ void BM_CounterRates_Tabular(benchmark::State& state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", { 1, bm::Counter::kAvgThreadsRate}},
      {"Bar", { 2, bm::Counter::kAvgThreadsRate}},
      {"Baz", { 4, bm::Counter::kAvgThreadsRate}},
      {"Bat", { 8, bm::Counter::kAvgThreadsRate}},
      {"Frob", {16, bm::Counter::kAvgThreadsRate}},
      {"Lob", {32, bm::Counter::kAvgThreadsRate}},
      {"Foo", {1, bm::Counter::kAvgThreadsRate}},
      {"Bar", {2, bm::Counter::kAvgThreadsRate}},
      {"Baz", {4, bm::Counter::kAvgThreadsRate}},
      {"Bat", {8, bm::Counter::kAvgThreadsRate}},
      {"Frob", {16, bm::Counter::kAvgThreadsRate}},
      {"Lob", {32, bm::Counter::kAvgThreadsRate}},
  });
}
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bar\": %float,$", MR_Next},
                       {"\"Bat\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float,$", MR_Next},
                       {"\"Frob\": %float,$", MR_Next},
                       {"\"Lob\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
           {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
                       "%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabularRate(Results const& e) {
  double t = e.DurationCPUTime();
  CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1./t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2./t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4./t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8./t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16./t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32./t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
                        &CheckTabularRate);
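
(Editor's note: the expectations above encode what kAvgThreadsRate means: the counter value is averaged over the threads and then divided by the run's CPU time, so a per-thread value c is reported as roughly c / t. A hedged sketch of that post-processing step, a simplified model rather than the library's actual counter code:

// Sketch: finalize a counter value after a run (simplified model).
double FinalizeCounter(double value, bool avg_threads, bool is_rate,
                       int num_threads, double cpu_time_seconds) {
  if (avg_threads) value /= num_threads;   // kAvgThreads
  if (is_rate) value /= cpu_time_seconds;  // kIsRate
  return value;                            // kAvgThreadsRate = both flags
}
)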

@@ -149,21 +158,24 @@ void BM_CounterSet0_Tabular(benchmark::State& state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {10, bm::Counter::kAvgThreads}},
      {"Bar", {20, bm::Counter::kAvgThreads}},
      {"Baz", {40, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bar\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
           {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
                       "%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument

@@ -181,21 +193,24 @@ void BM_CounterSet1_Tabular(benchmark::State& state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {15, bm::Counter::kAvgThreads}},
      {"Bar", {25, bm::Counter::kAvgThreads}},
      {"Baz", {45, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bar\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
           {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
                       "%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument

@@ -217,21 +232,24 @@ void BM_CounterSet2_Tabular(benchmark::State& state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {10, bm::Counter::kAvgThreads}},
      {"Bat", {30, bm::Counter::kAvgThreads}},
      {"Baz", {40, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"Bat\": %float,$", MR_Next},
                       {"\"Baz\": %float,$", MR_Next},
                       {"\"Foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
           {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
                       ",%float,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument

@@ -8,12 +8,16 @@
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //

// clang-format off

ADD_CASES(TC_ConsoleOut,
          {{"^[-]+$", MR_Next},
           {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next},
           {"^[-]+$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});

// clang-format on

// ========================================================================= //
// ------------------------- Simple Counters Output ------------------------ //
// ========================================================================= //
@@ -25,8 +29,11 @@ void BM_Counters_Simple(benchmark::State& state) {
  state.counters["bar"] = 2 * (double)state.iterations();
}
BENCHMARK(BM_Counters_Simple);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_ConsoleOut,
          {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
                       {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
@@ -38,10 +45,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSimple(Results const& e) {
  double its = e.GetAs< double >("iterations");
  double its = e.NumIterations();
  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
  // check that the value of bar is within 0.1% of the expected value
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2.*its, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);

@@ -49,7 +56,9 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
// --------------------- Counters+Items+Bytes/s Output --------------------- //
// ========================================================================= //

namespace { int num_calls1 = 0; }
namespace {
int num_calls1 = 0;
}
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
  for (auto _ : state) {
  }
@@ -59,30 +68,33 @@ void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
  state.SetItemsProcessed(150);
}
BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
ADD_CASES(TC_ConsoleOut,
          {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
            "bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bytes_per_second\": %float,$", MR_Next},
                       {"\"items_per_second\": %float,$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
                           "bar=%hrfloat bytes_per_second=%hrfloat/s "
                           "foo=%hrfloat items_per_second=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
           {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"bytes_per_second\": %float,$", MR_Next},
           {"\"foo\": %float,$", MR_Next},
           {"\"items_per_second\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
                       "%csv_bytes_items_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckBytesAndItemsPSec(Results const& e) {
  double t = e.DurationCPUTime();  // this (and not real time) is the time used
  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
  CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
  // check that the values are within 0.1% of the expected values
  CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364./t, 0.001);
  CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150./t, 0.001);
  CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
  CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
                        &CheckBytesAndItemsPSec);
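
(Editor's note: SetBytesProcessed and SetItemsProcessed record totals for the whole run, and the reported rates divide those totals by CPU time, which is exactly what the 364./t and 150./t expectations encode. A rough sketch of that arithmetic, assuming t is CPU seconds:

// Sketch of the rate computation the expectations above rely on.
double BytesPerSecond(double total_bytes, double cpu_seconds) {
  return total_bytes / cpu_seconds;  // e.g. 364 bytes over t seconds
}
)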

@@ -99,8 +111,12 @@ void BM_Counters_Rate(benchmark::State& state) {
  state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
}
BENCHMARK(BM_Counters_Rate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(
    TC_ConsoleOut,
    {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
                       {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
@@ -112,10 +128,10 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckRate(Results const& e) {
  double t = e.DurationCPUTime();  // this (and not real time) is the time used
  // check that the values are within 0.1% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);

@@ -130,16 +146,22 @@ void BM_Counters_Threads(benchmark::State& state) {
  state.counters["bar"] = 2;
}
BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
           {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThreads(Results const& e) {

@@ -160,16 +182,22 @@ void BM_Counters_AvgThreads(benchmark::State& state) {
  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
}
BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
                           "%console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
           {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreads(Results const& e) {

@@ -191,25 +219,188 @@ void BM_Counters_AvgThreadsRate(benchmark::State& state) {
  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
}
BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/threads:%int\",%csv_report,%float,%float$"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
           {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
                       "threads:%int\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgThreadsRate(Results const& e) {
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./e.DurationCPUTime(), 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./e.DurationCPUTime(), 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int",
                        &CheckAvgThreadsRate);

// ========================================================================= //
// ------------------- IterationInvariant Counters Output ------------------ //
// ========================================================================= //

void BM_Counters_IterationInvariant(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_IterationInvariant);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
           {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIterationInvariant(Results const& e) {
  double its = e.NumIterations();
  // check that the values are within 0.1% of the expected value
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
                        &CheckIterationInvariant);
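
(Editor's note: an iteration-invariant counter is scaled by the iteration count before reporting, so a constant c is reported as c * iterations, and with the rate flag as c * iterations / t; that is what the two CHECK_FLOAT_COUNTER_VALUE lines above and the *Rate test below verify. A hedged sketch of that scaling, a simplified model rather than the library's code:

// Sketch: finalize an iteration-invariant counter (simplified model).
double FinalizeIterationInvariant(double value, long iterations,
                                  bool is_rate, double cpu_seconds) {
  value *= iterations;                // kIsIterationInvariant
  if (is_rate) value /= cpu_seconds;  // kIsIterationInvariantRate
  return value;
}
)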

// ========================================================================= //
// ----------------- IterationInvariantRate Counters Output ---------------- //
// ========================================================================= //

void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] =
      bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
  state.counters["bar"] =
      bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
}
BENCHMARK(BM_Counters_kIsIterationInvariantRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
           {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
                       "%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckIsIterationInvariantRate(Results const& e) {
  double its = e.NumIterations();
  double t = e.DurationCPUTime();  // this (and not real time) is the time used
  // check that the values are within 0.1% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate",
                        &CheckIsIterationInvariantRate);

// ========================================================================= //
// ------------------- AvgIterations Counters Output ------------------ //
// ========================================================================= //

void BM_Counters_AvgIterations(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_AvgIterations);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_AvgIterations\",$"},
           {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterations(Results const& e) {
  double its = e.NumIterations();
  // check that the values are within 0.1% of the expected value
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);

// ========================================================================= //
// ----------------- AvgIterationsRate Counters Output ---------------- //
// ========================================================================= //

void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
  state.counters["bar"] =
      bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
}
BENCHMARK(BM_Counters_kAvgIterationsRate);
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
           {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
                       "%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckAvgIterationsRate(Results const& e) {
  double its = e.NumIterations();
  double t = e.DurationCPUTime();  // this (and not real time) is the time used
  // check that the values are within 0.1% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate",
                        &CheckAvgIterationsRate);

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

161
utils/google-benchmark/test/user_counters_thousands_test.cc
Normal file
@@ -0,0 +1,161 @@

#undef NDEBUG

#include "benchmark/benchmark.h"
#include "output_test.h"

// ========================================================================= //
// ------------------------ Thousands Customisation ------------------------ //
// ========================================================================= //

void BM_Counters_Thousands(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"t0_1000000DefaultBase",
       bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
      {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1000)},
      {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1024)},
      {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1000)},
      {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1024)},
  });
}
BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
ADD_CASES(
    TC_ConsoleOut,
    {
        {"^BM_Counters_Thousands/repeats:2 %console_report "
         "t0_1000000DefaultBase=1000k "
         "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
         "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2 %console_report "
         "t0_1000000DefaultBase=1000k "
         "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
         "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2_mean %console_report "
         "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
         "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
         "t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2_median %console_report "
         "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
         "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
         "t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
         "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
         "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
    });
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"aggregate_name\": \"mean\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
           {"}", MR_Next}});

ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_Counters_Thousands/"
      "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
      "0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/"
      "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
      "0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/"
      "repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
      "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/"
      "repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
      "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThousands(Results const& e) {
  if (e.name != "BM_Counters_Thousands/repeats:2")
    return;  // Do not check the aggregates!

  // check that the values are within 0.01% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,
                            0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }

@@ -35,6 +35,34 @@ def check_inputs(in1, in2, flags):
def create_parser():
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')

    parser.add_argument(
        '-a',
        '--display_aggregates_only',
        dest='display_aggregates_only',
        action="store_true",
        help="If there are repetitions, by default, we display everything - the"
        " actual runs, and the aggregates computed. Sometimes, it is "
        "desirable to only view the aggregates. E.g. when there are a lot "
        "of repetitions. Do note that only the display is affected. "
        "Internally, all the actual runs are still used, e.g. for U test.")

    utest = parser.add_argument_group()
    utest.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action="store_false",
        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
    alpha_default = 0.05
    utest.add_argument(
        "--alpha",
        dest='utest_alpha',
        default=alpha_default,
        type=float,
        help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
        alpha_default)

    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')
@@ -138,6 +166,9 @@ def main():
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        exit(1)
    assert not unknown_args
    benchmark_options = args.benchmark_options

@@ -175,10 +206,14 @@ def main():
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    if args.display_aggregates_only:
        benchmark_options += ['--benchmark_display_aggregates_only=true']

    options_baseline = []
    options_contender = []

@@ -201,7 +236,9 @@ def main():
        json2_orig, filter_contender, replacement)

    # Diff and output
    output_lines = gbench.report.generate_difference_report(json1, json2)
    output_lines = gbench.report.generate_difference_report(
        json1, json2, args.display_aggregates_only,
        args.utest, args.utest_alpha)
    print(description)
    for ln in output_lines:
        print(ln)
@@ -218,12 +255,57 @@ class TestParser(unittest.TestCase):
            os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test_baseline_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test_baseline_run2.json')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        parsed = self.parser.parse_args(
            ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_display_aggregates_only(self):
        parsed = self.parser.parse_args(
            ['-a', 'benchmarks', self.testInput0, self.testInput1])
        self.assertTrue(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
@@ -232,6 +314,8 @@ class TestParser(unittest.TestCase):
    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
@@ -240,6 +324,8 @@ class TestParser(unittest.TestCase):
    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
@@ -248,6 +334,8 @@ class TestParser(unittest.TestCase):
    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
@@ -257,6 +345,8 @@ class TestParser(unittest.TestCase):
    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
@@ -266,6 +356,8 @@ class TestParser(unittest.TestCase):
    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
@@ -275,6 +367,8 @@ class TestParser(unittest.TestCase):
    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
@@ -285,6 +379,8 @@ class TestParser(unittest.TestCase):
    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
@@ -295,6 +391,8 @@ class TestParser(unittest.TestCase):
    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
@@ -304,7 +402,7 @@ class TestParser(unittest.TestCase):


if __name__ == '__main__':
    # unittest.main()
    #unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4

@@ -1,67 +0,0 @@
#!/usr/bin/env python
"""
compare_bench.py - Compare two benchmarks or their results and report the
difference.
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *


def check_inputs(in1, in2, flags):
    """
    Perform checking on the user provided inputs and diagnose any abnormalities
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing --benchmark flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py`"
               " is not supported.") % output_type)
        sys.exit(1)


def main():
    parser = ArgumentParser(
        description='compare the results of two benchmarks')
    parser.add_argument(
        'test1', metavar='test1', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'test2', metavar='test2', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables'
    )
    args, unknown_args = parser.parse_known_args()
    # Parse the command line flags
    test1 = args.test1[0]
    test2 = args.test2[0]
    if unknown_args:
        # should never happen
        print("Unrecognized positional argument arguments: '%s'"
              % unknown_args)
        exit(1)
    benchmark_options = args.benchmark_options
    check_inputs(test1, test2, benchmark_options)
    # Run the benchmarks and report the results
    json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
    json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options)
    output_lines = gbench.report.generate_difference_report(json1, json2)
    print('Comparing %s to %s' % (test1, test2))
    for ln in output_lines:
        print(ln)


if __name__ == '__main__':
    main()
|
||||
65
utils/google-benchmark/tools/gbench/Inputs/test3_run0.json
Normal file
@@ -0,0 +1,65 @@
{
    "context": {
        "date": "2016-08-02 17:44:46",
        "num_cpus": 4,
        "mhz_per_cpu": 4228,
        "cpu_scaling_enabled": false,
        "library_build_type": "release"
    },
    "benchmarks": [
        {
            "name": "BM_One",
            "run_type": "aggregate",
            "iterations": 1000,
            "real_time": 10,
            "cpu_time": 100,
            "time_unit": "ns"
        },
        {
            "name": "BM_Two",
            "iterations": 1000,
            "real_time": 9,
            "cpu_time": 90,
            "time_unit": "ns"
        },
        {
            "name": "BM_Two",
            "iterations": 1000,
            "real_time": 8,
            "cpu_time": 86,
            "time_unit": "ns"
        },
        {
            "name": "short",
            "run_type": "aggregate",
            "iterations": 1000,
            "real_time": 8,
            "cpu_time": 80,
            "time_unit": "ns"
        },
        {
            "name": "short",
            "run_type": "aggregate",
            "iterations": 1000,
            "real_time": 8,
            "cpu_time": 77,
            "time_unit": "ns"
        },
        {
            "name": "medium",
            "run_type": "iteration",
            "iterations": 1000,
            "real_time": 8,
            "cpu_time": 80,
            "time_unit": "ns"
        },
        {
            "name": "medium",
            "run_type": "iteration",
            "iterations": 1000,
            "real_time": 9,
            "cpu_time": 82,
            "time_unit": "ns"
        }
    ]
}
65
utils/google-benchmark/tools/gbench/Inputs/test3_run1.json
Normal file
@@ -0,0 +1,65 @@
{
    "context": {
        "date": "2016-08-02 17:44:46",
        "num_cpus": 4,
        "mhz_per_cpu": 4228,
        "cpu_scaling_enabled": false,
        "library_build_type": "release"
    },
    "benchmarks": [
        {
            "name": "BM_One",
            "iterations": 1000,
            "real_time": 9,
            "cpu_time": 110,
            "time_unit": "ns"
        },
        {
            "name": "BM_Two",
            "run_type": "aggregate",
            "iterations": 1000,
            "real_time": 10,
            "cpu_time": 89,
            "time_unit": "ns"
        },
        {
            "name": "BM_Two",
            "iterations": 1000,
            "real_time": 7,
            "cpu_time": 72,
            "time_unit": "ns"
        },
        {
            "name": "short",
            "run_type": "aggregate",
            "iterations": 1000,
            "real_time": 7,
            "cpu_time": 75,
            "time_unit": "ns"
        },
        {
            "name": "short",
            "run_type": "aggregate",
            "iterations": 762,
            "real_time": 4.54,
            "cpu_time": 66.6,
            "time_unit": "ns"
        },
        {
            "name": "short",
            "run_type": "iteration",
            "iterations": 1000,
            "real_time": 800,
            "cpu_time": 1,
            "time_unit": "ns"
        },
        {
            "name": "medium",
            "run_type": "iteration",
            "iterations": 1200,
            "real_time": 5,
            "cpu_time": 53,
            "time_unit": "ns"
        }
    ]
}
@@ -4,6 +4,9 @@ import os
import re
import copy

from scipy.stats import mannwhitneyu


class BenchmarkColor(object):
    def __init__(self, name, code):
        self.name = name
@@ -16,11 +19,13 @@ class BenchmarkColor(object):
    def __format__(self, format):
        return self.code


# Benchmark Colors Enumeration
BC_NONE = BenchmarkColor('NONE', '')
BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m')
BC_CYAN = BenchmarkColor('CYAN', '\033[96m')
BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m')
BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m')
BC_HEADER = BenchmarkColor('HEADER', '\033[92m')
BC_WARNING = BenchmarkColor('WARNING', '\033[93m')
BC_WHITE = BenchmarkColor('WHITE', '\033[97m')
@@ -29,6 +34,11 @@ BC_ENDC = BenchmarkColor('ENDC', '\033[0m')
BC_BOLD = BenchmarkColor('BOLD', '\033[1m')
BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m')

UTEST_MIN_REPETITIONS = 2
UTEST_OPTIMAL_REPETITIONS = 9  # Lowest reasonable number, More is better.
UTEST_COL_NAME = "_pvalue"


def color_format(use_color, fmt_str, *args, **kwargs):
    """
    Return the result of 'fmt_str.format(*args, **kwargs)' after transforming
@@ -78,64 +88,225 @@ def filter_benchmark(json_orig, family, replacement=""):
    for be in json_orig['benchmarks']:
        if not regex.search(be['name']):
            continue
        filteredbench = copy.deepcopy(be) # Do NOT modify the old name!
        filteredbench = copy.deepcopy(be)  # Do NOT modify the old name!
        filteredbench['name'] = regex.sub(replacement, filteredbench['name'])
        filtered['benchmarks'].append(filteredbench)
    return filtered


def generate_difference_report(json1, json2, use_color=True):
def get_unique_benchmark_names(json):
    """
    While *keeping* the order, give all the unique 'names' used for benchmarks.
    """
    seen = set()
    uniqued = [x['name'] for x in json['benchmarks']
               if x['name'] not in seen and
               (seen.add(x['name']) or True)]
    return uniqued

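The comprehension above leans on the fact that set.add() returns None: '(seen.add(x) or True)' records the name as a side effect while keeping the element, so duplicates are dropped without losing input order. A standalone illustration with made-up names:

    names = ['BM_One', 'BM_Two', 'BM_One', 'short', 'BM_Two', 'medium']
    seen = set()
    uniqued = [n for n in names if n not in seen and (seen.add(n) or True)]
    assert uniqued == ['BM_One', 'BM_Two', 'short', 'medium']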
def intersect(list1, list2):
    """
    Given two lists, get a new list consisting of the elements only contained
    in *both of the input lists*, while preserving the ordering.
    """
    return [x for x in list1 if x in list2]


def partition_benchmarks(json1, json2):
    """
    While preserving the ordering, find benchmarks with the same names in
    both of the inputs, and group them.
    (i.e. partition/filter into groups with common name)
    """
    json1_unique_names = get_unique_benchmark_names(json1)
    json2_unique_names = get_unique_benchmark_names(json2)
    names = intersect(json1_unique_names, json2_unique_names)
    partitions = []
    for name in names:
        # Pick the time unit from the first entry of the lhs benchmark.
        time_unit = (x['time_unit']
                     for x in json1['benchmarks'] if x['name'] == name).next()
        # Filter by name and time unit.
        lhs = [x for x in json1['benchmarks'] if x['name'] == name and
               x['time_unit'] == time_unit]
        rhs = [x for x in json2['benchmarks'] if x['name'] == name and
               x['time_unit'] == time_unit]
        partitions.append([lhs, rhs])
    return partitions

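One portability note on the code above: calling .next() on a generator is Python 2 syntax; Python 3 removed that method in favor of the builtin next(), so the time-unit lookup would be spelled as follows under Python 3 (equivalent behavior, shown only for reference):

    time_unit = next(x['time_unit']
                     for x in json1['benchmarks'] if x['name'] == name)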
def extract_field(partition, field_name):
    # The count of elements may be different. We want *all* of them.
    lhs = [x[field_name] for x in partition[0]]
    rhs = [x[field_name] for x in partition[1]]
    return [lhs, rhs]


def print_utest(partition, utest_alpha, first_col_width, use_color=True):
    timings_time = extract_field(partition, 'real_time')
    timings_cpu = extract_field(partition, 'cpu_time')

    min_rep_cnt = min(len(timings_time[0]),
                      len(timings_time[1]),
                      len(timings_cpu[0]),
                      len(timings_cpu[1]))

    # Does *everything* have at least UTEST_MIN_REPETITIONS repetitions?
    if min_rep_cnt < UTEST_MIN_REPETITIONS:
        return []

    def get_utest_color(pval):
        return BC_FAIL if pval >= utest_alpha else BC_OKGREEN

    time_pvalue = mannwhitneyu(
        timings_time[0], timings_time[1], alternative='two-sided').pvalue
    cpu_pvalue = mannwhitneyu(
        timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue

    dsc = "U Test, Repetitions: {} vs {}".format(
        len(timings_cpu[0]), len(timings_cpu[1]))
    dsc_color = BC_OKGREEN

    if min_rep_cnt < UTEST_OPTIMAL_REPETITIONS:
        dsc_color = BC_WARNING
        dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
            UTEST_OPTIMAL_REPETITIONS)

    special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"

    last_name = partition[0][0]['name']
    return [color_format(use_color,
                         special_str,
                         BC_HEADER,
                         "{}{}".format(last_name, UTEST_COL_NAME),
                         first_col_width,
                         get_utest_color(time_pvalue), time_pvalue,
                         get_utest_color(cpu_pvalue), cpu_pvalue,
                         dsc_color, dsc,
                         endc=BC_ENDC)]

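For reference, the SciPy call at the heart of print_utest compares the two repetition samples directly and yields a two-sided p-value; a minimal sketch with made-up timings (the surrounding column formatting and coloring is omitted):

    from scipy.stats import mannwhitneyu

    lhs_times = [10.0, 9.8, 10.2]  # e.g. 'real_time' of the lhs repetitions
    rhs_times = [9.0, 8.9, 9.1]    # and of the rhs repetitions
    pvalue = mannwhitneyu(lhs_times, rhs_times, alternative='two-sided').pvalue
    print(pvalue < 0.05)  # compared against utest_alpha above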
def generate_difference_report(
        json1,
        json2,
        display_aggregates_only=False,
        utest=False,
        utest_alpha=0.05,
        use_color=True):
    """
    Calculate and report the difference between each test of two benchmarks
    runs specified as 'json1' and 'json2'.
    """
    assert utest is True or utest is False
    first_col_width = find_longest_name(json1['benchmarks'])

    def find_test(name):
        for b in json2['benchmarks']:
            if b['name'] == name:
                return b
        return None
    first_col_width = max(first_col_width, len('Benchmark'))

    first_col_width = max(
        first_col_width,
        len('Benchmark'))
    first_col_width += len(UTEST_COL_NAME)
    first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
        'Benchmark', 12 + first_col_width)
    output_strs = [first_line, '-' * len(first_line)]

    gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn)
    for bn in gen:
        other_bench = find_test(bn['name'])
        if not other_bench:
            continue
    partitions = partition_benchmarks(json1, json2)
    for partition in partitions:
        # Careful, we may have different repetition count.
        for i in range(min(len(partition[0]), len(partition[1]))):
            bn = partition[0][i]
            other_bench = partition[1][i]

            if bn['time_unit'] != other_bench['time_unit']:
                continue
            # *If* we were asked to only display aggregates,
            # and if it is non-aggregate, then skip it.
            if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench:
                assert bn['run_type'] == other_bench['run_type']
                if bn['run_type'] != 'aggregate':
                    continue

            fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"

            def get_color(res):
                if res > 0.05:
                    return BC_FAIL
                elif res > -0.07:
                    return BC_WHITE
                else:
                    return BC_CYAN

            tres = calculate_change(bn['real_time'], other_bench['real_time'])
            cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
            output_strs += [color_format(use_color,
                                         fmt_str,
                                         BC_HEADER,
                                         bn['name'],
                                         first_col_width,
                                         get_color(tres),
                                         tres,
                                         get_color(cpures),
                                         cpures,
                                         bn['real_time'],
                                         other_bench['real_time'],
                                         bn['cpu_time'],
                                         other_bench['cpu_time'],
                                         endc=BC_ENDC)]

        # After processing the whole partition, if requested, do the U test.
        if utest:
            output_strs += print_utest(partition,
                                       utest_alpha=utest_alpha,
                                       first_col_width=first_col_width,
                                       use_color=use_color)

        def get_color(res):
            if res > 0.05:
                return BC_FAIL
            elif res > -0.07:
                return BC_WHITE
            else:
                return BC_CYAN
        fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
        tres = calculate_change(bn['real_time'], other_bench['real_time'])
        cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
        output_strs += [color_format(use_color, fmt_str,
                                     BC_HEADER, bn['name'], first_col_width,
                                     get_color(tres), tres, get_color(cpures), cpures,
                                     bn['real_time'], other_bench['real_time'],
                                     bn['cpu_time'], other_bench['cpu_time'],
                                     endc=BC_ENDC)]
    return output_strs

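Taken together with the helpers above, the new entry point is driven like this; a minimal sketch assuming json1 and json2 are two loaded benchmark result documents (for instance via gbench.util.run_or_load_benchmark):

    lines = generate_difference_report(
        json1, json2,
        display_aggregates_only=False,
        utest=True,            # also emit the '<name>_pvalue' rows
        utest_alpha=0.05,
        use_color=False)
    print('\n'.join(lines))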
###############################################################################
# Unit tests


import unittest


class TestGetUniqueBenchmarkNames(unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput = os.path.join(testInputs, 'test3_run0.json')
        with open(testOutput, 'r') as f:
            json = json.load(f)
        return json

    def test_basic(self):
        expect_lines = [
            'BM_One',
            'BM_Two',
            'short',  # These two are not sorted
            'medium',  # These two are not sorted
        ]
        json = self.load_results()
        output_lines = get_unique_benchmark_names(json)
        print("\n")
        print("\n".join(output_lines))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            self.assertEqual(expect_lines[i], output_lines[i])


class TestReportDifference(unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput1 = os.path.join(testInputs, 'test1_run1.json')
        testOutput2 = os.path.join(testInputs, 'test1_run2.json')
        with open(testOutput1, 'r') as f:
@@ -153,27 +324,35 @@ class TestReportDifference(unittest.TestCase):
            ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
            ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
            ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
            ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'],
            ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'],
            ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'],
            ['BM_100xSlower', '+99.0000', '+99.0000',
                '100', '10000', '100', '10000'],
            ['BM_100xFaster', '-0.9900', '-0.9900',
                '10000', '100', '10000', '100'],
            ['BM_10PercentCPUToTime', '+0.1000',
                '-0.1000', '100', '110', '100', '90'],
            ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
            ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
        ]
        json1, json2 = self.load_results()
        output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
        output_lines_with_header = generate_difference_report(
            json1, json2, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(parts, expect_lines[i])
            self.assertEqual(expect_lines[i], parts)


class TestReportDifferenceBetweenFamilies(unittest.TestCase):
    def load_result(self):
        import json
        testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs')
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput = os.path.join(testInputs, 'test2_run.json')
        with open(testOutput, 'r') as f:
            json = json.load(f)
@@ -189,15 +368,151 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
        json = self.load_result()
        json1 = filter_benchmark(json, "BM_Z.ro", ".")
        json2 = filter_benchmark(json, "BM_O.e", ".")
        output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
        output_lines_with_header = generate_difference_report(
            json1, json2, use_color=False)
        output_lines = output_lines_with_header[2:]
        print "\n"
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(len(parts), 7)
            self.assertEqual(parts, expect_lines[i])
            self.assertEqual(expect_lines[i], parts)


class TestReportDifferenceWithUTest(unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput1 = os.path.join(testInputs, 'test3_run0.json')
        testOutput2 = os.path.join(testInputs, 'test3_run1.json')
        with open(testOutput1, 'r') as f:
            json1 = json.load(f)
        with open(testOutput2, 'r') as f:
            json2 = json.load(f)
        return json1, json2

    def test_utest(self):
        expect_lines = []
        expect_lines = [
            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
            ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
            ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
            ['BM_Two_pvalue',
             '0.6985',
             '0.6985',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '2.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
            ['short_pvalue',
             '0.7671',
             '0.1489',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '3.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
        ]
        json1, json2 = self.load_results()
        output_lines_with_header = generate_difference_report(
            json1, json2, utest=True, utest_alpha=0.05, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(expect_lines[i], parts)


class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
        unittest.TestCase):
    def load_results(self):
        import json
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'Inputs')
        testOutput1 = os.path.join(testInputs, 'test3_run0.json')
        testOutput2 = os.path.join(testInputs, 'test3_run1.json')
        with open(testOutput1, 'r') as f:
            json1 = json.load(f)
        with open(testOutput2, 'r') as f:
            json2 = json.load(f)
        return json1, json2

    def test_utest(self):
        expect_lines = []
        expect_lines = [
            ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
            ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
            ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
            ['BM_Two_pvalue',
             '0.6985',
             '0.6985',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '2.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
            ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
            ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
            ['short_pvalue',
             '0.7671',
             '0.1489',
             'U',
             'Test,',
             'Repetitions:',
             '2',
             'vs',
             '3.',
             'WARNING:',
             'Results',
             'unreliable!',
             '9+',
             'repetitions',
             'recommended.'],
        ]
        json1, json2 = self.load_results()
        output_lines_with_header = generate_difference_report(
            json1, json2, display_aggregates_only=True,
            utest=True, utest_alpha=0.05, use_color=False)
        output_lines = output_lines_with_header[2:]
        print("\n")
        print("\n".join(output_lines_with_header))
        self.assertEqual(len(output_lines), len(expect_lines))
        for i in range(0, len(output_lines)):
            parts = [x for x in output_lines[i].split(' ') if x]
            self.assertEqual(expect_lines[i], parts)


if __name__ == '__main__':
151
utils/google-benchmark/tools/strip_asm.py
Executable file
@@ -0,0 +1,151 @@
#!/usr/bin/env python

"""
strip_asm.py - Cleanup ASM output for the specified file
"""

from argparse import ArgumentParser
import sys
import os
import re

def find_used_labels(asm):
    found = set()
    label_re = re.compile("\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
    for l in asm.splitlines():
        m = label_re.match(l)
        if m:
            found.add('.L%s' % m.group(1))
    return found


def normalize_labels(asm):
    decls = set()
    label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
    for l in asm.splitlines():
        m = label_decl.match(l)
        if m:
            decls.add(m.group(0))
    if len(decls) == 0:
        return asm
    needs_dot = next(iter(decls))[0] != '.'
    if not needs_dot:
        return asm
    for ld in decls:
        asm = re.sub("(^|\s+)" + ld + "(?=:|\s)", '\\1.' + ld, asm)
    return asm

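normalize_labels rewrites bare 'L...' labels into the '.L...' form so that later passes can treat both assembler dialects uniformly; a small illustration on a hypothetical two-line listing (note the lookahead requires a ':' or whitespace after each label, hence the trailing newline):

    asm = "LBB0_1:\n    jne LBB0_1\n"
    print(repr(normalize_labels(asm)))  # -> '.LBB0_1:\n    jne .LBB0_1\n'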
def transform_labels(asm):
    asm = normalize_labels(asm)
    used_decls = find_used_labels(asm)
    new_asm = ''
    label_decl = re.compile("^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
    for l in asm.splitlines():
        m = label_decl.match(l)
        if not m or m.group(0) in used_decls:
            new_asm += l
            new_asm += '\n'
    return new_asm


def is_identifier(tk):
    if len(tk) == 0:
        return False
    first = tk[0]
    if not first.isalpha() and first != '_':
        return False
    for i in range(1, len(tk)):
        c = tk[i]
        if not c.isalnum() and c != '_':
            return False
    return True

def process_identifiers(l):
    """
    process_identifiers - process all identifiers and modify them to have
    consistent names across all platforms; specifically across ELF and MachO.
    For example, MachO inserts an additional underscore at the beginning of
    names. This function removes that.
    """
    parts = re.split(r'([a-zA-Z0-9_]+)', l)
    new_line = ''
    for tk in parts:
        if is_identifier(tk):
            if tk.startswith('__Z'):
                tk = tk[1:]
            elif tk.startswith('_') and len(tk) > 1 and \
                    tk[1].isalpha() and tk[1] != 'Z':
                tk = tk[1:]
        new_line += tk
    return new_line

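The effect of process_identifiers on MachO-style lines (hypothetical instructions, shown only to illustrate the underscore stripping; '__Z...' mangled names keep their '_Z' prefix while plain '_name' symbols lose the leading underscore entirely):

    print(process_identifiers("callq __ZN3foo3barEv"))  # -> "callq _ZN3foo3barEv"
    print(process_identifiers("callq _main"))           # -> "callq main"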
def process_asm(asm):
    """
    Strip the ASM of unwanted directives and lines
    """
    new_contents = ''
    asm = transform_labels(asm)

    # TODO: Add more things we want to remove
    discard_regexes = [
        re.compile("\s+\..*$"),  # directive
        re.compile("\s*#(NO_APP|APP)$"),  # inline ASM
        re.compile("\s*#.*$"),  # comment line
        re.compile("\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"),  # global directive
        re.compile("\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
    ]
    keep_regexes = [

    ]
    fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
    for l in asm.splitlines():
        # Remove Mach-O attribute
        l = l.replace('@GOTPCREL', '')
        add_line = True
        for reg in discard_regexes:
            if reg.match(l) is not None:
                add_line = False
                break
        for reg in keep_regexes:
            if reg.match(l) is not None:
                add_line = True
                break
        if add_line:
            if fn_label_def.match(l) and len(new_contents) != 0:
                new_contents += '\n'
            l = process_identifiers(l)
            new_contents += l
            new_contents += '\n'
    return new_contents

def main():
    parser = ArgumentParser(
        description='generate a stripped assembly file')
    parser.add_argument(
        'input', metavar='input', type=str, nargs=1,
        help='An input assembly file')
    parser.add_argument(
        'out', metavar='output', type=str, nargs=1,
        help='The output file')
    args, unknown_args = parser.parse_known_args()
    input = args.input[0]
    output = args.out[0]
    if not os.path.isfile(input):
        print(("ERROR: input file '%s' does not exist") % input)
        sys.exit(1)
    contents = None
    with open(input, 'r') as f:
        contents = f.read()
    new_contents = process_asm(contents)
    with open(output, 'w') as f:
        f.write(new_contents)


if __name__ == '__main__':
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
@@ -133,7 +133,6 @@ class Configuration(object):
        self.configure_cxx()
        self.configure_triple()
        self.configure_deployment()
        self.configure_availability()
        self.configure_src_root()
        self.configure_obj_root()
        self.configure_cxx_stdlib_under_test()
@@ -307,16 +306,10 @@ class Configuration(object):
        elif self.use_system_cxx_lib == 'false':
            self.use_system_cxx_lib = False
        elif self.use_system_cxx_lib:
            assert os.path.isdir(self.use_system_cxx_lib)
            assert os.path.isdir(self.use_system_cxx_lib), "the specified use_system_cxx_lib parameter (%s) is not a valid directory" % self.use_system_cxx_lib
        self.lit_config.note(
            "inferred use_system_cxx_lib as: %r" % self.use_system_cxx_lib)

    def configure_availability(self):
        # See http://llvm.org/docs/AvailabilityMarkup.html
        self.with_availability = self.get_lit_bool('with_availability', False)
        self.lit_config.note(
            "inferred with_availability as: %r" % self.with_availability)

    def configure_cxx_stdlib_under_test(self):
        self.cxx_stdlib_under_test = self.get_lit_conf(
            'cxx_stdlib_under_test', 'libc++')
@@ -338,9 +331,6 @@ class Configuration(object):

    def configure_use_clang_verify(self):
        '''If set, run clang with -verify on failing tests.'''
        if self.with_availability:
            self.use_clang_verify = False
            return
        self.use_clang_verify = self.get_lit_bool('use_clang_verify')
        if self.use_clang_verify is None:
            # NOTE: We do not test for the -verify flag directly because
@@ -417,11 +407,10 @@ class Configuration(object):
            self.add_deployment_feature('with_system_cxx_lib')

        # Configure the availability markup checks features.
        if self.with_availability:
            if self.use_deployment:
                self.config.available_features.add('availability_markup')
                self.add_deployment_feature('availability_markup')

        if self.use_system_cxx_lib or self.with_availability:
            self.config.available_features.add('availability')
            self.add_deployment_feature('availability')

@@ -446,7 +435,7 @@ class Configuration(object):
        # Run a compile test for the -fsized-deallocation flag. This is needed
        # in test/std/language.support/support.dynamic/new.delete
        if self.cxx.hasCompileFlag('-fsized-deallocation'):
            self.config.available_features.add('fsized-deallocation')
            self.config.available_features.add('-fsized-deallocation')

        if self.cxx.hasCompileFlag('-faligned-allocation'):
            self.config.available_features.add('-faligned-allocation')
@@ -585,9 +574,6 @@ class Configuration(object):
            self.cxx.flags += ['-arch', arch]
            self.cxx.flags += ['-m' + name + '-version-min=' + version]

        # Disable availability unless explicitly requested
        if not self.with_availability:
            self.cxx.flags += ['-D_LIBCPP_DISABLE_AVAILABILITY']
        # FIXME(EricWF): variant_size.pass.cpp requires a slightly larger
        # template depth with older Clang versions.
        self.cxx.addFlagIfSupported('-ftemplate-depth=270')
@@ -677,7 +663,8 @@ class Configuration(object):
            if feature_macros[m]:
                define += '=%s' % (feature_macros[m])
            self.cxx.compile_flags += [define]
            if m == '_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS':
            if m == '_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS' or \
                    m == '_LIBCPP_HIDE_FROM_ABI_PER_TU_BY_DEFAULT':
                continue
            if m == '_LIBCPP_ABI_VERSION':
                self.config.available_features.add('libcpp-abi-version-v%s'
@@ -720,13 +707,9 @@ class Configuration(object):
        enable_fs = self.get_lit_bool('enable_filesystem', default=False)
        if not enable_fs:
            return
        enable_experimental = self.get_lit_bool('enable_experimental', default=False)
        if not enable_experimental:
            self.lit_config.fatal(
                'filesystem is enabled but libc++experimental.a is not.')
        self.config.available_features.add('c++filesystem')
        static_env = os.path.join(self.libcxx_src_root, 'test', 'std',
                                  'experimental', 'filesystem', 'Inputs', 'static_test_env')
                                  'input.output', 'filesystems', 'Inputs', 'static_test_env')
        static_env = os.path.realpath(static_env)
        assert os.path.isdir(static_env)
        self.cxx.compile_flags += ['-DLIBCXX_FILESYSTEM_STATIC_TEST_ROOT="%s"' % static_env]
@@ -821,6 +804,10 @@ class Configuration(object):
        if libcxx_experimental:
            self.config.available_features.add('c++experimental')
            self.cxx.link_flags += ['-lc++experimental']
        libcxx_fs = self.get_lit_bool('enable_filesystem', default=False)
        if libcxx_fs:
            self.config.available_features.add('c++fs')
            self.cxx.link_flags += ['-lc++fs']
        if self.link_shared:
            self.cxx.link_flags += ['-lc++']
        else:
@@ -930,12 +917,6 @@ class Configuration(object):
        self.cxx.addWarningFlagIfSupported('-Wunused-variable')
        self.cxx.addWarningFlagIfSupported('-Wunused-parameter')
        self.cxx.addWarningFlagIfSupported('-Wunreachable-code')
        # FIXME: Enable the two warnings below.
        self.cxx.addWarningFlagIfSupported('-Wno-conversion')
        self.cxx.addWarningFlagIfSupported('-Wno-unused-local-typedef')
        # FIXME: Remove this warning once the min/max handling patch lands
        # See https://reviews.llvm.org/D33080
        self.cxx.addWarningFlagIfSupported('-Wno-#warnings')
        std = self.get_lit_conf('std', None)
        if std in ['c++98', 'c++03']:
            # The '#define static_assert' provided by libc++ in C++03 mode

@@ -188,7 +188,7 @@ class LibcxxTestFormat(object):
            if rc != 0:
                report = libcxx.util.makeReport(cmd, out, err, rc)
                report += "Compilation failed unexpectedly!"
                return lit.Test.FAIL, report
                return lit.Test.Result(lit.Test.FAIL, report)
        # Run the test
        local_cwd = os.path.dirname(source_path)
        env = None
@@ -206,14 +206,14 @@ class LibcxxTestFormat(object):
            cmd, out, err, rc = self.executor.run(exec_path, [exec_path],
                                                  local_cwd, data_files,
                                                  env)
            report = "Compiled With: '%s'\n" % ' '.join(compile_cmd)
            report += libcxx.util.makeReport(cmd, out, err, rc)
            if rc == 0:
                res = lit.Test.PASS if retry_count == 0 else lit.Test.FLAKYPASS
                return res, ''
                return lit.Test.Result(res, report)
            elif rc != 0 and retry_count + 1 == max_retry:
                report = libcxx.util.makeReport(cmd, out, err, rc)
                report = "Compiled With: %s\n%s" % (compile_cmd, report)
                report += "Compiled test failed unexpectedly!"
                return lit.Test.FAIL, report
                return lit.Test.Result(lit.Test.FAIL, report)

        assert False  # Unreachable
    finally:
@@ -250,16 +250,15 @@ class LibcxxTestFormat(object):
        #
        # Therefore, we check if the test was expected to fail because of
        # nodiscard before enabling it
        test_str = "ignoring return value of function declared with " \
            + "'nodiscard' attribute"
        if test_str in contents:
        test_str_list = ['ignoring return value', 'nodiscard', 'NODISCARD']
        if any(test_str in contents for test_str in test_str_list):
            test_cxx.flags += ['-Werror=unused-result']
        cmd, out, err, rc = test_cxx.compile(source_path, out=os.devnull)
        expected_rc = 0 if use_verify else 1
        report = libcxx.util.makeReport(cmd, out, err, rc)
        if rc == expected_rc:
            return lit.Test.PASS, ''
            return lit.Test.Result(lit.Test.PASS, report)
        else:
            report = libcxx.util.makeReport(cmd, out, err, rc)
            report_msg = ('Expected compilation to fail!' if not use_verify else
                          'Expected compilation using verify to pass!')
            return lit.Test.FAIL, report + report_msg + '\n'
            report += ('Expected compilation to fail!\n' if not use_verify else
                       'Expected compilation using verify to pass!\n')
            return lit.Test.Result(lit.Test.FAIL, report)
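The widened nodiscard detection is an ordinary any-over-substrings check; standalone, with a hypothetical expected-error line as it might appear in a test file:

    contents = "// expected-error {{ignoring return value of function declared with 'nodiscard' attribute}}"
    test_str_list = ['ignoring return value', 'nodiscard', 'NODISCARD']
    if any(test_str in contents for test_str in test_str_list):
        print('will compile with -Werror=unused-result')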
122
utils/libcxx/test/googlebenchmark.py
Normal file
@@ -0,0 +1,122 @@
from __future__ import absolute_import
import os
import subprocess
import sys

import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat

kIsWindows = sys.platform in ['win32', 'cygwin']

class GoogleBenchmark(TestFormat):
    def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
        self.benchmark_args = list(benchmark_args)
        self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')

        # On Windows, assume tests will also end in '.exe'.
        exe_suffix = str(test_suffix)
        if kIsWindows:
            exe_suffix += '.exe'

        # Also check for .py files for testing purposes.
        self.test_suffixes = {exe_suffix, test_suffix + '.py'}

    def getBenchmarkTests(self, path, litConfig, localConfig):
        """getBenchmarkTests(path) - [name]

        Return the tests available in gtest executable.

        Args:
          path: String path to a gtest executable
          litConfig: LitConfig instance
          localConfig: TestingConfig instance"""

        # TODO: allow splitting tests according to the "benchmark family" so
        # the output for a single family of tests all belongs to the same test
        # target.
        list_test_cmd = [path, '--benchmark_list_tests']
        try:
            output = subprocess.check_output(list_test_cmd,
                                             env=localConfig.environment)
        except subprocess.CalledProcessError as exc:
            litConfig.warning(
                "unable to discover google-benchmarks in %r: %s. Process output: %s"
                % (path, sys.exc_info()[1], exc.output))
            raise StopIteration

        nested_tests = []
        for ln in output.splitlines(False):  # Don't keep newlines.
            ln = lit.util.to_string(ln)
            if not ln.strip():
                continue

            index = 0
            while ln[index*2:index*2+2] == '  ':
                index += 1
            while len(nested_tests) > index:
                nested_tests.pop()

            ln = ln[index*2:]
            if ln.endswith('.'):
                nested_tests.append(ln)
            elif any([name.startswith('DISABLED_')
                      for name in nested_tests + [ln]]):
                # Gtest will internally skip these tests. No need to launch a
                # child process for it.
                continue
            else:
                yield ''.join(nested_tests) + ln

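The indentation loop assumes the conventions of '--benchmark_list_tests' output: two spaces per nesting level, with group names ending in '.'. Reproducing just that nesting logic on a hypothetical listing (the DISABLED_ filtering is omitted):

    listing = "BM_Group.\n  BM_Member\nBM_Flat"
    nested_tests, names = [], []
    for ln in listing.splitlines():
        index = 0
        while ln[index*2:index*2+2] == '  ':
            index += 1
        while len(nested_tests) > index:
            nested_tests.pop()
        ln = ln[index*2:]
        if ln.endswith('.'):
            nested_tests.append(ln)
        else:
            names.append(''.join(nested_tests) + ln)
    assert names == ['BM_Group.BM_Member', 'BM_Flat']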
    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for subdir in self.test_sub_dirs:
            dir_path = os.path.join(source_path, subdir)
            if not os.path.isdir(dir_path):
                continue
            for fn in lit.util.listdir_files(dir_path,
                                             suffixes=self.test_suffixes):
                # Discover the tests in this executable.
                execpath = os.path.join(source_path, subdir, fn)
                testnames = self.getBenchmarkTests(execpath, litConfig, localConfig)
                for testname in testnames:
                    testPath = path_in_suite + (subdir, fn, testname)
                    yield lit.Test.Test(testSuite, testPath, localConfig,
                                        file_path=execpath)

    def execute(self, test, litConfig):
        testPath,testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = namePrefix + '/' + testName

        cmd = [testPath, '--benchmark_filter=%s$' % testName] + self.benchmark_args

        if litConfig.noExecute:
            return lit.Test.PASS, ''

        try:
            out, err, exitCode = lit.util.executeCommand(
                cmd, env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime)
        except lit.util.ExecuteCommandTimeoutException:
            return (lit.Test.TIMEOUT,
                    'Reached timeout of {} seconds'.format(
                        litConfig.maxIndividualTestTime)
                    )

        if exitCode:
            return lit.Test.FAIL, out + err

        passing_test_line = testName
        if passing_test_line not in out:
            msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
                   (passing_test_line, out, err))
            return lit.Test.UNRESOLVED, msg

        return lit.Test.PASS, err + out

@@ -15,6 +15,8 @@ import re
import subprocess
import sys

from libcxx.util import executeCommand

class DefaultTargetInfo(object):
    def __init__(self, full_config):
        self.full_config = full_config
@@ -127,14 +129,13 @@ class DarwinLocalTI(DefaultTargetInfo):
            cmd = ['xcrun', '--sdk', name, '--show-sdk-path']
        else:
            cmd = ['xcrun', '--show-sdk-path']
        try:
            out = subprocess.check_output(cmd).strip()
            res = 0
        except OSError:
            res = -1
        if res == 0 and out:
            sdk_path = out
        out, err, exit_code = executeCommand(cmd)
        if exit_code != 0:
            self.full_config.lit_config.warning("Could not determine macOS SDK path! stderr was " + err)
        if exit_code == 0 and out:
            sdk_path = out.strip()
            self.full_config.lit_config.note('using SDKROOT: %r' % sdk_path)
            assert isinstance(sdk_path, str)
            flags += ["-isysroot", sdk_path]

    def add_cxx_link_flags(self, flags):
@@ -222,12 +223,17 @@ class LinuxLocalTI(DefaultTargetInfo):
                                 self.full_config.config.available_features)
        llvm_unwinder = self.full_config.get_lit_bool('llvm_unwinder', False)
        shared_libcxx = self.full_config.get_lit_bool('enable_shared', True)
        # FIXME: Remove the need to link -lrt in all the tests, and instead
        # limit it only to the filesystem tests. This ensures we don't cause an
        # implicit dependency on librt except when filesystem is needed.
        enable_fs = self.full_config.get_lit_bool('enable_filesystem',
                                                  default=False)
        flags += ['-lm']
        if not llvm_unwinder:
            flags += ['-lgcc_s', '-lgcc']
        if enable_threads:
            flags += ['-lpthread']
        if not shared_libcxx:
        if not shared_libcxx or enable_fs:
            flags += ['-lrt']
        flags += ['-lc']
        if llvm_unwinder: