#!/bin/bash

# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# Wrapper to run the platform_BootPerfServer autotest, and store the
# results for later analysis by the 'showbootdata' script.
#
# NOTE: This script must be run from inside the chromeos build
# chroot environment.
#

# --- BEGIN COMMON.SH BOILERPLATE ---
# Load common CrOS utilities.  Inside the chroot this file is installed in
# /usr/lib/crosutils.  Outside the chroot we find it relative to the script's
# location.
find_common_sh() {
  local common_paths=(/usr/lib/crosutils "$(dirname "$(readlink -f "$0")")/..")
  local path

  SCRIPT_ROOT=
  for path in "${common_paths[@]}"; do
    if [ -r "${path}/common.sh" ]; then
      SCRIPT_ROOT=${path}
      break
    fi
  done
}

find_common_sh
. "${SCRIPT_ROOT}/common.sh" || (echo "Unable to load common.sh" && exit 1)
# --- END COMMON.SH BOILERPLATE ---

# TODO(jrbarnette) The log files produced in this script will be
# stored inside the chroot.  So, from outside the chroot, this
# script doesn't work.  I don't know if this is easy to fix, but
# you're welcome to try.  Let me know how it goes.  :-)
assert_inside_chroot

DEFINE_string output_dir "" "output directory for results" o
DEFINE_string board "" "name of board we are testing"
DEFINE_boolean keep_logs "$FLAGS_FALSE" "keep autotest results" k

RUN_TEST="test_that"
TEST='platform_BootPerfServer.bootperf'
TMP_RESULTS="$(mktemp -d /tmp/bootperf.XXXXXX)"
RESULTS_ROOT="results-1-$TEST"
RESULTS_DIR=platform_BootPerfServer/results
RESULTS_KEYVAL=$RESULTS_DIR/keyval
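# Subset of the autotest output tree (relative to $RESULTS_ROOT) that
# is preserved in each run's summary directory.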
RESULTS_SUMMARY_FILES=(
  $RESULTS_DIR
  keyval
  platform_BootPerfServer/keyval
  platform_BootPerfServer/platform_BootPerf/keyval
  platform_BootPerfServer/platform_BootPerf/status
  platform_BootPerfServer/status
  platform_BootPerfServer/status.log
  platform_BootPerfServer/sysinfo/cmdline
  platform_BootPerfServer/sysinfo/cpuinfo
  platform_BootPerfServer/sysinfo/modules
  platform_BootPerfServer/sysinfo/uname
  platform_BootPerfServer/sysinfo/version
  status.log
)

# Structure of a results directory:
#   $RUNDIR.$ITER/          - directory
#       $RUNDIR_LOG             - file
#       $RUNDIR_SUMMARY/        - directory
#       $RUNDIR_ALL_RESULTS/    - optional directory
#   $KEYVAL_SUMMARY         - file
# If you add any other content under the results directory, you'll
# probably need to change extra_files(), below.
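#
# For example, after two runs the output directory might contain
# (names shown for illustration):
#   run.000/log.txt
#   run.000/summary/...
#   run.000/logs/...        (only with --keep_logs)
#   run.001/...
#   results_keyval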
RUNDIR=run
RUNDIR_LOG=log.txt
RUNDIR_SUMMARY=summary
RUNDIR_ALL_RESULTS=logs
KEYVAL_SUMMARY=results_keyval


# Usage/help function.  This function is known to the shflags library,
# and mustn't be renamed.
flags_help() {
  cat <<END_USAGE >&2
usage: $(basename $0) [ <options> ] <ip-address> [ <count> ]
Options:
  --output_dir <directory>
  -o <directory>        Specify output directory for results

  --board <BOARDNAME>   name of board we are testing (e.g. daisy)

  --[no]keep_logs
  -k                    Keep [don't keep] autotest log files
Summary:
  Run the platform_BootPerfServer autotest, and store results in the
  given destination directory.  The test target is specified by
  <ip-address>.

  By default, the test is run once; if <count> is given, the test is
  run that many times.  Note that the platform_BootPerfServer test
  reboots the target 10 times, so the total number of reboots will
  be 10*<count>.

  If the destination directory doesn't exist, it is created.  If the
  destination directory already holds test results, additional
  results are added in without overwriting earlier results.

  If no destination is specified, the current directory is used,
  provided that the directory is empty, or has been previously used
  as a destination directory for this command.

  By default, only a summary subset of the log files created by
  autotest is preserved; with --keep_logs the (potentially large)
  autotest logs are preserved with the test results.
END_USAGE
  return $FLAGS_TRUE
}
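
# Example invocation (illustrative values; substitute your own target
# address, board name, and output directory):
#
#   <this script> --board daisy --output_dir /tmp/bootperf.results 172.22.68.70 5
#
# runs the test 5 times against the device at 172.22.68.70 (50 reboots
# in total) and collects results under /tmp/bootperf.results.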

usage() {
  if [ $# -gt 0 ]; then
    error "$(basename $0): $*"
    echo >&2
  fi
  flags_help
  exit 1
}

# List any files in the current directory not created as output
# from running this script.
extra_files() {
  ls | grep -v "^$RUNDIR[.]...\$" |
       grep -v "^$KEYVAL_SUMMARY\$"
}

# Main function to run the boot performance test.  Run the boot
# performance test for the given count, putting output into the
# current directory.
#
# Arguments are <ip-address> and <count>, as for the main command.
#
# We terminate test runs if "test_that" ever fails to produce the
# results keyval file; generally this is the result of a serious
# error (e.g. disk full) that won't go away if we just plow on.
run_boot_test() {
  local remote="$1"
  local count="${2:-1}"

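  # Determine the starting iteration number.  The glob $RUNDIR.???
  # expands to the existing run directories (e.g. run.000 run.001);
  # expr keeps the last three characters, i.e. the suffix of the
  # highest-numbered run, which is then incremented below.  If there
  # are no run directories yet, the unmatched glob is left literal,
  # the match yields "???", and numbering starts at 000.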
  local iter=$(expr "$(echo $RUNDIR.???)" : '.*\(...\)')
  if [ "$iter" != "???" ]; then
      iter=$(echo $iter | awk '{printf "%03d\n", $1 + 1}')
  else
      iter=000
  fi

  i=0
  while [ $i -lt $count ]; do
    local iter_rundir=$RUNDIR.$iter
    local logfile=$(pwd)/$iter_rundir/$RUNDIR_LOG
    local summary_dir=$iter_rundir/$RUNDIR_SUMMARY
    local all_results_dir=$iter_rundir/$RUNDIR_ALL_RESULTS

    mkdir $iter_rundir
    echo "$(date '+%T') - $logfile"

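    # Run the server-side test against the target; all test_that
    # output is captured in this run's log file.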
    $RUN_TEST --results_dir="$TMP_RESULTS" --args "10" $BOARD \
              "$remote" $TEST >$logfile 2>&1
    if [ ! -e "$TMP_RESULTS/$RESULTS_ROOT/$RESULTS_KEYVAL" ]; then
      error "No results file; terminating test runs."
      error "Check $logfile for output from the test run,"
      error "and see $TMP_RESULTS for full test logs and output."
      return
    fi
    mkdir $summary_dir
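    # Copy just the summary subset of the autotest results tree into
    # this run's summary directory, preserving the relative layout.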
    tar cf - -C $TMP_RESULTS/$RESULTS_ROOT "${RESULTS_SUMMARY_FILES[@]}" |
      tar xf - -C $summary_dir
    if [ $FLAGS_keep_logs -eq $FLAGS_TRUE ]; then
      mv $TMP_RESULTS $all_results_dir
      chmod 755 $all_results_dir
    else
      rm -rf $TMP_RESULTS
    fi
    i=$(expr $i + 1)
    iter=$(echo $iter | awk '{printf "%03d\n", $1 + 1}')
  done
  date '+%T'
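  # Combine the keyvals from all runs (including runs from earlier
  # invocations of this script) into the single summary file read by
  # 'showbootdata'.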
  cat $RUNDIR.???/$RUNDIR_SUMMARY/$RESULTS_KEYVAL >$KEYVAL_SUMMARY
}

# Main routine - check validity of the (already parsed) command line
# options, and 'cd' to the results directory if one was specified.
# If all the argument checks pass, hand control to run_boot_test().
main() {
  if [ $# -lt 1 ]; then
      usage "Missing target host address"
  elif [ $# -gt 2 ]; then
      usage "Too many arguments"
  fi

  if [ -n "${FLAGS_board}" ]; then
    BOARD="--board=${FLAGS_board}"
  fi

  if [ -n "${FLAGS_output_dir}" ]; then
    if [ ! -d "${FLAGS_output_dir}" ]; then
      if ! mkdir "${FLAGS_output_dir}"; then
        usage "Unable to create ${FLAGS_output_dir}"
      fi
    fi
    cd "${FLAGS_output_dir}" ||
      usage "No permissions to chdir to ${FLAGS_output_dir}"
  elif [ -n "$(extra_files)" ]; then
    error "No results directory specified, and current directory"
    error "contains contents other than run results."
    error "You can override this error by using the --output_dir option"
    usage
  fi

  # Check the count argument.
  # N.B. the test [ "$2" -eq "$2" ] tests whether "$2" is valid as a
  # number; when it fails it will also report a syntax error (which
  # we suppress).
  if [ -n "$2" ]; then
    if ! [ "$2" -eq "$2" ] 2>/dev/null || [ "$2" -le 0 ]; then
      usage "<count> argument must be a positive number"
    fi
  fi

  run_boot_test "$@"
}

# shflags defines --help implicitly; if it's used on the command
# line FLAGS will invoke flags_help, set FLAGS_help to TRUE, and
# then return false.  To avoid printing help twice, we have to check
# for that case here.
if ! FLAGS "$@"; then
  if [ ${FLAGS_help} -eq ${FLAGS_TRUE} ]; then
    exit 0
  else
    usage
  fi
fi

eval main "${FLAGS_ARGV}"