build/util/lib/common/perf_tests_results_helper.py

# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import json
import logging
import math
import re
import sys

from lib.common import perf_result_data_type


# Mapping from result type to its test output prefix.
RESULT_TYPES = {perf_result_data_type.UNIMPORTANT: 'RESULT ',
                perf_result_data_type.DEFAULT: '*RESULT ',
                perf_result_data_type.INFORMATIONAL: '',
                perf_result_data_type.UNIMPORTANT_HISTOGRAM: 'HISTOGRAM ',
                perf_result_data_type.HISTOGRAM: '*HISTOGRAM '}


def _EscapePerfResult(s):
  """Escapes |s| for use in a perf result."""
  # Use a raw string for the character class; '\:' in a plain string literal
  # is an invalid escape sequence in modern Python.
  return re.sub(r'[:|=/#&,]', '_', s)
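
# A quick sketch (not part of the original module): characters that would
# break the 'key: value= ...' output format are replaced with underscores.
#
#   _EscapePerfResult('load_time:cold/warm')  # -> 'load_time_cold_warm'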


def FlattenList(values):
  """Returns a simple list without sub-lists."""
  ret = []
  for entry in values:
    if isinstance(entry, list):
      ret.extend(FlattenList(entry))
    else:
      ret.append(entry)
  return ret
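
# A minimal sketch (not part of the original module): nested sub-lists are
# flattened recursively, preserving element order.
#
#   FlattenList([1, [2, 3], [4, [5]]])  # -> [1, 2, 3, 4, 5]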


def GeomMeanAndStdDevFromHistogram(histogram_json):
  """Computes the geometric mean and standard deviation of a JSON histogram.

  Each bucket is represented by its midpoint ('low' alone when 'high' is
  absent), weighted by its 'count'. Buckets with non-positive means are
  skipped, since they have no logarithm.
  """
  histogram = json.loads(histogram_json)
  # Handle empty histograms gracefully.
  if 'buckets' not in histogram:
    return 0.0, 0.0
  count = 0
  sum_of_logs = 0
  for bucket in histogram['buckets']:
    if 'high' in bucket:
      bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
    else:
      bucket['mean'] = bucket['low']
    if bucket['mean'] > 0:
      sum_of_logs += math.log(bucket['mean']) * bucket['count']
      count += bucket['count']

  if count == 0:
    return 0.0, 0.0

  sum_of_squares = 0
  geom_mean = math.exp(sum_of_logs / count)
  for bucket in histogram['buckets']:
    if bucket['mean'] > 0:
      sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
  return geom_mean, math.sqrt(sum_of_squares / count)
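
# A minimal usage sketch (not part of the original module). The bucket layout
# below is illustrative but uses the keys this function reads ('low', 'high',
# 'count'); a bucket without 'high' uses 'low' as its mean.
#
#   geom_mean, sd = GeomMeanAndStdDevFromHistogram(json.dumps({
#       'buckets': [{'low': 1, 'high': 3, 'count': 2},   # bucket mean 2.0
#                   {'low': 8, 'count': 2}]}))           # bucket mean 8.0
#   # geom_mean == 4.0, sd == sqrt(10) ~= 3.162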


def _ValueToString(v):
  # Special case for floats so we don't print using scientific notation.
  if isinstance(v, float):
    return '%f' % v
  else:
    return str(v)


def _MeanAndStdDevFromList(values):
  """Returns (value string, mean, sample standard deviation) for |values|.

  For a single value, or for values that cannot be parsed as floats, the
  mean and standard deviation are left as None.
  """
  avg = None
  sd = None
  if len(values) > 1:
    try:
      value = '[%s]' % ','.join([_ValueToString(v) for v in values])
      avg = sum([float(v) for v in values]) / len(values)
      sqdiffs = [(float(v) - avg) ** 2 for v in values]
      variance = sum(sqdiffs) / (len(values) - 1)
      sd = math.sqrt(variance)
    except ValueError:
      value = ', '.join(values)
  else:
    value = values[0]
  return value, avg, sd
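
# A minimal sketch (not part of the original module). Note that the standard
# deviation is the sample standard deviation (n - 1 in the denominator):
#
#   value, avg, sd = _MeanAndStdDevFromList([1, 2, 3])
#   # value == '[1,2,3]', avg == 2.0, sd == 1.0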


def PrintPages(page_list):
  """Prints list of pages to stdout in the format required by perf tests."""
  print('Pages: [%s]' % ','.join([_EscapePerfResult(p) for p in page_list]))


def PrintPerfResult(measurement, trace, values, units,
                    result_type=perf_result_data_type.DEFAULT,
                    print_to_stdout=True):
  """Prints numerical data to stdout in the format required by perf tests.

  The string args may be empty but they must not contain any colons (:) or
  equals signs (=).

  Args:
    measurement: A description of the quantity being measured, e.g. "vm_peak".
        On the dashboard, this maps to a particular graph. Mandatory.
    trace: A description of the particular data point, e.g. "reference".
        On the dashboard, this maps to a particular "line" in the graph.
        Mandatory.
    values: A list of numeric measured values. An N-dimensional list will be
        flattened and treated as a simple list.
    units: A description of the units of measure, e.g. "bytes".
    result_type: Accepts values of perf_result_data_type.ALL_TYPES.
    print_to_stdout: If True, also prints the output to stdout.

  Returns:
    String of the formatted perf result.
  """
  assert perf_result_data_type.IsValidType(result_type), \
         'result type: %s is invalid' % result_type

  trace_name = _EscapePerfResult(trace)

  if (result_type == perf_result_data_type.UNIMPORTANT or
      result_type == perf_result_data_type.DEFAULT or
      result_type == perf_result_data_type.INFORMATIONAL):
    assert isinstance(values, list)
    assert '/' not in measurement
    flattened_values = FlattenList(values)
    assert len(flattened_values)
    value, avg, sd = _MeanAndStdDevFromList(flattened_values)
    output = '%s%s: %s%s%s %s' % (
        RESULT_TYPES[result_type],
        _EscapePerfResult(measurement),
        trace_name,
        # Do not show the equals sign if the trace is empty. This usually
        # happens when the measurement name alone describes the result.
        '= ' if trace_name else '',
        value,
        units)
  else:
    assert perf_result_data_type.IsHistogram(result_type)
    assert isinstance(values, list)
    # Histograms can only be printed individually; there's no computation
    # across different histograms.
    assert len(values) == 1
    value = values[0]
    output = '%s%s: %s= %s %s' % (
        RESULT_TYPES[result_type],
        _EscapePerfResult(measurement),
        trace_name,
        value,
        units)
    avg, sd = GeomMeanAndStdDevFromHistogram(value)

  if avg:
    output += '\nAvg %s: %f%s' % (measurement, avg, units)
  if sd:
    output += '\nSd  %s: %f%s' % (measurement, sd, units)
  if print_to_stdout:
    print(output)
    sys.stdout.flush()
  return output
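
# A minimal usage sketch (not part of the original module) for the default
# result type; the measurement and trace names are illustrative.
#
#   PrintPerfResult('vm_peak', 'reference', [1024, 1100], 'bytes')
#   # *RESULT vm_peak: reference= [1024,1100] bytes
#   # Avg vm_peak: 1062.000000bytes
#   # Sd  vm_peak: 53.740115bytes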


def ReportPerfResult(chart_data, graph_title, trace_title, value, units,
                     improvement_direction='down', important=True):
  """Outputs test results in correct format.

  If chart_data is None, it outputs data in old format. If chart_data is a
  dictionary, formats in chartjson format. If any other format defaults to
  old format.

  Args:
    chart_data: A dictionary corresponding to perf results in the chartjson
        format.
    graph_title: A string containing the name of the chart to add the result
        to.
    trace_title: A string containing the name of the trace within the chart
        to add the result to.
    value: The value of the result being reported.
    units: The units of the value being reported.
    improvement_direction: A string denoting whether higher or lower is
        better for the result. Either 'up' or 'down'.
    important: A boolean denoting whether the result is important or not.
  """
  if chart_data and isinstance(chart_data, dict):
    chart_data['charts'].setdefault(graph_title, {})
    chart_data['charts'][graph_title][trace_title] = {
        'type': 'scalar',
        'value': value,
        'units': units,
        'improvement_direction': improvement_direction,
        'important': important
    }
  else:
    PrintPerfResult(graph_title, trace_title, [value], units)
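
# A minimal usage sketch (not part of the original module): the caller is
# expected to provide a chart_data dict that already has a 'charts' key.
# The chart and trace names below are illustrative.
#
#   chart_data = {'charts': {}}
#   ReportPerfResult(chart_data, 'load_time', 'cold_start', 123, 'ms')
#   # chart_data['charts']['load_time']['cold_start'] == {
#   #     'type': 'scalar', 'value': 123, 'units': 'ms',
#   #     'improvement_direction': 'down', 'important': True}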