path: root/src/library/scala/testing/Benchmark.scala
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2003-2013, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */

package scala.testing

import scala.compat.Platform

/** `Benchmark` can be used to quickly turn an existing class into a
 *  benchmark. Here is a short example:
 *  {{{
 *  object sort1 extends Sorter with Benchmark {
 *    def run() { sort(List.range(1, 1000)) }
 *  }
 *  }}}
 *  The `run` method has to be defined by the user; it contains the code
 *  to be timed. Run the benchmark as follows:
 *  {{{
 *  > scala sort1 5
 *  }}}
 *  This will run the benchmark 5 times, forcing a garbage collection
 *  between runs, and printing the execution times to stdout.
 *
 *  It is also possible to add a multiplier, so
 *  {{{
 *  > scala sort1 5 10
 *  }}}
 *  will again perform 5 timed runs, but each run now consists of 10
 *  consecutive calls to `run`.
 *
 *  @author Iulian Dragos, Burak Emir
 */
@deprecated("This class will be removed.", "2.10.0")
trait Benchmark {

  /** This method should be implemented by the concrete benchmark class.
   *  It is called repeatedly by the benchmarking code; the garbage
   *  collector is invoked after each timed group of `multiplier` calls
   *  to `run`, right after tear down.
   *
   *  @see setUp
   *  @see tearDown
   */
  def run()

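  /** The number of consecutive calls to `run` in each timed measurement.
   *  Set from the second command-line argument, when given.
   */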
  var multiplier = 1

  /** Run the benchmark the specified number of times and return a list of
   *  the execution times, in milliseconds, in the order of execution.
   */
  def runBenchmark(noTimes: Int): List[Long] =
    for (i <- List.range(1, noTimes + 1)) yield {
      setUp()
      val startTime = Platform.currentTime
      var j = 0; while (j < multiplier) {
        run()
        j += 1
      }
      val stopTime = Platform.currentTime
      tearDown()
      Platform.collectGarbage()

      stopTime - startTime
    }
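
  /* `runBenchmark` can also be invoked directly. A minimal sketch, assuming
   * the `sort1` object from the class documentation above:
   * {{{
   * val times = sort1.runBenchmark(5)              // five timed measurements, in ms
   * val avg   = times.sum.toDouble / times.length  // mean execution time
   * println("average: " + avg + " ms")
   * }}}
   */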

  /** Prepare any data needed by the benchmark, whose preparation should
   *  not count towards the measured time. This method is run before each
   *  timed group of calls to the benchmark payload, `run`.
   */
  def setUp() {}

  /** Perform cleanup operations after each timed group of calls to `run`.
   *  For micro-benchmarks, consider consuming the result of `run` in a way
   *  that prevents the JVM from dead-code-eliminating the whole `run`
   *  method; for instance, print the result or write it to a file. The
   *  execution time of this method is not measured.
   */
  def tearDown() {}
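
  /* A sketch of the pattern described above (`sortBench` and its fields are
   * hypothetical): keep the result of `run` in a field and consume it in
   * `tearDown`, so the JIT compiler cannot treat the timed work as dead code.
   * {{{
   * object sortBench extends Benchmark {
   *   var result: List[Int] = Nil
   *   def run() { result = List.range(1, 10000).reverse.sorted }
   *   override def tearDown() { Console.err.println(result.length) }
   * }
   * }}}
   */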

  /** A string that is written at the beginning of the output line that
   *  contains the timings. By default, this is the class name.
   */
  def prefix: String = getClass().getName()

  /**
   * The entry point. It takes up to two arguments:
   * - argument `n` is the number of timed runs
   * - optional argument `mult` sets the multiplier: each of the `n` timed
   *   runs then consists of `mult` consecutive calls to `run`.
   */
  def main(args: Array[String]) {
    if (args.length > 0) {
      val out = new java.io.OutputStreamWriter(System.out)
      if (args.length > 1) multiplier = args(1).toInt
      out.write(prefix)
      for (t <- runBenchmark(args(0).toInt))
        out.write("\t" + t)

      out.write(Platform.EOL)
      out.flush()
    } else {
      println("Usage: scala benchmarks.program <runs> ")
      println("   or: scala benchmarks.program <runs> <multiplier>")
      println("""
    The benchmark is run <runs> times, forcing a garbage collection between runs. The optional
    <multiplier> causes the benchmark to be repeated <multiplier> times, each time for <runs>
    executions.
      """)
    }
  }
}
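
/* An end-to-end usage sketch (all names hypothetical): the input list is
 * built in `setUp`, so its construction is not part of the measured time,
 * and the result is consumed in `tearDown`.
 * {{{
 * object listSort extends Benchmark {
 *   var data: List[Int] = Nil
 *   var result: List[Int] = Nil
 *
 *   override def setUp() { data = List.range(10000, 0, -1) }
 *   def run() { result = data.sorted }
 *   override def tearDown() { Console.err.println(result.head) }
 * }
 * }}}
 * Compiled and run as, for example:
 * {{{
 * > scalac listSort.scala
 * > scala listSort 10 100
 * }}}
 * this performs 10 timed measurements of 100 calls to `run` each.
 */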