/* NSC -- new scala compiler
 * Copyright 2005-2013 LAMP/EPFL
 * @author  Martin Odersky
 */


package scala.tools.nsc
package backend
package icode

import scala.collection.{ mutable, immutable }
import mutable.ListBuffer

trait Linearizers {
  self: ICodes =>

  import global.debuglog
  import opcodes._

  abstract class Linearizer {
    def linearize(c: IMethod): List[BasicBlock]
    def linearizeAt(c: IMethod, start: BasicBlock): List[BasicBlock]
  }

  /**
   * A simple linearizer which predicts all branches to
   * take the 'success' branch and tries to schedule those
   * blocks immediately after the test. This is in sync with
   * how 'while' statements are translated (if the test is
   * 'true', the loop continues).
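   * For example, in a translated 'while' loop the condition block ends in a
   * conditional jump whose 'success' target is the loop body, so the body is
   * scheduled right after the test and the exit block is emitted later.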
   */
  class NormalLinearizer extends Linearizer with WorklistAlgorithm {
    type Elem = BasicBlock
    val worklist: WList = new mutable.Stack()
    var blocks: List[BasicBlock] = Nil

    def linearize(m: IMethod): List[BasicBlock] = {
      val b = m.startBlock;
      blocks = Nil;

      run {
        worklist pushAll (m.exh map (_.startBlock));
        worklist.push(b);
      }

      blocks.reverse;
    }

    def linearizeAt(m: IMethod, start: BasicBlock): List[BasicBlock] = {
      blocks = Nil
      worklist.clear()
      linearize(start)
    }

    /** Linearize another subtree and append it to the existing blocks. */
    def linearize(startBlock: BasicBlock): List[BasicBlock] = {
      //blocks = startBlock :: Nil;
      run( { worklist.push(startBlock); } );
      blocks.reverse;
    }

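    /** Add `b` to the schedule and enqueue its successors, which are read off
     *  the control-flow instruction that terminates the block.
     */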
    def processElement(b: BasicBlock) =
      if (b.nonEmpty) {
        add(b);
        b.lastInstruction match {
          case JUMP(whereto) =>
            add(whereto);
          case CJUMP(success, failure, _, _) =>
            add(success);
            add(failure);
          case CZJUMP(success, failure, _, _) =>
            add(success);
            add(failure);
          case SWITCH(_, labels) =>
            add(labels);
          case RETURN(_) => ();
          case THROW(clasz) =>   ();
        }
      }

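    /** Blocks are processed in LIFO order, as the worklist is a stack. */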
    def dequeue: Elem = worklist.pop;

    /**
     * Prepend b to the list, if not already scheduled.
     * TODO: use better test than linear search
     */
    def add(b: BasicBlock) {
      if (blocks.contains(b))
        ()
      else {
        blocks = b :: blocks;
        worklist push b;
      }
    }

    def add(bs: List[BasicBlock]): Unit = bs foreach add;
  }

  /**
   * Linearize code using a depth first traversal.
   */
  class DepthFirstLinerizer extends Linearizer {
    var blocks: List[BasicBlock] = Nil;

    def linearize(m: IMethod): List[BasicBlock] = {
      blocks = Nil;

      dfs(m.startBlock);
      m.exh foreach (b => dfs(b.startBlock));

      blocks.reverse
    }

    def linearizeAt(m: IMethod, start: BasicBlock): List[BasicBlock] = {
      blocks = Nil
      dfs(start)
      blocks.reverse
    }

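    /** Depth-first traversal starting at `b`: schedule the block, then recurse
     *  into its successors; blocks already scheduled stop the recursion.
     */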
    def dfs(b: BasicBlock): Unit =
      if (b.nonEmpty && add(b))
        b.successors foreach dfs;

    /**
     * Prepend b to the list, if not already scheduled.
     * TODO: use better test than linear search
     * @return Returns true if the block was added.
     */
    def add(b: BasicBlock): Boolean =
      !(blocks contains b) && {
        blocks = b :: blocks;
        true
      }
  }

  /**
   * Linearize code in reverse post order. In fact, it does
   * a post order traversal, prepending visited nodes to the list.
   * This way, it is constructed already in reverse post order.
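   * For a simple chain A -> B -> C the post order visit adds C, then B,
   * then A; prepending each block yields the list A, B, C.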
   */
  class ReversePostOrderLinearizer extends Linearizer {
    var blocks: List[BasicBlock] = Nil
    val visited = new mutable.HashSet[BasicBlock]
    val added = new mutable.BitSet

    def linearize(m: IMethod): List[BasicBlock] = {
      blocks = Nil;
      visited.clear()
      added.clear()

      m.exh foreach (b => rpo(b.startBlock));
      rpo(m.startBlock);

      // if the start block has predecessors, it won't be the first one
      // in the linearization, so we need to enforce it here
      if (m.startBlock.predecessors eq Nil)
        blocks
      else
        m.startBlock :: (blocks.filterNot(_ == m.startBlock))
    }

    def linearizeAt(m: IMethod, start: BasicBlock): List[BasicBlock] = {
      blocks = Nil
      visited.clear()
      added.clear()

      rpo(start)
      blocks
    }

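    /** Post-order traversal: visit all successors of `b` first, then prepend
     *  `b` itself, so `blocks` ends up in reverse post order.
     */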
    def rpo(b: BasicBlock): Unit =
      if (b.nonEmpty && !visited(b)) {
        visited += b;
        b.successors foreach rpo
        add(b)
      }

    /**
     * Prepend b to the list, if not already scheduled.
     * @return Returns true if the block was added.
     */
    def add(b: BasicBlock) = {
      debuglog("Linearizer adding block " + b.label)

      if (!added(b.label)) {
        added += b.label
        blocks = b :: blocks;
      }
    }
  }

  /** A 'dump' of the blocks in this method, which does not
   *  require any well-formedness of the basic blocks (like
   *  the last instruction being a jump).
   */
  class DumpLinearizer extends Linearizer {
    def linearize(m: IMethod): List[BasicBlock] = m.blocks
    def linearizeAt(m: IMethod, start: BasicBlock): List[BasicBlock] = sys.error("not implemented")
  }

  /** The MSIL linearizer is used only for methods with at least one exception handler.
   *  It makes sure that all the blocks belonging to a `try`, `catch` or `finally` block
   *  are emitted in an order that allows the lexical nesting of try-catch-finally, just
   *  like in the source code.
   */
  class MSILLinearizer extends Linearizer {
    /** The MSIL linearizer first calls a NormalLinearizer. This is because the ILGenerator checks
     *  the stack size before emitting instructions. For instance, to emit a `store`, there needs
     *  to be some value on the stack. This can blow up in situations like this:
     *       ...
     *       jump 3
     *    4: store_local 0
     *       jump 5
     *    3: load_value
     *       jump 4
     *    5: ...
     *  here, 3 must be scheduled first.
     *
     *  The NormalLinearizer also removes dead blocks (blocks without a predecessor). This is important
     *  in the following example:
     *     try { throw new Exception }
     *     catch { case e => throw e }
     *  which adds a dead block containing just a "throw" (which, again, would blow up code generation
     *  because of the stack size; there's no value on the stack when emitting that `throw`)
     */
    val normalLinearizer = new NormalLinearizer()

    def linearize(m: IMethod): List[BasicBlock] = {

      val handlersByCovered = m.exh.groupBy(_.covered)

      // number of basic blocks covered by the entire try-catch expression
      def size(covered: scala.collection.immutable.Set[BasicBlock]) = {
        val hs = handlersByCovered(covered)
        covered.size + (hs :\ 0)((h, s) => h.blocks.length + s)
      }

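      // Sorting the covered-block sets by ascending size means inner (smaller)
      // try-catch regions are grouped before the outer regions enclosing them,
      // which preserves the lexical nesting required by the emitter.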
      val tryBlocks = handlersByCovered.keys.toList sortBy size
      var result    = normalLinearizer.linearize(m)
      val frozen    = mutable.HashSet[BasicBlock](result.head)

      for (tryBlock <- tryBlocks) {
        result = groupBlocks(m, result, handlersByCovered(tryBlock), frozen)
      }
      result
    }

    /** @param handlers a list of handlers covering the same blocks (same try, multiple catches)
     *  @param frozen   blocks that can't be moved (the first block of a method, blocks directly following a try-catch)
     */
    def groupBlocks(method: IMethod, blocks: List[BasicBlock], handlers: List[ExceptionHandler], frozen: mutable.HashSet[BasicBlock]) = {
      assert(blocks.head == method.startBlock, method)

      // blocks before the try, and blocks for the try
      val beforeAndTry = new ListBuffer[BasicBlock]()
      // blocks for the handlers
      val catches = handlers map (_ => new ListBuffer[BasicBlock]())
      // blocks to be put at the end
      val after = new ListBuffer[BasicBlock]()

      var beforeTry = true
      val head = handlers.head

      for (b <- blocks) {
        if (head covers b) {
          beforeTry = false
          beforeAndTry += b
        } else {
          val handlerIndex = handlers.indexWhere(_.blocks.contains(b))
          if (handlerIndex >= 0) {
            catches(handlerIndex) += b
          } else if (beforeTry) {
            beforeAndTry += b
          } else {
            after += b
          }
        }
      }

      // reorder the blocks in "catches" so that the "firstBlock" is actually first
      (catches, handlers).zipped foreach { (lb, handler) =>
        lb -= handler.startBlock
        handler.startBlock +=: lb
      }

      // The first block emitted after a try-catch must be the one that the try / catch
      // blocks jump to (because in MSIL, these jumps cannot be emitted manually)
      var firstAfter: Option[BasicBlock] = None

      // Find the (hopefully) unique successor, looking at the try and all catch blocks
      var blks = head.covered.toList :: handlers.map(_.blocks)
      while (firstAfter.isEmpty && !blks.isEmpty) {
        val b = blks.head
        blks = blks.tail

        val leaving = leavingBlocks(b)
        // no leaving blocks when the try or catch ends with THROW or RET
        if (!leaving.isEmpty) {
          assert(leaving.size <= 1, leaving)
          firstAfter = Some(leaving.head)
        }
      }
      if (firstAfter.isDefined) {
        val b = firstAfter.get
        if (frozen(b)) {
          assert(after contains b, b +", "+ method)
        } else {
          frozen += b
          if (beforeAndTry contains b) {
            beforeAndTry -= b
          } else {
            assert(after contains b, after)
            after -= b
          }
          b +=: after
        }
      }

      for (lb <- catches) { beforeAndTry ++= lb }
      beforeAndTry ++= after
      beforeAndTry.toList
    }

    /** Returns all direct successors of `blocks` which are not part of
     *  that list, i.e. successors outside the `blocks` list.
     */
    private def leavingBlocks(blocks: List[BasicBlock]) = {
      val res = new mutable.HashSet[BasicBlock]()
      for (b <- blocks; s <- b.directSuccessors; if (!blocks.contains(s)))
        res += s
      res
    }

    def linearizeAt(m: IMethod, start: BasicBlock): List[BasicBlock] = {
      sys.error("not implemented")
    }
  }
}