summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
Diffstat (limited to 'test')
-rwxr-xr-xtest/files/bench/equality/eq.scala34
-rwxr-xr-xtest/files/bench/equality/eqeq.log42
-rwxr-xr-xtest/files/bench/equality/eqeq.scala46
-rw-r--r--test/files/jvm/serialization.check229
-rw-r--r--test/files/jvm/serialization.scala552
-rw-r--r--test/files/jvm/stringbuilder.scala4
-rw-r--r--test/files/jvm/t2585.check0
-rw-r--r--test/files/jvm/t2585/Test.java16
-rw-r--r--test/files/jvm/t2585/genericouter.scala25
-rw-r--r--test/files/neg/bug563.scala2
-rw-r--r--test/files/neg/bug700.check2
-rw-r--r--test/files/neg/bug700.scala2
-rw-r--r--test/files/neg/bug875.check4
-rw-r--r--test/files/neg/bug875.scala6
-rw-r--r--test/files/neg/bug910.check4
-rw-r--r--test/files/neg/bug910.scala2
-rw-r--r--test/files/neg/constrs.check2
-rw-r--r--test/files/neg/constrs.scala2
-rw-r--r--test/files/neg/gadts1.scala8
-rw-r--r--test/files/neg/implicits.check2
-rw-r--r--test/files/neg/implicits.scala4
-rw-r--r--test/files/neg/overload.check2
-rw-r--r--test/files/neg/overload.scala2
-rw-r--r--test/files/neg/t0218.scala2
-rw-r--r--test/files/neg/t1422.check4
-rw-r--r--test/files/neg/t1422.scala1
-rw-r--r--test/files/neg/t1477.check5
-rw-r--r--test/files/neg/t1477.scala25
-rw-r--r--test/files/neg/t2179.check9
-rwxr-xr-xtest/files/neg/t2179.scala3
-rw-r--r--test/files/neg/t2641.check39
-rw-r--r--test/files/neg/t2641.scala31
-rw-r--r--test/files/neg/t771.check4
-rwxr-xr-xtest/files/neg/t771.scala5
-rw-r--r--test/files/neg/viewtest.scala16
-rw-r--r--test/files/pos/bug0091.scala2
-rw-r--r--test/files/pos/bug1075.scala2
-rw-r--r--test/files/pos/bug287.scala2
-rw-r--r--test/files/pos/collections.scala2
-rw-r--r--test/files/pos/depexists.scala5
-rw-r--r--test/files/pos/implicits.scala19
-rw-r--r--test/files/pos/nested2.scala2
-rw-r--r--test/files/pos/switchUnbox.scala2
-rw-r--r--test/files/pos/t1164.scala2
-rw-r--r--test/files/pos/t1226.scala8
-rw-r--r--test/files/pos/t1236.scala14
-rw-r--r--test/files/pos/t1422.scala2
-rwxr-xr-xtest/files/pos/t1459/AbstractBase.java5
-rwxr-xr-xtest/files/pos/t1459/App.scala18
-rwxr-xr-xtest/files/pos/t1459/Caller.java7
-rwxr-xr-xtest/files/pos/t1545.scala (renamed from test/pending/neg/t1545.scala)0
-rwxr-xr-xtest/files/pos/t2484.scala17
-rwxr-xr-xtest/files/pos/t2635.scala16
-rw-r--r--test/files/pos/t2664.scala9
-rw-r--r--test/files/pos/t2665.scala3
-rw-r--r--test/files/pos/t2667.scala6
-rw-r--r--test/files/pos/t2669.scala28
-rw-r--r--test/files/pos/t2698.scala10
-rw-r--r--test/files/run/Course-2002-09.scala8
-rw-r--r--test/files/run/SymbolsTest.scala283
-rw-r--r--test/files/run/bug2552.check49
-rw-r--r--test/files/run/bug2552.scala34
-rw-r--r--test/files/run/bug2636.scala35
-rw-r--r--test/files/run/bug627.scala2
-rw-r--r--test/files/run/bugs2087-and-2400.scala20
-rw-r--r--test/files/run/priorityQueue.scala354
-rw-r--r--test/files/run/randomAccessSeq-apply.scala2
-rw-r--r--test/files/run/t1524.scala2
-rw-r--r--test/files/run/t153.check2
-rw-r--r--test/files/run/t153.scala2
-rw-r--r--test/files/run/t2526.scala54
-rw-r--r--test/files/run/unapply.scala2
-rw-r--r--test/files/run/unapplyArray.scala2
-rw-r--r--test/pending/pos/bug1357.scala (renamed from test/files/pos/bug1357.scala)0
-rw-r--r--test/pending/pos/t2610.scala17
-rw-r--r--test/pending/pos/t2619.scala80
-rw-r--r--test/pending/pos/t2625.scala9
-rw-r--r--test/pending/pos/t2635.scala16
-rw-r--r--test/pending/pos/t2641.scala16
-rw-r--r--test/pending/pos/t2660.scala25
-rw-r--r--test/pending/pos/t2691.scala9
-rw-r--r--test/postreview.py2540
-rwxr-xr-xtest/review44
-rw-r--r--test/simplejson/__init__.py318
-rw-r--r--test/simplejson/decoder.py354
-rw-r--r--test/simplejson/encoder.py440
-rw-r--r--test/simplejson/scanner.py65
-rw-r--r--test/simplejson/tool.py37
88 files changed, 5796 insertions, 342 deletions
diff --git a/test/files/bench/equality/eq.scala b/test/files/bench/equality/eq.scala
new file mode 100755
index 0000000000..8ac5b5ef5c
--- /dev/null
+++ b/test/files/bench/equality/eq.scala
@@ -0,0 +1,34 @@
+object eq extends testing.Benchmark {
+
+ def eqtest[T](creator: Int => T, n: Int): Int = {
+ val elems = Array.tabulate[AnyRef](n)(i => creator(i % 2).asInstanceOf[AnyRef])
+
+ var sum = 0
+ var i = 0
+ while (i < n) {
+ var j = 0
+ while (j < n) {
+ if (elems(i) eq elems(j)) sum += 1
+ j += 1
+ }
+ i += 1
+ }
+ sum
+ }
+
+ val obj1 = new Object
+ val obj2 = new Object
+
+ def run() {
+ var sum = 0
+ sum += eqtest(x => if (x == 0) obj1 else obj2, 2000)
+ sum += eqtest(x => x, 1000)
+ sum += eqtest(x => x.toChar, 550)
+ sum += eqtest(x => x.toByte, 550)
+ sum += eqtest(x => x.toLong, 550)
+ sum += eqtest(x => x.toShort, 100)
+ sum += eqtest(x => x.toFloat, 100)
+ sum += eqtest(x => x.toDouble, 100)
+ assert(sum == 2958950)
+ }
+}
diff --git a/test/files/bench/equality/eqeq.log b/test/files/bench/equality/eqeq.log
new file mode 100755
index 0000000000..d1e27aceed
--- /dev/null
+++ b/test/files/bench/equality/eqeq.log
@@ -0,0 +1,42 @@
+Banchmark results for testing equality operations:
+eq.scala: Base case, use eq equality only
+eqeq.scala: Test case, use == instead of eq.
+All tests run on Thinkpad T400, 1.6.0_12 client VM.
+Test command: java eq 5 5
+ java eqeq 5 5
+eq.scala, no -optimise
+eq$ 109 78 79 63 63
+eq$ 94 63 63 78 78
+eq$ 94 62 62 62 78
+eq$ 94 78 78 78 78
+eq$ 94 78 78 78 78
+eq.scala, with -optimise
+eq$ 421 63 62 47 63
+eq$ 406 62 62 63 62
+eq$ 407 62 62 78 63
+eq$ 406 63 63 62 62
+eq$ 407 62 62 63 47
+eqeq.scala with version of BoxesRuntime as of Nov 13th, no -optimise
+eqeq$ 562 516 516 516 515
+eqeq$ 547 515 515 531 532
+eqeq$ 532 516 516 515 516
+eqeq$ 547 531 531 516 531
+eqeq$ 547 515 515 516 516
+eqeq.scala with version of BoxesRuntime as of Nov 13th, with -optimise
+eqeq$ 1031 390 391 391 391
+eqeq$ 1031 391 391 391 390
+eqeq$ 1031 390 390 391 391
+eqeq$ 1031 406 407 391 390
+eqeq$ 1031 390 390 391 391
+eqeq.scala with 1st optimized of Nov 14th, no -optimise
+eqeq$ 484 421 438 438 437
+eqeq$ 484 438 437 437 438
+eqeq$ 469 437 453 454 438
+eqeq$ 468 437 438 468 438
+eqeq$ 485 437 437 422 438
+eqeq.scala with 1st optimized of Nov 14th, with -optimise
+eqeq$ 1016 375 391 375 375
+eqeq$ 1016 375 391 390 375
+eqeq$ 1016 390 391 375 375
+eqeq$ 1015 375 391 390 375
+eqeq$ 1016 390 375 375 375
diff --git a/test/files/bench/equality/eqeq.scala b/test/files/bench/equality/eqeq.scala
new file mode 100755
index 0000000000..afccece88a
--- /dev/null
+++ b/test/files/bench/equality/eqeq.scala
@@ -0,0 +1,46 @@
+/** benchmark for testing equality.
+ * Mix: == between non-numbers ith Object.equals as equality: 66%
+ * 50% of these are tests where eq is true.
+ * == between boxed integers: 17%
+ * == between boxed characters: 5%
+ * == between boxed bytes: 5%
+ * == between boxed longs: 5%
+ * == between boxed shorts: < 1%
+ * == between boxed floats: < 1%
+ * == between boxed doubles: < 1%
+ * In all cases 50% of the tests return true.
+ */
+object eqeq extends testing.Benchmark {
+
+ def eqeqtest[T](creator: Int => T, n: Int): Int = {
+ val elems = Array.tabulate[AnyRef](n)(i => creator(i % 2).asInstanceOf[AnyRef])
+
+ var sum = 0
+ var i = 0
+ while (i < n) {
+ var j = 0
+ while (j < n) {
+ if (elems(i) == elems(j)) sum += 1
+ j += 1
+ }
+ i += 1
+ }
+ sum
+ }
+
+ val obj1 = new Object
+ val obj2 = new Object
+
+ def run() {
+ var sum = 0
+ sum += eqeqtest(x => if (x == 0) obj1 else obj2, 2000)
+ sum += eqeqtest(x => x, 1000)
+ sum += eqeqtest(x => x.toChar, 550)
+ sum += eqeqtest(x => x.toByte, 550)
+ sum += eqeqtest(x => x.toLong, 550)
+ sum += eqeqtest(x => x.toShort, 100)
+ sum += eqeqtest(x => x.toFloat, 100)
+ sum += eqeqtest(x => x.toDouble, 100)
+ assert(sum == 2968750)
+ }
+}
diff --git a/test/files/jvm/serialization.check b/test/files/jvm/serialization.check
index da11d7c7f0..f1b5b10ec6 100644
--- a/test/files/jvm/serialization.check
+++ b/test/files/jvm/serialization.check
@@ -1,126 +1,209 @@
-x0 = List(1, 2, 3)
-y0 = List(1, 2, 3)
-x0 eq y0: false - y0 eq x0: false
-x0 equals y0: true - y0 equals x0: true
+a1 = Array[1,2,3]
+_a1 = Array[1,2,3]
+arrayEquals(a1, _a1): true
-x1 = List()
-y1 = List()
-x1 eq y1: true - y1 eq x1: true
+c1 = Cell(a)
+_c1 = Cell(a)
+c1 eq _c1: false, _c1 eq c1: false
+c1 equals _c1: true, _c1 equals c1: true
-x2 = None
-y2 = None
-x2 eq y2: true - y2 eq x2: true
-
-x3 = Array[1,2,3]
-y3 = Array[1,2,3]
-arrayEquals(x3, y3): true
-
-x4 = <na>
-y4 = <na>
-x4(2): 4 - y4(2): 4
-
-x5 = 'hello
-y5 = 'hello
-x5 eq y5: true - y5 eq x5: true
-x5 equals y5: true - y5 equals x5: true
-
-x6 = (BannerLimit,12345)
-y6 = (BannerLimit,12345)
-x6 eq y6: false - y6 eq x6: false
-x6 equals y6: true - y6 equals x6: true
+e1 = Left(1)
+_e1 = Left(1)
+e1 eq _e1: false, _e1 eq e1: false
+e1 equals _e1: true, _e1 equals e1: true
x7 = RoundingMode
y7 = RoundingMode
-x7 eq y7: true - y7 eq x7: true
-x7 equals y7: true - y7 equals x7: true
+x7 eq y7: true, y7 eq x7: true
+x7 equals y7: true, y7 equals x7: true
x8 = WeekDay
y8 = WeekDay
-x8 eq y8: true - y8 eq x8: true
-x8 equals y8: true - y8 equals x8: true
+x8 eq y8: true, y8 eq x8: true
+x8 equals y8: true, y8 equals x8: true
x9 = UP
y9 = UP
-x9 eq y9: true - y9 eq x9: true
-x9 equals y9: true - y9 equals x9: true
+x9 eq y9: true, y9 eq x9: true
+x9 equals y9: true, y9 equals x9: true
x10 = Monday
y10 = Monday
-x10 eq y10: true - y10 eq x10: true
-x10 equals y10: true - y10 equals x10: true
+x10 eq y10: true, y10 eq x10: true
+x10 equals y10: true, y10 equals x10: true
+
+x9 eq x10: false, x10 eq x9: false
+x9 equals x10: true, x10 equals x9: true
+x9 eq y10: false, y10 eq x9: false
+x9 equals y10: true, y10 equals x9: true
+
+f1 = <na>
+_f1 = <na>
+f1(2): 4, _f1(2): 4
+
+xs0 = List(1, 2, 3)
+_xs0 = List(1, 2, 3)
+xs0 eq _xs0: false, _xs0 eq xs0: false
+xs0 equals _xs0: true, _xs0 equals xs0: true
+
+xs1 = List()
+_xs1 = List()
+xs1 eq _xs1: true, _xs1 eq xs1: true
+
+o1 = None
+_o1 = None
+o1 eq _o1: true, _o1 eq o1: true
+
+o2 = Some(1)
+_o2 = Some(1)
+o2 eq _o2: false, _o2 eq o2: false
+o2 equals _o2: true, _o2 equals o2: true
+
+s1 = 'hello
+_s1 = 'hello
+s1 eq _s1: true, _s1 eq s1: true
+s1 equals _s1: true, _s1 equals s1: true
+
+t1 = (BannerLimit,12345)
+_t1 = (BannerLimit,12345)
+t1 eq _t1: false, _t1 eq t1: false
+t1 equals _t1: true, _t1 equals t1: true
+
+x = BitSet(1, 2)
+y = BitSet(1, 2)
+x equals y: true, y equals x: true
+
+x = BitSet(2, 3)
+y = BitSet(2, 3)
+x equals y: true, y equals x: true
+
+x = Map(2 -> B, 1 -> A, 3 -> C)
+y = Map(2 -> B, 1 -> A, 3 -> C)
+x equals y: true, y equals x: true
-x9 eq x10: false - x10 eq x9: false
-x9 equals x10: true - x10 equals x9: true
-x9 eq y10: false - y10 eq x9: false
-x9 equals y10: true - y10 equals x9: true
+x = Set(1, 2)
+y = Set(1, 2)
+x equals y: true, y equals x: true
x = List((buffers,20), (layers,2), (title,3))
y = List((buffers,20), (layers,2), (title,3))
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = Map(buffers -> 20, layers -> 2, title -> 3)
y = Map(buffers -> 20, layers -> 2, title -> 3)
-x equals y: true - y equals x: true
-
-x = BitSet(2, 3)
-y = BitSet(2, 3)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = Set(5, 3)
y = Set(5, 3)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = Queue(a, b, c)
y = Queue(a, b, c)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
+
+x = Range(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+y = Range(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+x equals y: true, y equals x: true
+
+x = NumericRange(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+y = NumericRange(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+x equals y: true, y equals x: true
+
+x = Map(1 -> A, 2 -> B, 3 -> C)
+y = Map(1 -> A, 2 -> B, 3 -> C)
+x equals y: true, y equals x: true
+
+x = TreeSet(1, 2, 3)
+y = TreeSet(1, 2, 3)
+x equals y: true, y equals x: true
x = Stack(c, b, a)
y = Stack(c, b, a)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
+
+x = Stream(0, ?)
+y = Stream(0, ?)
+x equals y: true, y equals x: true
x = Map(42 -> FortyTwo)
y = Map(42 -> FortyTwo)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = TreeSet(0, 2)
y = TreeSet(0, 2)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
+
+x = Vector('a, 'b, 'c)
+y = Vector('a, 'b, 'c)
+x equals y: true, y equals x: true
x = ArrayBuffer(one, two)
y = ArrayBuffer(one, two)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
+
+x = ArrayBuilder.ofLong
+y = ArrayBuilder.ofLong
+x equals y: true, y equals x: true
+
+x = ArrayBuilder.ofFloat
+y = ArrayBuilder.ofFloat
+x equals y: true, y equals x: true
-x = Map(title -> 3, buffers -> 20, layers -> 2)
-y = Map(title -> 3, buffers -> 20, layers -> 2)
-x equals y: true - y equals x: true
+x = ArrayStack(3, 2, 20)
+y = ArrayStack(3, 2, 20)
+x equals y: true, y equals x: true
x = BitSet(0, 8, 9)
y = BitSet(0, 8, 9)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
+
+x = Map(A -> 1, C -> 3, B -> 2)
+y = Map(A -> 1, C -> 3, B -> 2)
+x equals y: true, y equals x: true
x = Set(layers, buffers, title)
y = Set(layers, buffers, title)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
+
+x = History()
+y = History()
+x equals y: true, y equals x: true
+
+x = ListBuffer(white, black)
+y = ListBuffer(white, black)
+x equals y: true, y equals x: true
x = Queue(20, 2, 3)
y = Queue(20, 2, 3)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = Stack(3, 2, 20)
y = Stack(3, 2, 20)
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
-x = ListBuffer(white, black)
-y = ListBuffer(white, black)
-x equals y: true - y equals x: true
+x = abc
+y = abc
+x equals y: true, y equals x: true
+
+x = WrappedArray(1, 2, 3)
+y = WrappedArray(1, 2, 3)
+x equals y: true, y equals x: true
+
+x = xml:src="hello"
+y = xml:src="hello"
+x equals y: true, y equals x: true
+
+x = <title></title>
+y = <title></title>
+x equals y: true, y equals x: true
x = <html><title>title</title><body></body></html>
y = <html><title>title</title><body></body></html>
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = <html>
<body>
- <table cellpadding="2" cellspacing="0">
+ <table cellpadding="2" cellspacing="0">
<tr>
<th>Last Name</th>
<th>First Name</th>
@@ -137,10 +220,10 @@ x = <html>
</tr>
</table>
</body>
- </html>
+ </html>
y = <html>
<body>
- <table cellpadding="2" cellspacing="0">
+ <table cellpadding="2" cellspacing="0">
<tr>
<th>Last Name</th>
<th>First Name</th>
@@ -157,26 +240,26 @@ y = <html>
</tr>
</table>
</body>
- </html>
-x equals y: true - y equals x: true
+ </html>
+x equals y: true, y equals x: true
x = Tim
y = Tim
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = Bob
y = Bob
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = John
y = John
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = Bill
y = Bill
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
x = Paul
y = Paul
-x equals y: true - y equals x: true
+x equals y: true, y equals x: true
diff --git a/test/files/jvm/serialization.scala b/test/files/jvm/serialization.scala
index 81d21e6dc5..06086f4038 100644
--- a/test/files/jvm/serialization.scala
+++ b/test/files/jvm/serialization.scala
@@ -2,17 +2,6 @@
// Serialization
//############################################################################
-import java.lang.System
-
-object EqualityTest {
- def check[A, B](x: A, y: B) {
- println("x = " + x)
- println("y = " + y)
- println("x equals y: " + (x equals y) + " - y equals x: " + (y equals x))
- println()
- }
-}
-
object Serialize {
@throws(classOf[java.io.IOException])
def write[A](o: A): Array[Byte] = {
@@ -29,7 +18,14 @@ object Serialize {
new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(buffer))
in.readObject().asInstanceOf[A]
}
+ def check[A, B](x: A, y: B) {
+ println("x = " + x)
+ println("y = " + y)
+ println("x equals y: " + (x equals y) + ", y equals x: " + (y equals x))
+ println()
+ }
}
+import Serialize._
//############################################################################
// Test classes in package "scala"
@@ -50,92 +46,140 @@ object Test1_scala {
}
import WeekDay._, BigDecimal._, RoundingMode._
- val x0 = List(1, 2, 3)
- val x1 = Nil
- val x2 = None
- val x3 = Array(1, 2, 3)
- val x4 = { x: Int => 2 * x }
- val x5 = 'hello
- val x6 = ("BannerLimit", 12345)
- val x7 = BigDecimal.RoundingMode
- val x8 = WeekDay
- val x9 = UP
- val x10 = Monday
-
+ // in alphabetic order
try {
- val y0: List[Int] = Serialize.read(Serialize.write(x0))
- val y1: List[Nothing] = Serialize.read(Serialize.write(x1))
- val y2: Option[Nothing] = Serialize.read(Serialize.write(x2))
- val y3: Array[Int] = Serialize.read(Serialize.write(x3))
- val y4: Function[Int, Int] = Serialize.read(Serialize.write(x4))
- val y5: Symbol = Serialize.read(Serialize.write(x5))
- val y6: (String, Int) = Serialize.read(Serialize.write(x6))
- val y7: RoundingMode.type = Serialize.read(Serialize.write(x7))
- val y8: WeekDay.type = Serialize.read(Serialize.write(x8))
- val y9: RoundingMode = Serialize.read(Serialize.write(x9))
- val y10: WeekDay = Serialize.read(Serialize.write(x10))
-
- println("x0 = " + x0)
- println("y0 = " + y0)
- println("x0 eq y0: " + (x0 eq y0) + " - y0 eq x0: " + (y0 eq x0))
- println("x0 equals y0: " + (x0 equals y0) + " - y0 equals x0: " + (y0 equals x0))
- println()
- println("x1 = " + x1)
- println("y1 = " + y1)
- println("x1 eq y1: " + (x1 eq y1) + " - y1 eq x1: " + (y1 eq x1))
- println()
- println("x2 = " + x2)
- println("y2 = " + y2)
- println("x2 eq y2: " + (x2 eq y2) + " - y2 eq x2: " + (y2 eq x2))
- println()
- println("x3 = " + arrayToString(x3))
- println("y3 = " + arrayToString(y3))
- println("arrayEquals(x3, y3): " + arrayEquals(x3, y3))
+ // Array
+ val a1 = Array(1, 2, 3)
+ val _a1: Array[Int] = read(write(a1))
+ println("a1 = " + arrayToString(a1))
+ println("_a1 = " + arrayToString(_a1))
+ println("arrayEquals(a1, _a1): " + arrayEquals(a1, _a1))
println()
- println("x4 = <na>")
- println("y4 = <na>")
- println("x4(2): " + x4(2) + " - y4(2): " + y4(2))
- println()
- println("x5 = " + x5)
- println("y5 = " + y5)
- println("x5 eq y5: " + (x5 eq y5) + " - y5 eq x5: " + (y5 eq x5))
- println("x5 equals y5: " + (x5 equals y5) + " - y5 equals x5: " + (y5 equals x5))
+
+ // Cell
+ val c1 = new Cell('a')
+ val _c1: Cell[Char] = read(write(c1))
+ println("c1 = " + c1)
+ println("_c1 = " + _c1)
+ println("c1 eq _c1: " + (c1 eq _c1) + ", _c1 eq c1: " + (_c1 eq c1))
+ println("c1 equals _c1: " + (c1 equals _c1) + ", _c1 equals c1: " + (_c1 equals c1))
println()
- println("x6 = " + x6)
- println("y6 = " + y6)
- println("x6 eq y6: " + (x6 eq y6) + " - y6 eq x6: " + (y6 eq x6))
- println("x6 equals y6: " + (x6 equals y6) + " - y6 equals x6: " + (y6 equals x6))
+
+ // Either
+ val e1 = Left(1)
+ val _e1: Either[Int, String] = read(write(e1))
+ println("e1 = " + e1)
+ println("_e1 = " + _e1)
+ println("e1 eq _e1: " + (e1 eq _e1) + ", _e1 eq e1: " + (_e1 eq e1))
+ println("e1 equals _e1: " + (e1 equals _e1) + ", _e1 equals e1: " + (_e1 equals e1))
println()
+
+ // Enumeration
+ val x7 = BigDecimal.RoundingMode
+ val y7: RoundingMode.type = read(write(x7))
println("x7 = " + x7)
println("y7 = " + y7)
- println("x7 eq y7: " + (x7 eq y7) + " - y7 eq x7: " + (y7 eq x7))
- println("x7 equals y7: " + (x7 equals y7) + " - y7 equals x7: " + (y7 equals x7))
+ println("x7 eq y7: " + (x7 eq y7) + ", y7 eq x7: " + (y7 eq x7))
+ println("x7 equals y7: " + (x7 equals y7) + ", y7 equals x7: " + (y7 equals x7))
println()
+
+ val x8 = WeekDay
+ val y8: WeekDay.type = read(write(x8))
println("x8 = " + x8)
println("y8 = " + y8)
- println("x8 eq y8: " + (x8 eq y8) + " - y8 eq x8: " + (y8 eq x8))
- println("x8 equals y8: " + (x8 equals y8) + " - y8 equals x8: " + (y8 equals x8))
+ println("x8 eq y8: " + (x8 eq y8) + ", y8 eq x8: " + (y8 eq x8))
+ println("x8 equals y8: " + (x8 equals y8) + ", y8 equals x8: " + (y8 equals x8))
println()
+
+ val x9 = UP
+ val y9: RoundingMode = read(write(x9))
println("x9 = " + x9)
println("y9 = " + y9)
- println("x9 eq y9: " + (x9 eq y9) + " - y9 eq x9: " + (y9 eq x9))
- println("x9 equals y9: " + (x9 equals y9) + " - y9 equals x9: " + (y9 equals x9))
+ println("x9 eq y9: " + (x9 eq y9) + ", y9 eq x9: " + (y9 eq x9))
+ println("x9 equals y9: " + (x9 equals y9) + ", y9 equals x9: " + (y9 equals x9))
println()
+
+ val x10 = Monday
+ val y10: WeekDay = read(write(x10))
println("x10 = " + x10)
println("y10 = " + y10)
- println("x10 eq y10: " + (x10 eq y10) + " - y10 eq x10: " + (y10 eq x10))
- println("x10 equals y10: " + (x10 equals y10) + " - y10 equals x10: " + (y10 equals x10))
+ println("x10 eq y10: " + (x10 eq y10) + ", y10 eq x10: " + (y10 eq x10))
+ println("x10 equals y10: " + (x10 equals y10) + ", y10 equals x10: " + (y10 equals x10))
println()
- println("x9 eq x10: " + (x9 eq x10) + " - x10 eq x9: " + (x10 eq x9))
- println("x9 equals x10: " + (x9 equals x10) + " - x10 equals x9: " + (x10 equals x9))
- println("x9 eq y10: " + (x9 eq y10) + " - y10 eq x9: " + (y10 eq x9))
- println("x9 equals y10: " + (x9 equals y10) + " - y10 equals x9: " + (y10 equals x9))
+
+ println("x9 eq x10: " + (x9 eq x10) + ", x10 eq x9: " + (x10 eq x9))
+ println("x9 equals x10: " + (x9 equals x10) + ", x10 equals x9: " + (x10 equals x9))
+ println("x9 eq y10: " + (x9 eq y10) + ", y10 eq x9: " + (y10 eq x9))
+ println("x9 equals y10: " + (x9 equals y10) + ", y10 equals x9: " + (y10 equals x9))
+ println()
+
+ // Function
+ val f1 = { x: Int => 2 * x }
+ val _f1: Function[Int, Int] = read(write(f1))
+ println("f1 = <na>")
+ println("_f1 = <na>")
+ println("f1(2): " + f1(2) + ", _f1(2): " + _f1(2))
+ println()
+
+ // List
+ val xs0 = List(1, 2, 3)
+ val _xs0: List[Int] = read(write(xs0))
+ println("xs0 = " + xs0)
+ println("_xs0 = " + _xs0)
+ println("xs0 eq _xs0: " + (xs0 eq _xs0) + ", _xs0 eq xs0: " + (_xs0 eq xs0))
+ println("xs0 equals _xs0: " + (xs0 equals _xs0) + ", _xs0 equals xs0: " + (_xs0 equals xs0))
+ println()
+
+ val xs1 = Nil
+ val _xs1: List[Nothing] = read(write(xs1))
+ println("xs1 = " + xs1)
+ println("_xs1 = " + _xs1)
+ println("xs1 eq _xs1: " + (xs1 eq _xs1) + ", _xs1 eq xs1: " + (_xs1 eq xs1))
+ println()
+
+ // Option
+ val o1 = None
+ val _o1: Option[Nothing] = read(write(o1))
+ println("o1 = " + o1)
+ println("_o1 = " + _o1)
+ println("o1 eq _o1: " + (o1 eq _o1) + ", _o1 eq o1: " + (_o1 eq o1))
+ println()
+
+ val o2 = Some(1)
+ val _o2: Option[Int] = read(write(o2))
+ println("o2 = " + o2)
+ println("_o2 = " + _o2)
+ println("o2 eq _o2: " + (o2 eq _o2) + ", _o2 eq o2: " + (_o2 eq o2))
+ println("o2 equals _o2: " + (o2 equals _o2) + ", _o2 equals o2: " + (_o2 equals o2))
+ println()
+/*
+ // Responder
+ val r1 = Responder.constant("xyz")
+ val _r1: Responder[String] = read(write(r1))
+ check(r1, _r1)
+*/
+ // Symbol
+ val s1 = 'hello
+ val _s1: Symbol = read(write(s1))
+ println("s1 = " + s1)
+ println("_s1 = " + _s1)
+ println("s1 eq _s1: " + (s1 eq _s1) + ", _s1 eq s1: " + (_s1 eq s1))
+ println("s1 equals _s1: " + (s1 equals _s1) + ", _s1 equals s1: " + (_s1 equals s1))
+ println()
+
+ // Tuple
+ val t1 = ("BannerLimit", 12345)
+ val _t1: (String, Int) = read(write(t1))
+ println("t1 = " + t1)
+ println("_t1 = " + _t1)
+ println("t1 eq _t1: " + (t1 eq _t1) + ", _t1 eq t1: " + (_t1 eq t1))
+ println("t1 equals _t1: " + (t1 equals _t1) + ", _t1 equals t1: " + (_t1 equals t1))
println()
}
catch {
case e: Exception =>
- e.printStackTrace()
println("Error in Test1_scala: " + e)
+ throw e
}
}
@@ -145,50 +189,97 @@ object Test1_scala {
@serializable
object Test2_immutable {
import scala.collection.immutable.{
- BitSet, ListMap, ListSet, Queue, Stack, TreeSet, TreeMap}
-
- val x1 = List(
- Pair("buffers", 20),
- Pair("layers", 2),
- Pair("title", 3)
- )
-
- val x2 = new ListMap[String, Int] + ("buffers" -> 20, "layers" -> 2, "title" -> 3)
-
- val x3 = {
- val bs = new collection.mutable.BitSet()
- bs += 2; bs += 3
- bs.toImmutable
- }
-
- val x4 = new ListSet[Int]() + 3 + 5
-
- val x5 = Queue("a", "b", "c")
-
- val x6 = new Stack().push("a", "b", "c")
-
- val x7 = new TreeMap[Int, String] + (42 -> "FortyTwo")
-
- val x8 = new TreeSet[Int]() + 2 + 0
+ BitSet, HashMap, HashSet, ListMap, ListSet, Queue, Range, SortedMap,
+ SortedSet, Stack, Stream, TreeMap, TreeSet, Vector}
+ // in alphabetic order
try {
- val y1: List[Pair[String, Int]] = Serialize.read(Serialize.write(x1))
- val y2: ListMap[String, Int] = Serialize.read(Serialize.write(x2))
- val y3: BitSet = Serialize.read(Serialize.write(x3))
- val y4: ListSet[Int] = Serialize.read(Serialize.write(x4))
- val y5: Queue[String] = Serialize.read(Serialize.write(x5))
- val y6: Stack[String] = Serialize.read(Serialize.write(x6))
- val y7: TreeMap[Int, String] = Serialize.read(Serialize.write(x7))
- val y8: TreeSet[Int] = Serialize.read(Serialize.write(x8))
-
- EqualityTest.check(x1, y1)
- EqualityTest.check(x2, y2)
- EqualityTest.check(x3, y3)
- EqualityTest.check(x4, y4)
- EqualityTest.check(x5, y5)
- EqualityTest.check(x6, y6)
- EqualityTest.check(x7, y7)
- EqualityTest.check(x8, y8)
+ // BitSet
+ val bs1 = BitSet.empty + 1 + 2
+ val _bs1: BitSet = read(write(bs1))
+ check(bs1, _bs1)
+
+ val bs2 = {
+ val bs = new collection.mutable.BitSet()
+ bs += 2; bs += 3
+ bs.toImmutable
+ }
+ val _bs2: BitSet = read(write(bs2))
+ check(bs2, _bs2)
+
+ // HashMap
+ val hm1 = new HashMap[Int, String] + (1 -> "A", 2 -> "B", 3 -> "C")
+ val _hm1: HashMap[Int, String] = read(write(hm1))
+ check(hm1, _hm1)
+
+ // HashSet
+ val hs1 = new HashSet[Int] + 1 + 2
+ val _hs1: HashSet[Int] = read(write(hs1))
+ check(hs1, _hs1)
+
+ // List
+ val xs1 = List(("buffers", 20), ("layers", 2), ("title", 3))
+ val _xs1: List[(String, Int)] = read(write(xs1))
+ check(xs1, _xs1)
+
+ // ListMap
+ val lm1 = new ListMap[String, Int] + ("buffers" -> 20, "layers" -> 2, "title" -> 3)
+ val _lm1: ListMap[String, Int] = read(write(lm1))
+ check(lm1, _lm1)
+
+ // ListSet
+ val ls1 = new ListSet[Int] + 3 + 5
+ val _ls1: ListSet[Int] = read(write(ls1))
+ check(ls1, _ls1)
+
+ // Queue
+ val q1 = Queue("a", "b", "c")
+ val _q1: Queue[String] = read(write(q1))
+ check(q1, _q1)
+
+ // Range
+ val r1 = 0 until 10
+ val _r1: Range = read(write(r1))
+ check(r1, _r1)
+
+ val r2 = Range.Long(0L, 10L, 1)
+ val _r2: r2.type = read(write(r2))
+ check(r2, _r2)
+
+ // SortedMap
+ val sm1 = SortedMap.empty[Int, String] + (2 -> "B", 3 -> "C", 1 -> "A")
+ val _sm1: SortedMap[Int, String] = read(write(sm1))
+ check(sm1, _sm1)
+
+ // SortedSet
+ val ss1 = SortedSet.empty[Int] + 2 + 3 + 1
+ val _ss1: SortedSet[Int] = read(write(ss1))
+ check(ss1, _ss1)
+
+ // Stack
+ val s1 = new Stack().push("a", "b", "c")
+ val _s1: Stack[String] = read(write(s1))
+ check(s1, _s1)
+
+ // Stream
+ val st1 = Stream.range(0, 10)
+ val _st1: Stream[Int] = read(write(st1))
+ check(st1, _st1)
+
+ // TreeMap
+ val tm1 = new TreeMap[Int, String] + (42 -> "FortyTwo")
+ val _tm1: TreeMap[Int, String] = read(write(tm1))
+ check(tm1, _tm1)
+
+ // TreeSet
+ val ts1 = new TreeSet[Int]() + 2 + 0
+ val _ts1: TreeSet[Int] = read(write(ts1))
+ check(ts1, _ts1)
+
+ // Vector
+ val v1 = Vector('a, 'b, 'c)
+ val _v1: Vector[Symbol] = read(write(v1))
+ check(v1, _v1)
}
catch {
case e: Exception =>
@@ -201,65 +292,110 @@ object Test2_immutable {
// Test classes in package "scala.collection.mutable"
object Test3_mutable {
+ import scala.reflect.ClassManifest
import scala.collection.mutable.{
- ArrayBuffer, BitSet, HashMap, HashSet, History, LinkedList, ListBuffer,
- Publisher, Queue, Stack}
-
- val x0 = new ArrayBuffer[String]
- x0 ++= List("one", "two")
-
- val x2 = new BitSet()
- x2 += 0
- x2 += 8
- x2 += 9
-
- val x1 = new HashMap[String, Int]
- x1 ++= Test2_immutable.x1
-
- val x3 = new HashSet[String]
- x3 ++= Test2_immutable.x1.map(p => p._1)
-
- @serializable
- class Feed extends Publisher[String, Feed]
-
- val x8 = new History[String, Feed]
-
- val x4 = new LinkedList[Int](2, null)
- x4.append(new LinkedList(3, null))
-
- val x7 = new ListBuffer[String]
- x7 ++= List("white", "black")
-
- val x5 = new Queue[Int]
- x5 ++= Test2_immutable.x1.map(p => p._2)
-
- val x6 = new Stack[Int]
- x6 ++= x5
+ ArrayBuffer, ArrayBuilder, ArrayStack, BitSet, DoubleLinkedList,
+ HashMap, HashSet, History, LinkedList, ListBuffer, Publisher, Queue,
+ Stack, StringBuilder, WrappedArray}
+ // in alphabetic order
try {
- val y0: ArrayBuffer[String] = Serialize.read(Serialize.write(x0))
- val y1: HashMap[String, Int] = Serialize.read(Serialize.write(x1))
- val y2: BitSet = Serialize.read(Serialize.write(x2))
- val y3: HashSet[String] = Serialize.read(Serialize.write(x3))
-// val y4: LinkedList[Int] = Serialize.read(Serialize.write(x4))
- val y5: Queue[Int] = Serialize.read(Serialize.write(x5))
- val y6: Stack[Int] = Serialize.read(Serialize.write(x6))
- val y7: ListBuffer[String] = Serialize.read(Serialize.write(x7))
- val y8: History[String, Feed] = Serialize.read(Serialize.write(x8))
-
- EqualityTest.check(x0, y0)
- EqualityTest.check(x1, y1)
- EqualityTest.check(x2, y2)
- EqualityTest.check(x3, y3)
- //EqualityTest.check(x4, y4) //todo
- EqualityTest.check(x5, y5)
- EqualityTest.check(x6, y6)
- EqualityTest.check(x7, y7)
- //EqualityTest.check(x8, y8) //todo
+ // ArrayBuffer
+ val ab1 = new ArrayBuffer[String]
+ ab1 ++= List("one", "two")
+ val _ab1: ArrayBuffer[String] = read(write(ab1))
+ check(ab1, _ab1)
+
+ // ArrayBuilder
+ val abu1 = ArrayBuilder.make[Long]
+ val _abu1: ArrayBuilder[ClassManifest[Long]] = read(write(abu1))
+ check(abu1, _abu1)
+
+ val abu2 = ArrayBuilder.make[Float]
+ val _abu2: ArrayBuilder[ClassManifest[Float]] = read(write(abu2))
+ check(abu2, _abu2)
+
+ // ArrayStack
+ val as1 = new ArrayStack[Int]
+ as1 ++= List(20, 2, 3).iterator
+ val _as1: ArrayStack[Int] = read(write(as1))
+ check(as1, _as1)
+
+ // BitSet
+ val bs1 = new BitSet()
+ bs1 += 0
+ bs1 += 8
+ bs1 += 9
+ val _bs1: BitSet = read(write(bs1))
+ check(bs1, _bs1)
+/*
+ // DoubleLinkedList
+ val dl1 = new DoubleLinkedList[Int](2, null)
+ dl1.append(new DoubleLinkedList(3, null))
+ val _dl1: DoubleLinkedList[Int] = read(write(dl1))
+ check(dl1, _dl1)
+*/
+ // HashMap
+ val hm1 = new HashMap[String, Int]
+ hm1 ++= List(("A", 1), ("B", 2), ("C", 3)).iterator
+ val _hm1: HashMap[String, Int] = read(write(hm1))
+ check(hm1, _hm1)
+
+ // HashSet
+ val hs1 = new HashSet[String]
+ hs1 ++= List("layers", "buffers", "title").iterator
+ val _hs1: HashSet[String] = read(write(hs1))
+ check(hs1, _hs1)
+
+ // History
+ @serializable
+ class Feed extends Publisher[String]
+
+ val h1 = new History[String, Int]
+ val _h1: History[String, Int] = read(write(h1))
+ check(h1, _h1)
+/*
+ // LinkedList
+ val ll1 = new LinkedList[Int](2, null)
+ ll1.append(new LinkedList(3, null))
+ val _ll1: LinkedList[Int] = read(write(ll1))
+ check(ll1, _ll1)
+*/
+ // ListBuffer
+ val lb1 = new ListBuffer[String]
+ lb1 ++= List("white", "black")
+ val _lb1: ListBuffer[String] = read(write(lb1))
+ check(lb1, _lb1)
+
+ // Publisher
+
+ // Queue
+ val q1 = new Queue[Int]
+ q1 ++= List(20, 2, 3).iterator
+ val _q1: Queue[Int] = read(write(q1))
+ check(q1, _q1)
+
+ // Stack
+ val s1 = new Stack[Int]
+ s1 pushAll q1
+ val _s1: Stack[Int] = read(write(s1))
+ check(s1, _s1)
+
+ // StringBuilder
+ val sb1 = new StringBuilder
+ sb1 append "abc"
+ val _sb1: StringBuilder = read(write(sb1))
+ check(sb1, _sb1)
+
+ // WrappedArray
+ val wa1 = WrappedArray.make(Array(1, 2, 3))
+ val _wa1: WrappedArray[Int] = read(write(wa1))
+ check(wa1, _wa1)
}
catch {
case e: Exception =>
println("Error in Test3_mutable: " + e)
+ throw e
}
}
@@ -267,15 +403,31 @@ object Test3_mutable {
// Test classes in package "scala.xml"
object Test4_xml {
- import scala.xml.Elem
-
- val x1 = <html><title>title</title><body></body></html>;
+ import scala.xml.{Attribute, Document, Elem, Null, PrefixedAttribute, Text}
case class Person(name: String, age: Int)
- class AddressBook(a: Person*) {
- private val people: List[Person] = a.toList
- def toXHTML =
+ try {
+ // Attribute
+ val a1 = new PrefixedAttribute("xml", "src", Text("hello"), Null)
+ val _a1: Attribute = read(write(a1))
+ check(a1, _a1)
+
+ // Document
+ val d1 = new Document
+ d1.docElem = <title></title>
+ d1.encoding = Some("UTF-8")
+ val _d1: Document = read(write(d1))
+ check(d1, _d1)
+
+ // Elem
+ val e1 = <html><title>title</title><body></body></html>;
+ val _e1: Elem = read(write(e1))
+ check(e1, _e1)
+
+ class AddressBook(a: Person*) {
+ private val people: List[Person] = a.toList
+ def toXHTML =
<table cellpadding="2" cellspacing="0">
<tr>
<th>Last Name</th>
@@ -287,30 +439,26 @@ object Test4_xml {
<td> { p.age.toString() } </td>
</tr> }
</table>;
- }
+ }
- val people = new AddressBook(
- Person("Tom", 20),
- Person("Bob", 22),
- Person("James", 19))
+ val people = new AddressBook(
+ Person("Tom", 20),
+ Person("Bob", 22),
+ Person("James", 19))
- val x2 =
- <html>
+ val e2 =
+ <html>
<body>
- { people.toXHTML }
+ { people.toXHTML }
</body>
- </html>;
-
- try {
- val y1: scala.xml.Elem = Serialize.read(Serialize.write(x1))
- val y2: scala.xml.Elem = Serialize.read(Serialize.write(x2))
-
- EqualityTest.check(x1, y1)
- EqualityTest.check(x2, y2)
+ </html>;
+ val _e2: Elem = read(write(e2))
+ check(e2, _e2)
}
catch {
case e: Exception =>
println("Error in Test4_xml: " + e)
+ throw e
}
}
@@ -339,11 +487,11 @@ object Test5 {
val x2 = bob
try {
- val y1: Person = Serialize.read(Serialize.write(x1))
- val y2: Employee = Serialize.read(Serialize.write(x2))
+ val y1: Person = read(write(x1))
+ val y2: Employee = read(write(x2))
- EqualityTest.check(x1, y1)
- EqualityTest.check(x2, y2)
+ check(x1, y1)
+ check(x2, y2)
}
catch {
case e: Exception =>
@@ -369,13 +517,13 @@ object Test6 {
val x3 = paul
try {
- val y1: Person = Serialize.read(Serialize.write(x1))
- val y2: Employee = Serialize.read(Serialize.write(x2))
- val y3: Person = Serialize.read(Serialize.write(x3))
+ val y1: Person = read(write(x1))
+ val y2: Employee = read(write(x2))
+ val y3: Person = read(write(x3))
- EqualityTest.check(x1, y1)
- EqualityTest.check(x2, y2)
- EqualityTest.check(x3, y3)
+ check(x1, y1)
+ check(x2, y2)
+ check(x3, y3)
}
catch {
case e: Exception =>
diff --git a/test/files/jvm/stringbuilder.scala b/test/files/jvm/stringbuilder.scala
index bacd13c715..c86a8a7713 100644
--- a/test/files/jvm/stringbuilder.scala
+++ b/test/files/jvm/stringbuilder.scala
@@ -53,7 +53,7 @@ object Test2 extends TestCase("append") with Assert {
val j1 = new java.lang.StringBuilder // Java 1.5+
val s1 = new StringBuilder
j1 append "###" append Array('0', '1', '2') append "xyz".subSequence(0, 3)
- s1 append "###" append Array('0', '1', '2') append List('x', 'y', 'z')
+ s1 append "###" appendAll Array('0', '1', '2') appendAll List('x', 'y', 'z')
assertEquals("s1.toString equals j1.toString", true, s1.toString equals j1.toString)
}
}
@@ -72,7 +72,7 @@ object Test3 extends TestCase("insert") with Assert {
val j1 = new java.lang.StringBuilder // Java 1.5+
val s1 = new StringBuilder
j1 insert (0, "###") insert (0, Array('0', '1', '2')) insert (0, "xyz".subSequence(0, 3))
- s1 insert (0, "###") insert (0, Array('0', '1', '2')) insert (0, List('x', 'y', 'z'))
+ s1 insert (0, "###") insertAll (0, Array('0', '1', '2')) insertAll (0, List('x', 'y', 'z'))
//println("j1="+j1+", s1="+s1)//debug
assertEquals("s1.toString equals j1.toString", true, s1.toString equals j1.toString)
diff --git a/test/files/jvm/t2585.check b/test/files/jvm/t2585.check
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/files/jvm/t2585.check
diff --git a/test/files/jvm/t2585/Test.java b/test/files/jvm/t2585/Test.java
new file mode 100644
index 0000000000..51fe20d81e
--- /dev/null
+++ b/test/files/jvm/t2585/Test.java
@@ -0,0 +1,16 @@
+class J { S s ; }
+
+public class Test {
+ public static void main(String[] args) {
+ final X x = new X();
+ final OuterImpl o = new OuterImpl(x);
+
+ final OuterImpl.Inner i1 = o.newInner();
+ i1.getT().getI().getT().getI(); // <--- Error: "The method getI() is undefined for the type Object"
+
+ final Outer<X>.Inner i2 = o.newInner();
+ i2.getT().getI().getT().getI(); // <--- Error: "The method getI() is undefined for the type Object"
+
+ HashMap<String, String> map = new HashMap<String, String>();
+ }
+} \ No newline at end of file
diff --git a/test/files/jvm/t2585/genericouter.scala b/test/files/jvm/t2585/genericouter.scala
new file mode 100644
index 0000000000..e06aa8101e
--- /dev/null
+++ b/test/files/jvm/t2585/genericouter.scala
@@ -0,0 +1,25 @@
+case class S(n:Int)
+
+trait TraversableLike[+A, +Repr] {
+ class WithFilter(p: A => Boolean)
+ def withFilter(p: A => Boolean): WithFilter = new WithFilter(p)
+}
+
+class HashMap[K, +V] extends TraversableLike[(K, V), HashMap[K, V]]
+
+class Outer[T](val t: T) {
+ class Inner {
+ def getT : T = t
+ }
+}
+
+class OuterImpl(x: X) extends Outer[X](x) {
+ def newInner = new Inner
+}
+
+class X {
+ def getI : Outer[X]#Inner = {
+ val oImpl = new OuterImpl(this)
+ new oImpl.Inner
+ }
+} \ No newline at end of file
diff --git a/test/files/neg/bug563.scala b/test/files/neg/bug563.scala
index d8e026e656..624b83b1fa 100644
--- a/test/files/neg/bug563.scala
+++ b/test/files/neg/bug563.scala
@@ -1,7 +1,7 @@
object Test {
def map[A,R](a : List[A], f : A => R) : List[R] = a.map(f);
- def split(sn : Iterable[List[Cell[int]]]) : unit =
+ def split(sn : Iterable[List[Cell[Int]]]) : Unit =
for (n <- sn)
map(n,ptr => new Cell(ptr.elem));
}
diff --git a/test/files/neg/bug700.check b/test/files/neg/bug700.check
index 33a67e5094..5c2854069c 100644
--- a/test/files/neg/bug700.check
+++ b/test/files/neg/bug700.check
@@ -1,4 +1,4 @@
bug700.scala:6: error: method foobar in trait Foo is accessed from super. It may not be abstract unless it is overridden by a member declared `abstract' and `override'
- def foobar: unit = super.foobar
+ def foobar: Unit = super.foobar
^
one error found
diff --git a/test/files/neg/bug700.scala b/test/files/neg/bug700.scala
index 7477bb54f6..b08c8b5529 100644
--- a/test/files/neg/bug700.scala
+++ b/test/files/neg/bug700.scala
@@ -3,7 +3,7 @@ trait Foo {
}
trait Bar extends Foo {
- def foobar: unit = super.foobar
+ def foobar: Unit = super.foobar
}
// the following definition breaks the compiler
diff --git a/test/files/neg/bug875.check b/test/files/neg/bug875.check
index d547c8d69c..16a982241e 100644
--- a/test/files/neg/bug875.check
+++ b/test/files/neg/bug875.check
@@ -4,8 +4,8 @@ bug875.scala:3: error: no `: _*' annotation allowed here
^
bug875.scala:6: error: no `: _*' annotation allowed here
(such annotations are only allowed in arguments to *-parameters)
- mkList(xs: _*)
- ^
+ mkList1(xs: _*)
+ ^
bug875.scala:15: error: no `: _*' annotation allowed here
(such annotations are only allowed in arguments to *-parameters)
f(true, 1, xs: _*)
diff --git a/test/files/neg/bug875.scala b/test/files/neg/bug875.scala
index 9c579b0166..38affd5a43 100644
--- a/test/files/neg/bug875.scala
+++ b/test/files/neg/bug875.scala
@@ -1,9 +1,9 @@
object Test extends Application {
val xs = List(4, 5, 6)
val ys = List(1, 2, 3, xs: _*)
- def mkList(x: Int) = List(x)
- def mkList(x: Boolean) = List(x)
- mkList(xs: _*)
+ def mkList1(x: Int) = List(x)
+ def mkList2(x: Boolean) = List(x)
+ mkList1(xs: _*)
def f(x: Int*) = List(x: _*)
diff --git a/test/files/neg/bug910.check b/test/files/neg/bug910.check
index fe4ad4fca4..2bc2d986fa 100644
--- a/test/files/neg/bug910.check
+++ b/test/files/neg/bug910.check
@@ -1,6 +1,6 @@
bug910.scala:4: error: type mismatch;
found : Seq[Char]
- required: scala.Seq[int]
- val y: Seq[int] = rest
+ required: scala.Seq[Int]
+ val y: Seq[Int] = rest
^
one error found
diff --git a/test/files/neg/bug910.scala b/test/files/neg/bug910.scala
index 2f28ea408f..540ee7001d 100644
--- a/test/files/neg/bug910.scala
+++ b/test/files/neg/bug910.scala
@@ -1,7 +1,7 @@
object RegExpTest1 extends Application {
def co(x: Seq[Char]) = x match {
case Seq('s','c','a','l','a', rest @ _*) =>
- val y: Seq[int] = rest
+ val y: Seq[Int] = rest
y
}
}
diff --git a/test/files/neg/constrs.check b/test/files/neg/constrs.check
index 3524709785..4f4a12bc13 100644
--- a/test/files/neg/constrs.check
+++ b/test/files/neg/constrs.check
@@ -8,7 +8,7 @@ constrs.scala:10: error: called constructor's definition must precede calling co
def this() = this("abc")
^
constrs.scala:12: error: called constructor's definition must precede calling constructor's definition
- def this(x: boolean) = this(x)
+ def this(x: Boolean) = this(x)
^
constrs.scala:16: error: type mismatch;
found : Int(1)
diff --git a/test/files/neg/constrs.scala b/test/files/neg/constrs.scala
index 969f593a2d..016df098f0 100644
--- a/test/files/neg/constrs.scala
+++ b/test/files/neg/constrs.scala
@@ -9,7 +9,7 @@ object test {
class Foo(x: Int) {
def this() = this("abc")
def this(x: String) = this(1)
- def this(x: boolean) = this(x)
+ def this(x: Boolean) = this(x)
}
class Bar[a](x: a) {
diff --git a/test/files/neg/gadts1.scala b/test/files/neg/gadts1.scala
index 67aef4f2d9..07200ff7aa 100644
--- a/test/files/neg/gadts1.scala
+++ b/test/files/neg/gadts1.scala
@@ -1,8 +1,8 @@
object Test{
abstract class Number
-case class Int(n: int) extends Number
-case class Double(d: double) extends Number
+case class Int(n: scala.Int) extends Number
+case class Double(d: scala.Double) extends Number
trait Term[+a]
case class Cell[a](var x: a) extends Term[a]
@@ -10,7 +10,7 @@ case class NumTerm(val n: Number) extends Term[Number]
class IntTerm(n: Int) extends NumTerm(n) with Term[Int]
-def f[a](t:Term[a], c:Cell[a]): unit =
+def f[a](t:Term[a], c:Cell[a]): Unit =
t match {
case NumTerm(n) => c.x = Double(1.0)
}
@@ -18,7 +18,7 @@ def f[a](t:Term[a], c:Cell[a]): unit =
val x:Term[Number] = NumTerm(Int(5))
-def main(args: Array[String]): unit = {
+def main(args: Array[String]): Unit = {
val cell = Cell[Int](Int(6))
Console.println(cell)
f[Int](new IntTerm(Int(5)), cell)
diff --git a/test/files/neg/implicits.check b/test/files/neg/implicits.check
index d94e1f27f2..337560f423 100644
--- a/test/files/neg/implicits.check
+++ b/test/files/neg/implicits.check
@@ -3,7 +3,7 @@ implicits.scala:21: error: type mismatch;
required: ?{val +: ?}
Note that implicit conversions are not applicable because they are ambiguous:
both method any2plus in object Sub of type (x: Any)Sub.Plus
- and method pos2int in object Super of type (p: Pos)int
+ and method pos2int in object Super of type (p: Pos)Int
are possible conversion functions from Pos to ?{val +: ?}
f(p+1)
^
diff --git a/test/files/neg/implicits.scala b/test/files/neg/implicits.scala
index be85029660..846591e22d 100644
--- a/test/files/neg/implicits.scala
+++ b/test/files/neg/implicits.scala
@@ -3,7 +3,7 @@ class Pos
class Super
object Super {
- implicit def pos2int(p: Pos): int = 0
+ implicit def pos2int(p: Pos): Int = 0
}
object Sub extends Super {
@@ -17,7 +17,7 @@ object Test {
import Super._
import Sub._
val p = new Pos
- def f(x: int): int = x
+ def f(x: Int): Int = x
f(p+1)
}
diff --git a/test/files/neg/overload.check b/test/files/neg/overload.check
index 0faa97adb1..abfabaf3f2 100644
--- a/test/files/neg/overload.check
+++ b/test/files/neg/overload.check
@@ -1,6 +1,6 @@
overload.scala:10: error: ambiguous reference to overloaded definition,
both method f in class D of type (x: Any)Unit
-and method f in class C of type (x: int)Unit
+and method f in class C of type (x: Int)Unit
match argument types (Int)
(new D).f(1)
^
diff --git a/test/files/neg/overload.scala b/test/files/neg/overload.scala
index 311ea3874b..6ad911e90e 100644
--- a/test/files/neg/overload.scala
+++ b/test/files/neg/overload.scala
@@ -1,5 +1,5 @@
class C {
- def f(x: int) {}
+ def f(x: Int) {}
}
class D extends C {
diff --git a/test/files/neg/t0218.scala b/test/files/neg/t0218.scala
index 282e85e814..319be82a7a 100644
--- a/test/files/neg/t0218.scala
+++ b/test/files/neg/t0218.scala
@@ -6,7 +6,7 @@ trait APQ {
type PP = P
- def pq(numQueens: int, numRows: int) : List[Placement] = {
+ def pq(numQueens: Int, numRows: Int) : List[Placement] = {
List(new PP)
}
}
diff --git a/test/files/neg/t1422.check b/test/files/neg/t1422.check
new file mode 100644
index 0000000000..5931fcb049
--- /dev/null
+++ b/test/files/neg/t1422.check
@@ -0,0 +1,4 @@
+t1422.scala:1: error: private[this] not allowed for case class parameters
+case class A(private[this] val foo:String)
+ ^
+one error found
diff --git a/test/files/neg/t1422.scala b/test/files/neg/t1422.scala
new file mode 100644
index 0000000000..751f05a764
--- /dev/null
+++ b/test/files/neg/t1422.scala
@@ -0,0 +1 @@
+case class A(private[this] val foo:String)
diff --git a/test/files/neg/t1477.check b/test/files/neg/t1477.check
new file mode 100644
index 0000000000..e497637857
--- /dev/null
+++ b/test/files/neg/t1477.check
@@ -0,0 +1,5 @@
+t1477.scala:13: error: overriding type V in trait C with bounds >: Nothing <: Middle.this.D;
+ type V is a volatile type; cannot override a type with non-volatile upper bound
+ type V <: (D with U)
+ ^
+one error found
diff --git a/test/files/neg/t1477.scala b/test/files/neg/t1477.scala
new file mode 100644
index 0000000000..0cc0cd5f7a
--- /dev/null
+++ b/test/files/neg/t1477.scala
@@ -0,0 +1,25 @@
+object Test extends Application {
+ trait A
+ trait B extends A
+
+ trait C {
+ type U
+ trait D { type T >: B <: A }
+ type V <: D
+ val y: V#T = new B { }
+ }
+
+ trait Middle extends C {
+ type V <: (D with U)
+ }
+
+ class D extends Middle {
+ trait E
+ trait F { type T = E }
+ type U = F
+ def frob(arg : E) : E = arg
+ frob(y)
+ }
+
+ new D
+}
diff --git a/test/files/neg/t2179.check b/test/files/neg/t2179.check
new file mode 100644
index 0000000000..e454e117b5
--- /dev/null
+++ b/test/files/neg/t2179.check
@@ -0,0 +1,9 @@
+t2179.scala:2: error: inferred type arguments [scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]{def sameElements[B >: Any](that: Iterable[B]): Boolean}]; def reverse: scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; protected def thisCollection: Seq[Double]{def companion: scala.collection.generic.GenericCompanion[Seq[Any]]}; def dropRight(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def takeRight(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def slice(start: Int,end: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def take(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def drop(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}}] do not conform to method reduceLeft's type parameter bounds [B >: List[Double]]
+ (Nil:List[List[Double]]).reduceLeft((_: Any, _: Any) => Nil.indices.map(_ => 0d))
+ ^
+t2179.scala:2: error: type mismatch;
+ found : (Any, Any) => scala.collection.immutable.IndexedSeq[Double]
+ required: (scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]{def sameElements[B >: Any](that: Iterable[B]): Boolean}]; def reverse: scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; protected def thisCollection: Seq[Double]{def companion: scala.collection.generic.GenericCompanion[Seq[Any]]}; def dropRight(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def takeRight(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def slice(start: Int,end: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def take(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def drop(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}}, List[Double]) => scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]{def sameElements[B >: Any](that: Iterable[B]): Boolean}]; def reverse: scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; protected def thisCollection: Seq[Double]{def companion: scala.collection.generic.GenericCompanion[Seq[Any]]}; def dropRight(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def takeRight(n: Int): scala.collection.immutable.Seq[Double]{def companion: 
scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def slice(start: Int,end: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def take(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}; def drop(n: Int): scala.collection.immutable.Seq[Double]{def companion: scala.collection.generic.GenericCompanion[scala.collection.immutable.Seq[Any]]}}
+ (Nil:List[List[Double]]).reduceLeft((_: Any, _: Any) => Nil.indices.map(_ => 0d))
+ ^
+two errors found
diff --git a/test/files/neg/t2179.scala b/test/files/neg/t2179.scala
new file mode 100755
index 0000000000..89e22b6e2a
--- /dev/null
+++ b/test/files/neg/t2179.scala
@@ -0,0 +1,3 @@
+object Test {
+ (Nil:List[List[Double]]).reduceLeft((_: Any, _: Any) => Nil.indices.map(_ => 0d))
+}
diff --git a/test/files/neg/t2641.check b/test/files/neg/t2641.check
new file mode 100644
index 0000000000..70123bfc58
--- /dev/null
+++ b/test/files/neg/t2641.check
@@ -0,0 +1,39 @@
+t2641.scala:19: error: illegal cyclic reference involving trait ManagedSeq
+ with TraversableViewLike[A, ManagedSeqStrict[A], ManagedSeq[A]]
+ ^
+t2641.scala:17: error: illegal inheritance;
+ self-type ManagedSeq does not conform to ManagedSeqStrict[A]'s selftype ManagedSeqStrict[A]
+ extends ManagedSeqStrict[A]
+ ^
+t2641.scala:18: error: illegal inheritance;
+ self-type ManagedSeq does not conform to scala.collection.TraversableView[A,ManagedSeqStrict[A]]'s selftype scala.collection.TraversableView[A,ManagedSeqStrict[A]]
+ with TraversableView[A, ManagedSeqStrict[A]]
+ ^
+t2641.scala:19: error: illegal inheritance;
+ self-type ManagedSeq does not conform to scala.collection.TraversableViewLike[A,ManagedSeqStrict[A],<error>]'s selftype scala.collection.TraversableViewLike[A,Coll,This]
+ with TraversableViewLike[A, ManagedSeqStrict[A], ManagedSeq[A]]
+ ^
+t2641.scala:17: error: illegal inheritance;
+ self-type ManagedSeq does not conform to ScalaObject's selftype ScalaObject
+ extends ManagedSeqStrict[A]
+ ^
+t2641.scala:25: error: something is wrong (wrong class file?): trait ManagedSeq with type parameters [A,Coll] gets applied to arguments [], phase = typer
+ trait Transformed[+B] extends ManagedSeq[B, Coll] with super.Transformed[B]
+ ^
+t2641.scala:27: error: something is wrong (wrong class file?): trait ManagedSeq with type parameters [A,Coll] gets applied to arguments [], phase = namer
+ trait Sliced extends Transformed[A] with super.Sliced {
+ ^
+t2641.scala:27: error: illegal inheritance; superclass Any
+ is not a subclass of the superclass ManagedSeqStrict
+ of the mixin trait Transformed
+ trait Sliced extends Transformed[A] with super.Sliced {
+ ^
+t2641.scala:27: error: illegal inheritance; superclass Any
+ is not a subclass of the superclass Object
+ of the mixin trait Sliced
+ trait Sliced extends Transformed[A] with super.Sliced {
+ ^
+t2641.scala:28: error: value managedIterator is not a member of ManagedSeq
+ override def managedIterator = self.managedIterator slice (from, until)
+ ^
+10 errors found
diff --git a/test/files/neg/t2641.scala b/test/files/neg/t2641.scala
new file mode 100644
index 0000000000..5529035f79
--- /dev/null
+++ b/test/files/neg/t2641.scala
@@ -0,0 +1,31 @@
+import scala.collection._
+import scala.collection.generic._
+import scala.collection.mutable.Builder
+
+
+abstract class ManagedSeqStrict[+A]
+ extends Traversable[A]
+ with GenericTraversableTemplate[A, ManagedSeqStrict]
+{
+ override def companion: GenericCompanion[ManagedSeqStrict] = null
+
+ override def foreach[U](f: A => U): Unit =
+ null
+}
+
+trait ManagedSeq[+A, +Coll]
+ extends ManagedSeqStrict[A]
+ with TraversableView[A, ManagedSeqStrict[A]]
+ with TraversableViewLike[A, ManagedSeqStrict[A], ManagedSeq[A]]
+{ self =>
+
+ override def underlying = throw new Exception("no underlying")
+
+ //trait Transformed[+B] extends ManagedSeq[B] with super.Transformed[B]
+ trait Transformed[+B] extends ManagedSeq[B, Coll] with super.Transformed[B]
+
+ trait Sliced extends Transformed[A] with super.Sliced {
+ override def managedIterator = self.managedIterator slice (from, until)
+ }
+
+}
diff --git a/test/files/neg/t771.check b/test/files/neg/t771.check
new file mode 100644
index 0000000000..c0d1e002f8
--- /dev/null
+++ b/test/files/neg/t771.check
@@ -0,0 +1,4 @@
+t771.scala:4: error: trait Iterator is abstract; cannot be instantiated
+ def c[A](it:java.util.Iterator[A]) = new scala.Iterator[A]
+ ^
+one error found
diff --git a/test/files/neg/t771.scala b/test/files/neg/t771.scala
new file mode 100755
index 0000000000..26bf441648
--- /dev/null
+++ b/test/files/neg/t771.scala
@@ -0,0 +1,5 @@
+class Foo {
+ def a = c(b)
+ def b[List[AnyRef]] = new java.util.Iterator[List[Object]] { }
+ def c[A](it:java.util.Iterator[A]) = new scala.Iterator[A]
+}
diff --git a/test/files/neg/viewtest.scala b/test/files/neg/viewtest.scala
index 778e672d91..5e7d624d23 100644
--- a/test/files/neg/viewtest.scala
+++ b/test/files/neg/viewtest.scala
@@ -12,13 +12,13 @@ trait Ordered[+a] {
*/
def compareTo [b >: a <% Ordered[b]](that: b): Int
- def < [b >: a <% Ordered[b]](that: b): boolean = (this compareTo that) < 0
+ def < [b >: a <% Ordered[b]](that: b): Boolean = (this compareTo that) < 0
- def > [b >: a <% Ordered[b]](that: b): boolean = (this compareTo that) > 0
+ def > [b >: a <% Ordered[b]](that: b): Boolean = (this compareTo that) > 0
- def <= [b >: a <% Ordered[b]](that: b): boolean = (this compareTo that) <= 0
+ def <= [b >: a <% Ordered[b]](that: b): Boolean = (this compareTo that) <= 0
- def >= [b >: a <% Ordered[b]](that: b): boolean = (this compareTo that) >= 0
+ def >= [b >: a <% Ordered[b]](that: b): Boolean = (this compareTo that) >= 0
}
@@ -30,9 +30,9 @@ object O {
case _ => -(y compareTo x)
}
}
- implicit def view2(x: char): Ordered[char] = new Ordered[char] {
- def compareTo [b >: char <% Ordered[b]](y: b): Int = y match {
- case y1: char => x - y1
+ implicit def view2(x: Char): Ordered[Char] = new Ordered[Char] {
+ def compareTo [b >: Char <% Ordered[b]](y: b): Int = y match {
+ case y1: Char => x - y1
case _ => -(y compareTo x)
}
}
@@ -106,7 +106,7 @@ object Test {
Console.println(t.elements)
}
{
- var t: Tree[List[char]] = Empty
+ var t: Tree[List[Char]] = Empty
for (s <- args) {
t = t insert toCharList(s)
}
diff --git a/test/files/pos/bug0091.scala b/test/files/pos/bug0091.scala
index 54c821b41c..d491b7cfb9 100644
--- a/test/files/pos/bug0091.scala
+++ b/test/files/pos/bug0091.scala
@@ -1,6 +1,6 @@
class Bug {
def main(args: Array[String]) = {
var msg: String = null; // no bug if "null" instead of "_"
- val f: PartialFunction[Any, unit] = { case 42 => msg = "coucou" };
+ val f: PartialFunction[Any, Unit] = { case 42 => msg = "coucou" };
}
}
diff --git a/test/files/pos/bug1075.scala b/test/files/pos/bug1075.scala
index 936ef72272..0f518b24db 100644
--- a/test/files/pos/bug1075.scala
+++ b/test/files/pos/bug1075.scala
@@ -5,7 +5,7 @@ class Directory(var dir_ : String)
}
dir_ = dir_.replaceAll("/{2,}", "/")
- def this(serialized : Array[byte]) = {
+ def this(serialized : Array[Byte]) = {
this(new String(serialized, "UTF-8"))
}
diff --git a/test/files/pos/bug287.scala b/test/files/pos/bug287.scala
index 81a01951b2..8e5e8831c1 100644
--- a/test/files/pos/bug287.scala
+++ b/test/files/pos/bug287.scala
@@ -1,7 +1,7 @@
object testBuf {
class mystream extends java.io.BufferedOutputStream(new java.io.FileOutputStream("/dev/null")) {
def w( x:String ):Unit = {
- val foo = new Array[byte](2);
+ val foo = new Array[Byte](2);
// write( byte[] ) is defined in FilterOutputStream, the superclass of BufferedOutputStream
super.write( foo ); // error
diff --git a/test/files/pos/collections.scala b/test/files/pos/collections.scala
index 61a25528c7..23b23d016e 100644
--- a/test/files/pos/collections.scala
+++ b/test/files/pos/collections.scala
@@ -2,7 +2,7 @@ package mixins;
import scala.collection.mutable._;
-class Collections extends HashSet[Int] with ObservableSet[Int,Collections] {
+class Collections extends HashSet[Int] with ObservableSet[Int] {
override def +=(elem: Int): this.type = super.+=(elem);
override def -=(elem: Int): this.type = super.-=(elem);
override def clear: Unit = super.clear;
diff --git a/test/files/pos/depexists.scala b/test/files/pos/depexists.scala
new file mode 100644
index 0000000000..d539c844c1
--- /dev/null
+++ b/test/files/pos/depexists.scala
@@ -0,0 +1,5 @@
+object depexists {
+
+ val c: Cell[(a, b)] forSome { type a <: Number; type b <: (a, a) } = null
+ val d = c
+}
diff --git a/test/files/pos/implicits.scala b/test/files/pos/implicits.scala
index aeb6591507..4979835e21 100644
--- a/test/files/pos/implicits.scala
+++ b/test/files/pos/implicits.scala
@@ -1,3 +1,17 @@
+// #1435
+object t1435 {
+ implicit def a(s:String):String = error("")
+ implicit def a(i:Int):String = error("")
+ implicit def b(i:Int):String = error("")
+}
+
+class C1435 {
+ val v:String = {
+ import t1435.a
+ 2
+ }
+}
+
// #1579
object Test1579 {
class Column
@@ -36,3 +50,8 @@ object Test2188 {
val x: java.util.List[String] = List("foo")
}
+
+object TestNumericWidening {
+ val y = 1
+ val x: java.lang.Long = y
+}
diff --git a/test/files/pos/nested2.scala b/test/files/pos/nested2.scala
index 302688a0ef..421ea6facf 100644
--- a/test/files/pos/nested2.scala
+++ b/test/files/pos/nested2.scala
@@ -5,5 +5,5 @@ class C[A] {
object Test {
val x = new C[String]
- val y: C[String]#D[int] = new x.D[int]
+ val y: C[String]#D[Int] = new x.D[Int]
}
diff --git a/test/files/pos/switchUnbox.scala b/test/files/pos/switchUnbox.scala
index a97bff5521..4f5467de29 100644
--- a/test/files/pos/switchUnbox.scala
+++ b/test/files/pos/switchUnbox.scala
@@ -2,7 +2,7 @@
// that contains -Xsqueeze:on
//
object Foo {
- var xyz: (int, String) = (1, "abc")
+ var xyz: (Int, String) = (1, "abc")
xyz._1 match {
case 1 => Console.println("OK")
case 2 => Console.println("OK")
diff --git a/test/files/pos/t1164.scala b/test/files/pos/t1164.scala
index 3acda88ba9..b238bf54d9 100644
--- a/test/files/pos/t1164.scala
+++ b/test/files/pos/t1164.scala
@@ -15,7 +15,7 @@ object test {
// Try the same thing as above but use function as arguemnt to Bar
// constructor
- type FunIntToA [a] = (int) => a
+ type FunIntToA [a] = (Int) => a
class Bar[a] (var f: FunIntToA[a])
object Bar {
diff --git a/test/files/pos/t1226.scala b/test/files/pos/t1226.scala
new file mode 100644
index 0000000000..0af21cbb61
--- /dev/null
+++ b/test/files/pos/t1226.scala
@@ -0,0 +1,8 @@
+package graphs;
+
+abstract class Graph (private[graphs] val mappings : Any){
+}
+
+class Nodes (mappings : Any) extends Graph(mappings) {
+ mappings.toString;
+}
diff --git a/test/files/pos/t1236.scala b/test/files/pos/t1236.scala
new file mode 100644
index 0000000000..5e221ce411
--- /dev/null
+++ b/test/files/pos/t1236.scala
@@ -0,0 +1,14 @@
+trait Empty[E[_]] {
+ def e[A]: E[A]
+}
+
+object T {
+ val ListEmpty = new Empty[List] {
+ def e[A] = Nil
+ }
+
+ def foo[F[_]](q:(String,String)) = "hello"
+ def foo[F[_]](e: Empty[F]) = "world"
+
+ val x = foo[List](ListEmpty)
+} \ No newline at end of file
diff --git a/test/files/pos/t1422.scala b/test/files/pos/t1422.scala
new file mode 100644
index 0000000000..658f5c730d
--- /dev/null
+++ b/test/files/pos/t1422.scala
@@ -0,0 +1,2 @@
+case class A(private val foo:String)
+case class B(protected[this] val foo:String)
diff --git a/test/files/pos/t1459/AbstractBase.java b/test/files/pos/t1459/AbstractBase.java
new file mode 100755
index 0000000000..492419416c
--- /dev/null
+++ b/test/files/pos/t1459/AbstractBase.java
@@ -0,0 +1,5 @@
+package base;
+
+public abstract class AbstractBase {
+ public abstract void doStuff(String... params); // !!! was Object..
+} \ No newline at end of file
diff --git a/test/files/pos/t1459/App.scala b/test/files/pos/t1459/App.scala
new file mode 100755
index 0000000000..651b285b17
--- /dev/null
+++ b/test/files/pos/t1459/App.scala
@@ -0,0 +1,18 @@
+package foo
+import base._
+
+object App extends Application {
+ class Concrete extends AbstractBase {
+ override def doStuff(params:java.lang.String*): Unit = println("doStuff invoked")
+ }
+
+ val impl = new Concrete
+
+ //succeeds
+ impl.doStuff(null)
+
+ val caller = new Caller
+
+ // fails with AbstractMethodError
+ caller.callDoStuff(impl)
+}
diff --git a/test/files/pos/t1459/Caller.java b/test/files/pos/t1459/Caller.java
new file mode 100755
index 0000000000..4ae51d8c57
--- /dev/null
+++ b/test/files/pos/t1459/Caller.java
@@ -0,0 +1,7 @@
+package base;
+
+public class Caller {
+ public void callDoStuff(AbstractBase impl) {
+ impl.doStuff("abc"); // was new Object());
+ }
+} \ No newline at end of file
diff --git a/test/pending/neg/t1545.scala b/test/files/pos/t1545.scala
index d7c0245725..d7c0245725 100755
--- a/test/pending/neg/t1545.scala
+++ b/test/files/pos/t1545.scala
diff --git a/test/files/pos/t2484.scala b/test/files/pos/t2484.scala
new file mode 100755
index 0000000000..6990c46099
--- /dev/null
+++ b/test/files/pos/t2484.scala
@@ -0,0 +1,17 @@
+class Admin extends javax.swing.JApplet {
+ val jScrollPane = new javax.swing.JScrollPane (null, 0, 0)
+ def bug2484: Unit = {
+ scala.concurrent.ops.spawn {jScrollPane.synchronized {
+ def someFunction () = {}
+ //scala.concurrent.ops.spawn {someFunction ()}
+ jScrollPane.addComponentListener (new java.awt.event.ComponentAdapter {override def componentShown (e: java.awt.event.ComponentEvent) = {
+ someFunction (); jScrollPane.removeComponentListener (this)}})
+ }}
+ }
+}
+// t2630.scala
+object Test {
+ def meh(xs: List[Any]) {
+ xs map { x => (new AnyRef {}) }
+ }
+}
diff --git a/test/files/pos/t2635.scala b/test/files/pos/t2635.scala
new file mode 100755
index 0000000000..7cd5531356
--- /dev/null
+++ b/test/files/pos/t2635.scala
@@ -0,0 +1,16 @@
+abstract class Base
+
+object Test
+{
+ def run(c: Class[_ <: Base]): Unit = {
+ }
+
+ def main(args: Array[String]): Unit =
+ {
+ val sc: Option[Class[_ <: Base]] = Some(classOf[Base])
+ sc match {
+ case Some(c) => run(c)
+ case None =>
+ }
+ }
+}
diff --git a/test/files/pos/t2664.scala b/test/files/pos/t2664.scala
new file mode 100644
index 0000000000..7b667d0106
--- /dev/null
+++ b/test/files/pos/t2664.scala
@@ -0,0 +1,9 @@
+package pkg1 {
+ class C {
+ private[pkg1] def foo: Int = 1
+ }
+
+ trait T extends C {
+ private[pkg1] abstract override def foo = super.foo + 1
+ }
+}
diff --git a/test/files/pos/t2665.scala b/test/files/pos/t2665.scala
new file mode 100644
index 0000000000..3163e31326
--- /dev/null
+++ b/test/files/pos/t2665.scala
@@ -0,0 +1,3 @@
+object Test {
+ val x: Unit = Array("")
+} \ No newline at end of file
diff --git a/test/files/pos/t2667.scala b/test/files/pos/t2667.scala
new file mode 100644
index 0000000000..b214cc7f37
--- /dev/null
+++ b/test/files/pos/t2667.scala
@@ -0,0 +1,6 @@
+object A {
+ def foo(x: Int, y: Int*): Int = 45
+ def foo[T](x: T*): Int = 55
+
+ val x: Unit = foo(23, 23f)
+} \ No newline at end of file
diff --git a/test/files/pos/t2669.scala b/test/files/pos/t2669.scala
new file mode 100644
index 0000000000..72e931178c
--- /dev/null
+++ b/test/files/pos/t2669.scala
@@ -0,0 +1,28 @@
+// #2629, #2639, #2669
+object Test2669 {
+
+ def test[T](l: java.util.ArrayList[_ <: T]) = 1
+ test(new java.util.ArrayList[String]())
+
+}
+
+import java.util.ArrayList
+
+object Test2629 {
+ def main(args: Array[String]): Unit = {
+ val l = new ArrayList[String](1)
+ val m = new ArrayList(l)
+
+ println(l.size)
+ println(m.size)
+ }
+}
+
+
+import java.util.Vector
+
+// scalac cannot detect lack of type params, but then throws AssertionError later:
+class TVector2639 {
+ val b = new Vector // this line passed without error detected
+ val a = new Vector(1) // this line caused throwing AssertionError when scalac
+}
diff --git a/test/files/pos/t2698.scala b/test/files/pos/t2698.scala
new file mode 100644
index 0000000000..0e2662de61
--- /dev/null
+++ b/test/files/pos/t2698.scala
@@ -0,0 +1,10 @@
+import scala.collection._
+import scala.util.regexp._
+
+abstract class S2 {
+ val lang: WordExp
+ type __labelT = lang._labelT
+
+ var deltaq: Array[__labelT] = _
+ def delta1 = immutable.Map(deltaq.zipWithIndex: _*)
+}
diff --git a/test/files/run/Course-2002-09.scala b/test/files/run/Course-2002-09.scala
index fac39e0841..384a91efd8 100644
--- a/test/files/run/Course-2002-09.scala
+++ b/test/files/run/Course-2002-09.scala
@@ -81,7 +81,7 @@ class Constant(q: Quantity, v: Double) extends Constraint {
class Probe(name: String, q: Quantity) extends Constraint {
def newValue: Unit = printProbe(q.getValue);
def dropValue: Unit = printProbe(None);
- private def printProbe(v: Option[double]) {
+ private def printProbe(v: Option[Double]) {
val vstr = v match {
case Some(x) => x.toString()
case None => "?"
@@ -103,7 +103,7 @@ class Quantity() {
if (v != v1) error("Error! contradiction: " + v + " and " + v1);
case None =>
informant = setter; value = Some(v);
- for (val c <- constraints; !(c == informant)) {
+ for (c <- constraints; if !(c == informant)) {
c.newValue;
}
}
@@ -112,7 +112,7 @@ class Quantity() {
def forgetValue(retractor: Constraint): Unit = {
if (retractor == informant) {
value = None;
- for (val c <- constraints; !(c == informant)) c.dropValue;
+ for (c <- constraints; if !(c == informant)) c.dropValue;
}
}
def forgetValue: Unit = forgetValue(NoConstraint);
@@ -258,7 +258,7 @@ object M2 {
};
}
- def show(x: Option[int], y: Option[Int], z: Option[int]) = {
+ def show(x: Option[Int], y: Option[Int], z: Option[Int]) = {
Console.print("a = " +set(a,x)+ ", b = " +set(b,y)+ ", c = " +set(c,z));
Console.println(" => " + a.str + " * " + b.str + " = " + c.str);
a.forgetValue; b.forgetValue; c.forgetValue;
diff --git a/test/files/run/SymbolsTest.scala b/test/files/run/SymbolsTest.scala
new file mode 100644
index 0000000000..53caa5e62f
--- /dev/null
+++ b/test/files/run/SymbolsTest.scala
@@ -0,0 +1,283 @@
+
+
+
+
+class Slazz {
+ val s1 = 'myFirstSymbol
+ val s2 = 'mySecondSymbol
+ def s3 = 'myThirdSymbol
+ var s4: Symbol = null
+
+ s4 = 'myFourthSymbol
+}
+
+class Base {
+ val basesymbol = 'symbase
+}
+
+class Sub extends Base {
+ val subsymbol = 'symsub
+}
+
+trait Signs {
+ val ind = 'indication
+ val trace = 'trace
+}
+
+trait Lazy1 {
+ lazy val v1 = "lazy v1"
+ lazy val s1 = 'lazySymbol1
+}
+
+trait Lazy2 {
+ lazy val v2 = "lazy v2"
+ lazy val s2 = 'lazySymbol2
+}
+
+trait Lazy3 {
+ lazy val v3 = "lazy v3"
+ lazy val s3 = 'lazySymbol3
+}
+
+object SingletonOfLazyness {
+ lazy val lazysym = 'lazySymbol
+ lazy val another = 'another
+ lazy val lastone = 'lastone
+}
+
+/*
+ * Tests symbols to see if they work correct.
+ */
+object Test {
+ class Inner {
+ val simba = 'smba
+ var mfs: Symbol = null
+ mfs = Symbol("mfsa")
+ }
+
+ object InnerObject {
+ val o1 = 'aaa
+ val o2 = 'ddd
+ }
+
+ def aSymbol = 'myFirstSymbol
+ val anotherSymbol = 'mySecondSymbol
+
+ def main(args: Array[String]) {
+ testLiterals
+ testForLoop
+ testInnerClasses
+ testInnerObjects
+ testWithHashMaps
+ testLists
+ testAnonymous
+ testNestedObject
+ testInheritance
+ testTraits
+ testLazyTraits
+ testLazyObjects
+ }
+
+ def testLiterals {
+ val scl = new Slazz
+ assert(scl.s1 == aSymbol)
+ assert(scl.s2 == anotherSymbol)
+ assert(scl.s3 == 'myThirdSymbol)
+ assert(scl.s4 == Symbol.apply("myFourthSymbol"))
+ assert(scl.s1 == Symbol("myFirstSymbol"))
+ }
+
+ def testForLoop {
+ for (i <- 0 until 100) List("Val" + i)
+ }
+
+ def testInnerClasses {
+ val innerPower = new Inner
+ assert(innerPower.simba == 'smba)
+ assert(innerPower.mfs == 'mfsa)
+ }
+
+ def testInnerObjects {
+ assert(InnerObject.o1 == 'aaa)
+ assert(InnerObject.o2 == 'ddd)
+ }
+
+ def testWithHashMaps {
+ val map = new collection.mutable.HashMap[Symbol, Symbol]
+ map.put(InnerObject.o1, 'smba)
+ map.put(InnerObject.o2, 'mfsa)
+ map.put(Symbol("WeirdKey" + 1), Symbol("Weird" + "Val" + 1))
+ assert(map('aaa) == 'smba)
+ assert(map('ddd) == 'mfsa)
+ assert(map('WeirdKey1) == Symbol("WeirdVal1"))
+
+ map.clear
+ for (i <- 0 until 100) map.put(Symbol("symKey" + i), Symbol("symVal" + i))
+ assert(map(Symbol("symKey15")) == Symbol("symVal15"))
+ assert(map('symKey22) == 'symVal22)
+ assert(map('symKey73) == 'symVal73)
+ assert(map('symKey56) == 'symVal56)
+ assert(map('symKey91) == 'symVal91)
+ }
+
+ def testLists {
+ var lst: List[Symbol] = Nil
+ for (i <- 0 until 100) lst ::= Symbol("lsym" + (99 - i))
+ assert(lst(0) == 'lsym0)
+ assert(lst(10) == 'lsym10)
+ assert(lst(30) == 'lsym30)
+ assert(lst(40) == 'lsym40)
+ assert(lst(65) == 'lsym65)
+ assert(lst(90) == 'lsym90)
+ }
+
+ def testAnonymous { // TODO complaints classdef can't be found for some reason, runs fine in my case
+ // val anon = () => {
+ // val simba = 'smba
+ // simba
+ // }
+ // val an2 = () => {
+ // object nested {
+ // val m = 'mfsa
+ // }
+ // nested.m
+ // }
+ // val an3 = () => {
+ // object nested {
+ // val f = () => {
+ // 'layered
+ // }
+ // def gets = f()
+ // }
+ // nested.gets
+ // }
+ // val inner = new Inner
+ // assert(anon() == inner.simba)
+ // assert(anon().toString == "'smba")
+ // assert(an2() == 'mfsa)
+ // assert(an3() == Symbol("layered" + ""))
+ }
+
+ def testNestedObject {
+ object nested {
+ def sign = 'sign
+ def insignia = 'insignia
+ }
+ assert(nested.sign == 'sign)
+ assert(nested.insignia == 'insignia)
+ assert(('insignia).toString == "'insignia")
+ }
+
+ def testInheritance {
+ val base = new Base
+ val sub = new Sub
+ assert(base.basesymbol == 'symbase)
+ assert(sub.subsymbol == 'symsub)
+ assert(sub.basesymbol == 'symbase)
+
+ val anon = new Sub {
+ def subsubsymbol = 'symsubsub
+ }
+ assert(anon.subsubsymbol == 'symsubsub)
+ assert(anon.subsymbol == 'symsub)
+ assert(anon.basesymbol == 'symbase)
+
+ object nested extends Sub {
+ def objsymbol = 'symobj
+ }
+ assert(nested.objsymbol == 'symobj)
+ assert(nested.subsymbol == 'symsub)
+ assert(nested.basesymbol == 'symbase)
+ assert(('symbase).toString == "'symbase")
+ }
+
+ def testTraits {
+ val fromTrait = new AnyRef with Signs {
+ def traitsymbol = 'traitSymbol
+ }
+
+ assert(fromTrait.traitsymbol == 'traitSymbol)
+ assert(fromTrait.ind == 'indication)
+ assert(fromTrait.trace == 'trace)
+ assert(('trace).toString == "'trace")
+
+ trait Compl {
+ val s1 = 's1
+ def s2 = 's2
+ object inner {
+ val s3 = 's3
+ val s4 = 's4
+ }
+ }
+
+ val compl = new Sub with Signs with Compl
+ assert(compl.s1 == 's1)
+ assert(compl.s2 == 's2)
+ assert(compl.inner.s3 == 's3)
+ assert(compl.inner.s4 == 's4)
+ assert(compl.ind == 'indication)
+ assert(compl.trace == 'trace)
+ assert(compl.subsymbol == 'symsub)
+ assert(compl.basesymbol == 'symbase)
+
+ object Local extends Signs with Compl {
+ val s5 = 's5
+ def s6 = 's6
+ object inner2 {
+ val s7 = 's7
+ def s8 = 's8
+ }
+ }
+ assert(Local.s5 == 's5)
+ assert(Local.s6 == 's6)
+ assert(Local.inner2.s7 == 's7)
+ assert(Local.inner2.s8 == 's8)
+ assert(Local.inner.s3 == 's3)
+ assert(Local.inner.s4 == 's4)
+ assert(Local.s1 == 's1)
+ assert(Local.s2 == 's2)
+ assert(Local.trace == 'trace)
+ assert(Local.ind == 'indication)
+ assert(('s8).toString == "'s8")
+ }
+
+ def testLazyTraits {
+ val l1 = new AnyRef with Lazy1
+ val l2 = new AnyRef with Lazy2
+ val l3 = new AnyRef with Lazy3
+
+ l1.v1
+ l2.v2
+ l3.v3
+ assert((l1.s1).toString == "'lazySymbol1")
+ assert(l2.s2 == Symbol("lazySymbol" + 2))
+ assert(l3.s3 == 'lazySymbol3)
+ }
+
+ def testLazyObjects {
+ assert(SingletonOfLazyness.lazysym == 'lazySymbol)
+ assert(SingletonOfLazyness.another == Symbol("ano" + "ther"))
+ assert((SingletonOfLazyness.lastone).toString == "'lastone")
+
+ object nested {
+ lazy val sym1 = 'snested1
+ lazy val sym2 = 'snested2
+ }
+
+ assert(nested.sym1 == 'snested1)
+ assert(nested.sym2 == Symbol("snested" + "2"))
+ }
+
+}
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/test/files/run/bug2552.check b/test/files/run/bug2552.check
new file mode 100644
index 0000000000..774e360d13
--- /dev/null
+++ b/test/files/run/bug2552.check
@@ -0,0 +1,49 @@
+p(0)
+0
+p(1)
+1
+p(2)
+2
+p(3)
+3
+p(4)
+4
+p(5)
+5
+p(6)
+6
+p(7)
+7
+p(8)
+8
+p(9)
+9
+p(10)
+p(0)
+true
+true
+0
+p(1)
+true
+1
+p(2)
+false
+p(2)
+false
+p(0)
+true
+true
+0
+p(1)
+p(2)
+2
+p(3)
+p(4)
+4
+p(5)
+p(6)
+6
+p(7)
+p(8)
+8
+p(9)
diff --git a/test/files/run/bug2552.scala b/test/files/run/bug2552.scala
new file mode 100644
index 0000000000..911d98decc
--- /dev/null
+++ b/test/files/run/bug2552.scala
@@ -0,0 +1,34 @@
+object Test extends Application {
+ def testTakeWhile = {
+ val numbers = Iterator.range(0, 50)
+ val zeroTo9 = numbers.takeWhile(x => { println("p(" + x + ")"); x < 10 } )
+
+ zeroTo9.foreach(println _)
+
+ val zeroTo1 = Iterator.range(0, 20).takeWhile(x => { println("p(" + x + ")"); x < 2 } )
+
+ println(zeroTo1.hasNext)
+ println(zeroTo1.hasNext)
+ println(zeroTo1.next)
+ println(zeroTo1.hasNext)
+ println(zeroTo1.next)
+ println(zeroTo1.hasNext)
+ println(zeroTo1.hasNext)
+ }
+
+ def testFilter = {
+ val predicate = (x: Int) => { println("p(" + x + ")"); x % 2 == 0 }
+
+ val evens = Iterator.range(0, 10).filter(predicate)
+
+ println(evens.hasNext)
+ println(evens.hasNext)
+ println(evens.next)
+
+ evens.foreach(println _)
+ }
+
+ testTakeWhile
+ testFilter
+}
+
diff --git a/test/files/run/bug2636.scala b/test/files/run/bug2636.scala
new file mode 100644
index 0000000000..8c49a733fd
--- /dev/null
+++ b/test/files/run/bug2636.scala
@@ -0,0 +1,35 @@
+object Test
+{
+ type Foo = { def update(x: Int, value: String): Unit }
+ type Foo2 = { def update(x: Int, value: String): Int }
+ type Foo3 = { def update(x: Int, value: String): Array[Int] }
+
+ def alen() = {
+ type L1 = { def length: Int }
+ def len(p: L1) = p.length
+ val x: L1 = Array(1,2,3)
+ len(x)
+ }
+
+ type A1 = { def apply(x: Int): String }
+ def arrApply(a: A1, x: Int) = a(x)
+
+ def main(args: Array[String]): Unit = {
+ val arr = new Array[String](3)
+ val p1: Foo = arr
+ def a1 = p1(0) = "b"
+
+ val p2: Foo2 = new { def update(x: Int, value: String) = { p1(1) = "o" ; 1 } }
+ def a2 = p2(0) = "c"
+
+ val p3: Foo3 = new { def update(x: Int, value: String) = { p1(2) = "b" ; Array(1) } }
+ def a3 = p3(10) = "hi mom"
+
+ a1 ; a2 ; a3 ;
+
+ assert(arr.mkString == "bob")
+ assert(alen() == 3)
+ assert(arrApply(arr, 1) == "o")
+ assert(arrApply(new { def apply(x: Int) = "tom" }, -100) == "tom")
+ }
+} \ No newline at end of file
diff --git a/test/files/run/bug627.scala b/test/files/run/bug627.scala
index 6415694ffe..ecaf150741 100644
--- a/test/files/run/bug627.scala
+++ b/test/files/run/bug627.scala
@@ -1,6 +1,6 @@
object Test {
def main(args: Array[String]) {
- val s: Seq[int] = Array(1, 2, 3, 4)
+ val s: Seq[Int] = Array(1, 2, 3, 4)
println(s)
}
}
diff --git a/test/files/run/bugs2087-and-2400.scala b/test/files/run/bugs2087-and-2400.scala
new file mode 100644
index 0000000000..19a5df26e3
--- /dev/null
+++ b/test/files/run/bugs2087-and-2400.scala
@@ -0,0 +1,20 @@
+object Test
+{
+ def negativeCharMaker = new (Short => Char) { def apply(x: Short) = x.toChar }
+ def main(args: Array[String]): Unit = {
+ // throws exception if -100 gets to Character.valueOf
+ val x = negativeCharMaker(-100)
+
+ // chars are unsigned, they should never be equal to negative values
+ assert((-100).toShort != (-100).toChar)
+ assert((-100).toChar != (-100).toShort)
+ assert((-100).toChar != (-100).toByte)
+ assert((-100).toByte != (-100).toChar)
+
+ // BoxesRunTime must agree as well
+ assert(((-100).toShort: Any) != (-100).toChar)
+ assert(((-100).toChar: Any) != (-100).toShort)
+ assert(((-100).toChar: Any) != (-100).toByte)
+ assert(((-100).toByte: Any) != (-100).toChar)
+ }
+}
diff --git a/test/files/run/priorityQueue.scala b/test/files/run/priorityQueue.scala
index 9f453788fc..20f7a3cb44 100644
--- a/test/files/run/priorityQueue.scala
+++ b/test/files/run/priorityQueue.scala
@@ -1,24 +1,346 @@
+
+
+import scala.collection.mutable.PriorityQueue
+
+
+
+
+
+
// populate a priority queue a few different ways and make sure they all seem equal
-object Test extends Application {
- import scala.collection.mutable.PriorityQueue
- import scala.util.Random.nextInt
- val pq1 = new PriorityQueue[String]
- val pq2 = new PriorityQueue[String]
- val pq3 = new PriorityQueue[String]
- val pq4 = new PriorityQueue[String]
+object Test {
+
+ def main(args: Array[String]) {
+ testInsertionsAndEqualities
+ testIntensiveEnqueueDequeue
+ testIndexing
+ testTails
+ testInits
+ testFilters
+ testDrops
+ testUpdates
+ testEquality
+ testMisc
+ testReverse
+ }
+
+ def testInsertionsAndEqualities {
+ import scala.util.Random.nextInt
+ val pq1 = new PriorityQueue[String]
+ val pq2 = new PriorityQueue[String]
+ val pq3 = new PriorityQueue[String]
+ val pq4 = new PriorityQueue[String]
+
+ val strings = (1 to 20).toList map (i => List.fill((Math.abs(nextInt % 20)) + 1)("x").mkString)
+
+ pq1 ++= strings
+ pq2 ++= strings.reverse
+ for (s <- strings) pq3 += s
+ for (s <- strings.reverse) pq4 += s
+
+ val pqs = List(pq1, pq2, pq3, pq4, pq1.clone, pq2.clone)
+
+ for (queue1 <- pqs ; queue2 <- pqs) {
+ assert(queue1 == queue2)
+ assert(queue1.max == queue2.max)
+ }
+
+ assertPriority(pq1)
+ }
+
+ def testIndexing {
+ val pq = new PriorityQueue[Char]
+ "The quick brown fox jumps over the lazy dog".foreach(pq += _)
+
+ // val iter = pq.iterator
+ // while (iter.hasNext) println("`" + iter.next + "`")
+ assert(pq(0) == 'z')
+ assert(pq(1) == 'y')
+ assert(pq(2) == 'x')
+ assert(pq(3) == 'w')
+ assert(pq(4) == 'v')
+ assert(pq(5) == 'u')
+ assert(pq(7) == 't')
+ assert(pq(8) == 's')
+ assert(pq(9) == 'r')
+ assert(pq(10) == 'r')
+
+ pq.clear
+ "abcdefghijklmnopqrstuvwxyz".foreach(pq += _)
+ for (i <- 0 until 26) assert(pq(i) == ('z' - i))
+
+ val intpq = new PriorityQueue[Int]
+ val intlst = new collection.mutable.ArrayBuffer ++ (0 until 100)
+ val random = new util.Random(101)
+ while (intlst.nonEmpty) {
+ val idx = random.nextInt(intlst.size)
+ intpq += intlst(idx)
+ intlst.remove(idx)
+ }
+ for (i <- 0 until 100) assert(intpq(i) == (99 - i))
+ }
+
+ def testTails {
+ val pq = new PriorityQueue[Int]
+ for (i <- 0 until 10) pq += i * 4321 % 200
+
+ assert(pq.size == 10)
+ assert(pq.nonEmpty)
+
+ val tailpq = pq.tail
+ // pq.printstate
+ // tailpq.printstate
+ assert(tailpq.size == 9)
+ assert(tailpq.nonEmpty)
+ assertPriorityDestructive(tailpq)
+ }
+
+ def assertPriorityDestructive[A](pq: PriorityQueue[A])(implicit ord: Ordering[A]) {
+ import ord._
+ var prev: A = null.asInstanceOf[A]
+ while (pq.nonEmpty) {
+ val curr = pq.dequeue
+ if (prev != null) assert(curr <= prev)
+ prev = curr
+ }
+ }
+
+ def assertPriority[A](pq: PriorityQueue[A])(implicit ord: Ordering[A]) {
+ import ord._
+ var prev: A = null.asInstanceOf[A]
+ val iter = pq.iterator
+ while (iter.hasNext) {
+ val curr = iter.next
+ if (prev != null) assert(curr <= prev)
+ prev = curr
+ }
+ }
+
+ def testInits {
+ val pq = new PriorityQueue[Long]
+ for (i <- 0 until 20) pq += (i + 313) * 111 % 300
+
+ assert(pq.size == 20)
+
+ val initpq = pq.init
+ assert(initpq.size == 19)
+ assertPriorityDestructive(initpq)
+ }
+
+ def testFilters {
+ val pq = new PriorityQueue[String]
+ for (i <- 0 until 100) pq += "Some " + (i * 312 % 200)
+
+ val filpq = pq.filter(_.indexOf('0') != -1)
+ assertPriorityDestructive(filpq)
+ }
+
+ def testIntensiveEnqueueDequeue {
+ val pq = new PriorityQueue[Int]
+
+ testIntensive(1000, pq)
+ pq.clear
+ testIntensive(200, pq)
+ }
+
+ def testIntensive(sz: Int, pq: PriorityQueue[Int]) {
+ val lst = new collection.mutable.ArrayBuffer[Int] ++ (0 until sz)
+ val rand = new util.Random(7)
+ while (lst.nonEmpty) {
+ val idx = rand.nextInt(lst.size)
+ pq.enqueue(lst(idx))
+ lst.remove(idx)
+ if (rand.nextDouble < 0.25 && pq.nonEmpty) pq.dequeue
+ assertPriority(pq)
+ }
+ }
+
+ def testDrops {
+ val pq = new PriorityQueue[Int]
+ pq ++= (0 until 100)
+ val droppq = pq.drop(50)
+ assertPriority(droppq)
+
+ pq.clear
+ pq ++= droppq
+ assertPriorityDestructive(droppq)
+ assertPriority(pq)
+ assertPriorityDestructive(pq)
+ }
+
+ def testUpdates {
+ val pq = new PriorityQueue[Int]
+ pq ++= (0 until 36)
+ assertPriority(pq)
+
+ pq(0) = 100
+ assert(pq(0) == 100)
+ assert(pq.dequeue == 100)
+ assertPriority(pq)
+
+ pq.clear
+
+ pq ++= (1 to 100)
+ pq(5) = 200
+ assert(pq(0) == 200)
+ assert(pq(1) == 100)
+ assert(pq(2) == 99)
+ assert(pq(3) == 98)
+ assert(pq(4) == 97)
+ assert(pq(5) == 96)
+ assert(pq(6) == 94)
+ assert(pq(7) == 93)
+ assert(pq(98) == 2)
+ assert(pq(99) == 1)
+ assertPriority(pq)
+
+ pq(99) = 450
+ assert(pq(0) == 450)
+ assert(pq(1) == 200)
+ assert(pq(99) == 2)
+ assertPriority(pq)
+
+ pq(1) = 0
+ assert(pq(1) == 100)
+ assert(pq(99) == 0)
+ assertPriority(pq)
+ assertPriorityDestructive(pq)
+ }
+
+ def testEquality {
+ val pq1 = new PriorityQueue[Int]
+ val pq2 = new PriorityQueue[Int]
+
+ pq1 ++= (0 until 50)
+ var i = 49
+ while (i >= 0) {
+ pq2 += i
+ i -= 1
+ }
+ assert(pq1 == pq2)
+ assertPriority(pq2)
+
+ pq1 += 100
+ assert(pq1 != pq2)
+ pq2 += 100
+ assert(pq1 == pq2)
+ pq2 += 200
+ assert(pq1 != pq2)
+ pq1 += 200
+ assert(pq1 == pq2)
+ assertPriorityDestructive(pq1)
+ assertPriorityDestructive(pq2)
+ }
+
+ def testMisc {
+ val pq = new PriorityQueue[Int]
+ pq ++= (0 until 100)
+ assert(pq.size == 100)
+
+ val (p1, p2) = pq.partition(_ < 50)
+ assertPriorityDestructive(p1)
+ assertPriorityDestructive(p2)
+
+ val spq = pq.slice(25, 75)
+ assertPriorityDestructive(spq)
+
+ pq.clear
+ pq ++= (0 until 10)
+ pq += 5
+ assert(pq.size == 11)
+
+ val ind = pq.lastIndexWhere(_ == 5)
+ assert(ind == 5)
+ assertPriorityDestructive(pq)
+
+ pq.clear
+ pq ++= (0 until 10)
+ assert(pq.lastIndexWhere(_ == 9) == 0)
+ assert(pq.lastIndexOf(8) == 1)
+ assert(pq.lastIndexOf(7) == 2)
+
+ pq += 5
+ pq += 9
+ assert(pq.lastIndexOf(9) == 1)
+ assert(pq.lastIndexWhere(_ % 2 == 1) == 10)
+ assert(pq.lastIndexOf(5) == 6)
+
+ val lst = pq.reverseIterator.toList
+ for (i <- 0 until 5) assert(lst(i) == i)
+ assert(lst(5) == 5)
+ assert(lst(6) == 5)
+ assert(lst(7) == 6)
+ assert(lst(8) == 7)
+ assert(lst(9) == 8)
+ assert(lst(10) == 9)
+ assert(lst(11) == 9)
+
+ pq.clear
+ assert(pq.reverseIterator.toList.isEmpty)
+
+ pq ++= (50 to 75)
+ assert(pq.lastIndexOf(70) == 5)
- val strings = (1 to 20).toList map (i => List.fill((Math.abs(nextInt % 20)) + 1)("x").mkString)
+ pq += 55
+ pq += 70
+ assert(pq.lastIndexOf(70) == 6)
+ assert(pq.lastIndexOf(55) == 22)
+ assert(pq.lastIndexOf(55, 21) == 21)
+ assert(pq.lastIndexWhere(_ > 54) == 22)
+ assert(pq.lastIndexWhere(_ > 54, 21) == 21)
+ assert(pq.lastIndexWhere(_ > 69, 5) == 5)
+ }
+
+ def testReverse {
+ val pq = new PriorityQueue[(Int, Int)]
+ pq ++= (for (i <- 0 until 10) yield (i, i * i % 10))
+
+ assert(pq.reverse.size == pq.reverseIterator.toList.size)
+ assert((pq.reverse zip pq.reverseIterator.toList).forall(p => p._1 == p._2))
+ assert(pq.reverse.sameElements(pq.reverseIterator.toSeq))
+ assert(pq.reverse(0)._1 == pq(9)._1)
+ assert(pq.reverse(1)._1 == pq(8)._1)
+ assert(pq.reverse(4)._1 == pq(5)._1)
+ assert(pq.reverse(9)._1 == pq(0)._1)
- pq1 ++= strings
- pq2 ++= strings.reverse
- for (s <- strings) pq3 += s
- for (s <- strings.reverse) pq4 += s
+ pq += ((7, 7))
+ pq += ((7, 9))
+ pq += ((7, 8))
+ assert(pq.reverse.reverse == pq)
+ assert(pq.reverse.lastIndexWhere(_._2 == 6) == 6)
+ assertPriorityDestructive(pq.reverse.reverse)
- val pqs = List(pq1, pq2, pq3, pq4, pq1.clone, pq2.clone)
+ val iq = new PriorityQueue[Int]
+ iq ++= (0 until 50)
+ assert(iq.reverse == iq.reverseIterator.toSeq)
+ assert(iq.reverse.reverse == iq)
- for (queue1 <- pqs ; queue2 <- pqs) {
- assert(queue1 == queue2)
- assert(queue1.max == queue2.max)
+ iq += 25
+ iq += 40
+ iq += 10
+ assert(iq.reverse == iq.reverseIterator.toList)
+ assert(iq.reverse.reverse == iq)
+ assert(iq.reverse.lastIndexWhere(_ == 10) == 11)
+ assertPriorityDestructive(iq.reverse.reverse)
}
+
}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/test/files/run/randomAccessSeq-apply.scala b/test/files/run/randomAccessSeq-apply.scala
index 863a4d42a2..1f74050bb7 100644
--- a/test/files/run/randomAccessSeq-apply.scala
+++ b/test/files/run/randomAccessSeq-apply.scala
@@ -6,7 +6,7 @@ object Test extends Application {
assert(List(1) == single.toList)
val two = RandomAccessSeq("a", "b")
- assert("a" == two.first)
+ assert("a" == two.head)
assert("b" == two.apply(1))
println("OK")
diff --git a/test/files/run/t1524.scala b/test/files/run/t1524.scala
index ecd90adec7..4f6c65d052 100644
--- a/test/files/run/t1524.scala
+++ b/test/files/run/t1524.scala
@@ -3,5 +3,5 @@ object Test extends Application {
val buf = new scala.collection.mutable.ArrayBuffer[String] { override val initialSize = 0 }
buf += "initial"
buf += "second"
- println(buf.first)
+ println(buf.head)
}
diff --git a/test/files/run/t153.check b/test/files/run/t153.check
index 504fd7fc7f..648a6de7c3 100644
--- a/test/files/run/t153.check
+++ b/test/files/run/t153.check
@@ -1 +1 @@
-Stream(524288, 262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1)
+Stream(262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1)
diff --git a/test/files/run/t153.scala b/test/files/run/t153.scala
index c7b3c1c762..359e40407b 100644
--- a/test/files/run/t153.scala
+++ b/test/files/run/t153.scala
@@ -1,5 +1,5 @@
object Test extends Application {
def powers(x: Int) = if ((x&(x-1))==0) Some(x) else None
- val res = (Stream.range(1, 1000000) flatMap powers).reverse
+ val res = (Stream.range(1, 500000) flatMap powers).reverse
println(res take 42 force)
} \ No newline at end of file
diff --git a/test/files/run/t2526.scala b/test/files/run/t2526.scala
new file mode 100644
index 0000000000..5f6d60546a
--- /dev/null
+++ b/test/files/run/t2526.scala
@@ -0,0 +1,54 @@
+/**
+ * Checks that various foreach methods overridden in mutable.HashMap as part of ticket #2526
+ * still work correctly.
+ */
+object Test {
+ import collection._
+
+ def main(args: Array[String]) {
+ val m = new mutable.HashMap[String, String]
+
+ /* Use non hash-based structure for verification */
+ val keys = List("a", "b", "c", "d", "e")
+ val valueSuffix = "value"
+ val values = keys.map(_ + valueSuffix)
+ val entries = keys.zip(values)
+
+ for (k <- keys) m(k) = k + valueSuffix
+
+ assertForeach(keys, m.keySet.iterator)
+ assertForeach(keys, m.keysIterator)
+ assertForeach(keys, m.keySet)
+
+ assertForeach(values, m.valuesIterable.iterator)
+ assertForeach(values, m.valuesIterator)
+ assertForeach(values, m.valuesIterable)
+
+ assertForeach(entries, m)
+ }
+
+ /* Checks foreach of `actual` goes over all the elements in `expected` */
+ private def assertForeach[E](expected: Traversable[E], actual: Iterator[E]): Unit = {
+ val notYetFound = new mutable.ArrayBuffer[E]() ++= expected
+ actual.foreach { e =>
+ assert(notYetFound.contains(e))
+ notYetFound -= e
+ }
+ assert(notYetFound.size == 0, "mutable.HashMap.foreach should have iterated over: " + notYetFound)
+ }
+
+ /*
+ * Checks foreach of `actual` goes over all the elements in `expected`
+ * We duplicate the method above because there is no common inteface between Traverable and
+ * Iterator and we want to avoid converting between collections to ensure that we test what
+ * we mean to test.
+ */
+ private def assertForeach[E](expected: Traversable[E], actual: Traversable[E]): Unit = {
+ val notYetFound = new mutable.ArrayBuffer[E]() ++= expected
+ actual.foreach { e =>
+ assert(notYetFound.contains(e))
+ notYetFound -= e
+ }
+ assert(notYetFound.size == 0, "mutable.HashMap.foreach should have iterated over: " + notYetFound)
+ }
+}
diff --git a/test/files/run/unapply.scala b/test/files/run/unapply.scala
index 72a4b0ac64..acbce58d35 100644
--- a/test/files/run/unapply.scala
+++ b/test/files/run/unapply.scala
@@ -111,7 +111,7 @@ object StreamFoo extends TestCase("unapply for Streams") with Assert {
case Stream.cons(hd, tl) => hd + sum(tl)
}
override def runTest {
- val str: Stream[int] = Stream.fromIterator(List(1,2,3).iterator)
+ val str: Stream[Int] = Stream.fromIterator(List(1,2,3).iterator)
assertEquals(sum(str), 6)
}
}
diff --git a/test/files/run/unapplyArray.scala b/test/files/run/unapplyArray.scala
index bf6582dadf..bf7c9e2300 100644
--- a/test/files/run/unapplyArray.scala
+++ b/test/files/run/unapplyArray.scala
@@ -1,7 +1,7 @@
object Test {
def main(args:Array[String]): Unit = {
val z = Array(1,2,3,4)
- val zs: Seq[int] = z
+ val zs: Seq[Int] = z
val za: Any = z
/*
diff --git a/test/files/pos/bug1357.scala b/test/pending/pos/bug1357.scala
index fcdecb3ad3..fcdecb3ad3 100644
--- a/test/files/pos/bug1357.scala
+++ b/test/pending/pos/bug1357.scala
diff --git a/test/pending/pos/t2610.scala b/test/pending/pos/t2610.scala
new file mode 100644
index 0000000000..8dd4cde66e
--- /dev/null
+++ b/test/pending/pos/t2610.scala
@@ -0,0 +1,17 @@
+package mada; package defects; package tests
+
+package object bbb {
+ def bar = ()
+ aaa.foo // value foo is not a member of package mada.defects.tests.aaa
+}
+
+package object aaa {
+ def foo = ()
+}
+
+/* compiles successfully if placed here..
+package object bbb {
+ def bar = ()
+ aaa.foo // value foo is not a member of package mada.defects.tests.aaa
+}
+*/ \ No newline at end of file
diff --git a/test/pending/pos/t2619.scala b/test/pending/pos/t2619.scala
new file mode 100644
index 0000000000..565bc9572b
--- /dev/null
+++ b/test/pending/pos/t2619.scala
@@ -0,0 +1,80 @@
+abstract class F {
+ final def apply(x: Int): AnyRef = null
+}
+abstract class AbstractModule {
+ def as: List[AnyRef]
+ def ms: List[AbstractModule]
+ def fs: List[F] = Nil
+ def rs(x: Int): List[AnyRef] = fs.map(_(x))
+}
+abstract class ModuleType1 extends AbstractModule {}
+abstract class ModuleType2 extends AbstractModule {}
+
+object ModuleAE extends ModuleType1 {
+ def as = Nil
+ def ms = Nil
+}
+object ModuleAF extends ModuleType2 {
+ def as = Nil
+ def ms = List(ModuleAE)
+}
+object ModuleAG extends ModuleType1 {
+ def as = List("")
+ def ms = Nil
+}
+object ModuleAI extends ModuleType1 {
+ def as = Nil
+ def ms = List(ModuleAE)
+}
+object ModuleAK extends ModuleType2 {
+ def as = Nil
+ def ms = List(ModuleAF)
+}
+object ModuleAL extends ModuleType1 {
+ def as = Nil
+ def ms = List(
+ ModuleAG,
+ ModuleAI
+ )
+}
+object ModuleAM extends ModuleType1 {
+ def as = Nil
+ def ms = List(
+ ModuleAL,
+ ModuleAE
+ ) ::: List(ModuleAK)
+}
+object ModuleBE extends ModuleType1 {
+ def as = Nil
+ def ms = Nil
+}
+object ModuleBF extends ModuleType2 {
+ def as = Nil
+ def ms = List(ModuleBE)
+}
+object ModuleBG extends ModuleType1 {
+ def as = List("")
+ def ms = Nil
+}
+object ModuleBI extends ModuleType1 {
+ def as = Nil
+ def ms = List(ModuleBE)
+}
+object ModuleBK extends ModuleType2 {
+ def as = Nil
+ def ms = List(ModuleBF)
+}
+object ModuleBL extends ModuleType1 {
+ def as = Nil
+ def ms = List(
+ ModuleBG,
+ ModuleBI
+ )
+}
+object ModuleBM extends ModuleType1 {
+ def as = Nil
+ def ms = List(
+ ModuleBL,
+ ModuleBE
+ ) ::: List(ModuleBK)
+} \ No newline at end of file
diff --git a/test/pending/pos/t2625.scala b/test/pending/pos/t2625.scala
new file mode 100644
index 0000000000..94240cb6c6
--- /dev/null
+++ b/test/pending/pos/t2625.scala
@@ -0,0 +1,9 @@
+package t
+
+object T {
+ case class A(x: Int)(x: Int)
+
+ def A(x: Boolean): Int = 34
+
+ A(23)
+} \ No newline at end of file
diff --git a/test/pending/pos/t2635.scala b/test/pending/pos/t2635.scala
new file mode 100644
index 0000000000..378631b23d
--- /dev/null
+++ b/test/pending/pos/t2635.scala
@@ -0,0 +1,16 @@
+abstract class Base
+
+object Test
+{
+ def run(c: Class[_ <: Base]): Unit = {
+ }
+
+ def main(args: Array[String]): Unit =
+ {
+ val sc: Option[Class[_ <: Base]] = Some(classOf[Base])
+ sc match {
+ case Some((c: Class[_ <: Base])) => run(c)
+ case None =>
+ }
+ }
+} \ No newline at end of file
diff --git a/test/pending/pos/t2641.scala b/test/pending/pos/t2641.scala
new file mode 100644
index 0000000000..fec825c4f9
--- /dev/null
+++ b/test/pending/pos/t2641.scala
@@ -0,0 +1,16 @@
+import scala.collection._
+import scala.collection.generic._
+
+abstract class ManagedSeqStrict[+A]
+ extends Traversable[A]
+ with GenericTraversableTemplate[A, ManagedSeqStrict]
+
+trait ManagedSeq[+A, +Coll]
+ extends ManagedSeqStrict[A]
+ with TraversableView[A, ManagedSeqStrict[A]]
+ with TraversableViewLike[A, ManagedSeqStrict[A], ManagedSeq[A/*ERROR: too few type args*/]]
+{ self =>
+ trait Transformed[+B] extends ManagedSeq[B, Coll] with super.Transformed[B]
+
+ trait Sliced extends Transformed[A] with super.Sliced
+} \ No newline at end of file
diff --git a/test/pending/pos/t2660.scala b/test/pending/pos/t2660.scala
new file mode 100644
index 0000000000..b1908b201b
--- /dev/null
+++ b/test/pending/pos/t2660.scala
@@ -0,0 +1,25 @@
+package hoho
+
+class G
+
+class H extends G
+
+class A[T](x: T) {
+
+ def this(y: G, z: T) = {
+ this(z)
+ print(1)
+ }
+
+ def this(z: H, h: T) = {
+ this(h)
+ print(2)
+ }
+}
+
+object T {
+ def main(args: Array[String]) {
+ implicit def g2h(g: G): H = new H
+ new A(new H, 23)
+ }
+} \ No newline at end of file
diff --git a/test/pending/pos/t2691.scala b/test/pending/pos/t2691.scala
new file mode 100644
index 0000000000..ba2e52f1fe
--- /dev/null
+++ b/test/pending/pos/t2691.scala
@@ -0,0 +1,9 @@
+object Breakdown {
+ def unapplySeq(x: Int): Some[List[String]] = Some(List("", "there"))
+}
+object Test {
+ 42 match {
+ case Breakdown("") => // needed to trigger bug
+ case Breakdown("", who) => println ("hello " + who)
+ }
+} \ No newline at end of file
diff --git a/test/postreview.py b/test/postreview.py
new file mode 100644
index 0000000000..2e2518f7ee
--- /dev/null
+++ b/test/postreview.py
@@ -0,0 +1,2540 @@
+#!/usr/bin/env python
+import cookielib
+import difflib
+import getpass
+import marshal
+import mimetools
+import ntpath
+import os
+import re
+import socket
+import stat
+import subprocess
+import sys
+import tempfile
+import urllib
+import urllib2
+from optparse import OptionParser
+from tempfile import mkstemp
+from urlparse import urljoin, urlparse
+
+try:
+ from hashlib import md5
+except ImportError:
+ # Support Python versions before 2.5.
+ from md5 import md5
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+# This specific import is necessary to handle the paths for
+# cygwin enabled machines.
+if (sys.platform.startswith('win')
+ or sys.platform.startswith('cygwin')):
+ import ntpath as cpath
+else:
+ import posixpath as cpath
+
+###
+# Default configuration -- user-settable variables follow.
+###
+
+# The following settings usually aren't needed, but if your Review
+# Board crew has specific preferences and doesn't want to express
+# them with command line switches, set them here and you're done.
+# In particular, setting the REVIEWBOARD_URL variable will allow
+# you to make it easy for people to submit reviews regardless of
+# their SCM setup.
+#
+# Note that in order for this script to work with a reviewboard site
+# that uses local paths to access a repository, the 'Mirror path'
+# in the repository setup page must be set to the remote URL of the
+# repository.
+
+#
+# Reviewboard URL.
+#
+# Set this if you wish to hard-code a default server to always use.
+# It's generally recommended to set this using your SCM repository
+# (for those that support it -- currently only SVN, Git, and Perforce).
+#
+# For example, on SVN:
+# $ svn propset reviewboard:url http://reviewboard.example.com .
+#
+# Or with Git:
+# $ git config reviewboard.url http://reviewboard.example.com
+#
+# On Perforce servers version 2008.1 and above:
+# $ p4 counter reviewboard.url http://reviewboard.example.com
+#
+# Older Perforce servers only allow numerical counters, so embedding
+# the url in the counter name is also supported:
+# $ p4 counter reviewboard.url.http:\|\|reviewboard.example.com 1
+#
+# Note that slashes are not allowed in Perforce counter names, so replace them
+# with pipe characters (they are a safe substitute as they are not used
+# unencoded in URLs). You may need to escape them when issuing the p4 counter
+# command as above.
+#
+# If this is not possible or desired, setting the value here will let
+# you get started quickly.
+#
+# For all other repositories, a .reviewboardrc file present at the top of
+# the checkout will also work. For example:
+#
+# $ cat .reviewboardrc
+# REVIEWBOARD_URL = "http://reviewboard.example.com"
+#
+REVIEWBOARD_URL = None
+
+# Default submission arguments. These are all optional; run this
+# script with --help for descriptions of each argument.
+TARGET_GROUPS = None
+TARGET_PEOPLE = None
+SUBMIT_AS = None
+PUBLISH = False
+OPEN_BROWSER = False
+
+# Debugging. For development...
+DEBUG = False
+
+###
+# End user-settable variables.
+###
+
+
+VERSION = "0.8"
+
+user_config = None
+tempfiles = []
+options = None
+
+
+class APIError(Exception):
+ pass
+
+
+class RepositoryInfo:
+ """
+ A representation of a source code repository.
+ """
+ def __init__(self, path=None, base_path=None, supports_changesets=False,
+ supports_parent_diffs=False):
+ self.path = path
+ self.base_path = base_path
+ self.supports_changesets = supports_changesets
+ self.supports_parent_diffs = supports_parent_diffs
+ debug("repository info: %s" % self)
+
+ def __str__(self):
+ return "Path: %s, Base path: %s, Supports changesets: %s" % \
+ (self.path, self.base_path, self.supports_changesets)
+
+ def set_base_path(self, base_path):
+ if not base_path.startswith('/'):
+ base_path = '/' + base_path
+ debug("changing repository info base_path from %s to %s" % \
+ (self.base_path, base_path))
+ self.base_path = base_path
+
+ def find_server_repository_info(self, server):
+ """
+ Try to find the repository from the list of repositories on the server.
+ For Subversion, this could be a repository with a different URL. For
+ all other clients, this is a noop.
+ """
+ return self
+
+
+class SvnRepositoryInfo(RepositoryInfo):
+ """
+ A representation of a SVN source code repository. This version knows how to
+ find a matching repository on the server even if the URLs differ.
+ """
+ def __init__(self, path, base_path, uuid, supports_parent_diffs=False):
+ RepositoryInfo.__init__(self, path, base_path,
+ supports_parent_diffs=supports_parent_diffs)
+ self.uuid = uuid
+
+ def find_server_repository_info(self, server):
+ """
+ The point of this function is to find a repository on the server that
+ matches self, even if the paths aren't the same. (For example, if self
+ uses an 'http' path, but the server uses a 'file' path for the same
+ repository.) It does this by comparing repository UUIDs. If the
+ repositories use the same path, you'll get back self, otherwise you'll
+ get a different SvnRepositoryInfo object (with a different path).
+ """
+ repositories = server.get_repositories()
+
+ for repository in repositories:
+ if repository['tool'] != 'Subversion':
+ continue
+
+ info = self._get_repository_info(server, repository)
+
+ if not info or self.uuid != info['uuid']:
+ continue
+
+ repos_base_path = info['url'][len(info['root_url']):]
+ relpath = self._get_relative_path(self.base_path, repos_base_path)
+ if relpath:
+ return SvnRepositoryInfo(info['url'], relpath, self.uuid)
+
+ # We didn't find a matching repository on the server. We'll just return
+ # self and hope for the best.
+ return self
+
+ def _get_repository_info(self, server, repository):
+ try:
+ return server.get_repository_info(repository['id'])
+ except APIError, e:
+ # If the server couldn't fetch the repository info, it will return
+ # code 210. Ignore those.
+ # Other more serious errors should still be raised, though.
+ rsp = e.args[0]
+ if rsp['err']['code'] == 210:
+ return None
+
+ raise e
+
+ def _get_relative_path(self, path, root):
+ pathdirs = self._split_on_slash(path)
+ rootdirs = self._split_on_slash(root)
+
+ # root is empty, so anything relative to that is itself
+ if len(rootdirs) == 0:
+ return path
+
+ # If one of the directories doesn't match, then path is not relative
+ # to root.
+ if rootdirs != pathdirs:
+ return None
+
+ # All the directories matched, so the relative path is whatever
+ # directories are left over. The base_path can't be empty, though, so
+ # if the paths are the same, return '/'
+ if len(pathdirs) == len(rootdirs):
+ return '/'
+ else:
+ return '/'.join(pathdirs[len(rootdirs):])
+
+ def _split_on_slash(self, path):
+ # Split on slashes, but ignore multiple slashes and throw away any
+ # trailing slashes.
+ split = re.split('/*', path)
+ if split[-1] == '':
+ split = split[0:-1]
+ return split
+
+
+class ReviewBoardHTTPPasswordMgr(urllib2.HTTPPasswordMgr):
+ """
+ Adds HTTP authentication support for URLs.
+
+ Python 2.4's password manager has a bug in http authentication when the
+ target server uses a non-standard port. This works around that bug on
+ Python 2.4 installs. This also allows post-review to prompt for passwords
+ in a consistent way.
+
+ See: http://bugs.python.org/issue974757
+ """
+ def __init__(self, reviewboard_url):
+ self.passwd = {}
+ self.rb_url = reviewboard_url
+ self.rb_user = None
+ self.rb_pass = None
+
+ def find_user_password(self, realm, uri):
+ if uri.startswith(self.rb_url):
+ if self.rb_user is None or self.rb_pass is None:
+ print "==> HTTP Authentication Required"
+ print 'Enter username and password for "%s" at %s' % \
+ (realm, urlparse(uri)[1])
+ self.rb_user = raw_input('Username: ')
+ self.rb_pass = getpass.getpass('Password: ')
+
+ return self.rb_user, self.rb_pass
+ else:
+ # If this is an auth request for some other domain (since HTTP
+ # handlers are global), fall back to standard password management.
+ return urllib2.HTTPPasswordMgr.find_user_password(self, realm, uri)
+
+
+class ReviewBoardServer(object):
+ """
+ An instance of a Review Board server.
+ """
+ def __init__(self, url, info, cookie_file):
+ self.url = url
+ if self.url[-1] != '/':
+ self.url += '/'
+ self._info = info
+ self._server_info = None
+ self.cookie_file = cookie_file
+ self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
+
+ # Set up the HTTP libraries to support all of the features we need.
+ cookie_handler = urllib2.HTTPCookieProcessor(self.cookie_jar)
+ password_mgr = ReviewBoardHTTPPasswordMgr(self.url)
+ auth_handler = urllib2.HTTPBasicAuthHandler(password_mgr)
+
+ opener = urllib2.build_opener(cookie_handler, auth_handler)
+ opener.addheaders = [('User-agent', 'post-review/' + VERSION)]
+ urllib2.install_opener(opener)
+
+ def login(self, force=False):
+ """
+ Logs in to a Review Board server, prompting the user for login
+ information if needed.
+ """
+ if not force and self.has_valid_cookie():
+ return
+
+ print "==> Review Board Login Required"
+ print "Enter username and password for Review Board at %s" % self.url
+ if options.username:
+ username = options.username
+ elif options.submit_as:
+ username = options.submit_as
+ else:
+ username = raw_input('Username: ')
+
+ if not options.password:
+ password = getpass.getpass('Password: ')
+ else:
+ password = options.password
+
+ debug('Logging in with username "%s"' % username)
+ try:
+ self.api_post('api/json/accounts/login/', {
+ 'username': username,
+ 'password': password,
+ })
+ except APIError, e:
+ rsp, = e.args
+
+ die("Unable to log in: %s (%s)" % (rsp["err"]["msg"],
+ rsp["err"]["code"]))
+
+ debug("Logged in.")
+
+ def has_valid_cookie(self):
+ """
+ Load the user's cookie file and see if they have a valid
+ 'rbsessionid' cookie for the current Review Board server. Returns
+ true if so and false otherwise.
+ """
+ try:
+ parsed_url = urlparse(self.url)
+ host = parsed_url[1]
+ path = parsed_url[2] or '/'
+
+ # Cookie files don't store port numbers, unfortunately, so
+ # get rid of the port number if it's present.
+ host = host.split(":")[0]
+
+ debug("Looking for '%s %s' cookie in %s" % \
+ (host, path, self.cookie_file))
+ self.cookie_jar.load(self.cookie_file, ignore_expires=True)
+
+ try:
+ cookie = self.cookie_jar._cookies[host][path]['rbsessionid']
+
+ if not cookie.is_expired():
+ debug("Loaded valid cookie -- no login required")
+ return True
+
+ debug("Cookie file loaded, but cookie has expired")
+ except KeyError:
+ debug("Cookie file loaded, but no cookie for this server")
+ except IOError, error:
+ debug("Couldn't load cookie file: %s" % error)
+
+ return False
+
+ def new_review_request(self, changenum, submit_as=None):
+ """
+ Creates a review request on a Review Board server, updating an
+ existing one if the changeset number already exists.
+
+ If submit_as is provided, the specified user name will be recorded as
+ the submitter of the review request (given that the logged in user has
+ the appropriate permissions).
+ """
+ try:
+ debug("Attempting to create review request for %s" % changenum)
+ data = { 'repository_path': self.info.path }
+
+ if changenum:
+ data['changenum'] = changenum
+
+ if submit_as:
+ debug("Submitting the review request as %s" % submit_as)
+ data['submit_as'] = submit_as
+
+ rsp = self.api_post('api/json/reviewrequests/new/', data)
+ except APIError, e:
+ rsp, = e.args
+
+ if not options.diff_only:
+ if rsp['err']['code'] == 204: # Change number in use
+ debug("Review request already exists. Updating it...")
+ rsp = self.api_post(
+ 'api/json/reviewrequests/%s/update_from_changenum/' %
+ rsp['review_request']['id'])
+ else:
+ raise e
+
+ debug("Review request created")
+ return rsp['review_request']
+
+ def set_review_request_field(self, review_request, field, value):
+ """
+ Sets a field in a review request to the specified value.
+ """
+ rid = review_request['id']
+
+ debug("Attempting to set field '%s' to '%s' for review request '%s'" %
+ (field, value, rid))
+
+ self.api_post('api/json/reviewrequests/%s/draft/set/' % rid, {
+ field: value,
+ })
+
+ def get_review_request(self, rid):
+ """
+ Returns the review request with the specified ID.
+ """
+ rsp = self.api_get('api/json/reviewrequests/%s/' % rid)
+ return rsp['review_request']
+
+ def get_repositories(self):
+ """
+ Returns the list of repositories on this server.
+ """
+ rsp = self.api_get('/api/json/repositories/')
+ return rsp['repositories']
+
+ def get_repository_info(self, rid):
+ """
+ Returns detailed information about a specific repository.
+ """
+ rsp = self.api_get('/api/json/repositories/%s/info/' % rid)
+ return rsp['info']
+
+ def save_draft(self, review_request):
+ """
+ Saves a draft of a review request.
+ """
+ self.api_post("api/json/reviewrequests/%s/draft/save/" %
+ review_request['id'])
+ debug("Review request draft saved")
+
+ def upload_diff(self, review_request, diff_content, parent_diff_content):
+ """
+ Uploads a diff to a Review Board server.
+ """
+ debug("Uploading diff, size: %d" % len(diff_content))
+
+ if parent_diff_content:
+ debug("Uploading parent diff, size: %d" % len(parent_diff_content))
+
+ fields = {}
+ files = {}
+
+ if self.info.base_path:
+ fields['basedir'] = self.info.base_path
+
+ files['path'] = {
+ 'filename': 'diff',
+ 'content': diff_content
+ }
+
+ if parent_diff_content:
+ files['parent_diff_path'] = {
+ 'filename': 'parent_diff',
+ 'content': parent_diff_content
+ }
+
+ self.api_post('api/json/reviewrequests/%s/diff/new/' %
+ review_request['id'], fields, files)
+
+ def publish(self, review_request):
+ """
+ Publishes a review request.
+ """
+ debug("Publishing")
+ self.api_post('api/json/reviewrequests/%s/publish/' %
+ review_request['id'])
+
+ def _get_server_info(self):
+ if not self._server_info:
+ self._server_info = self._info.find_server_repository_info(self)
+
+ return self._server_info
+
+ info = property(_get_server_info)
+
+ def process_json(self, data):
+ """
+ Loads in a JSON file and returns the data if successful. On failure,
+ APIError is raised.
+ """
+ rsp = json.loads(data)
+
+ if rsp['stat'] == 'fail':
+ raise APIError, rsp
+
+ return rsp
+
+ def http_get(self, path):
+ """
+ Performs an HTTP GET on the specified path, storing any cookies that
+ were set.
+ """
+ debug('HTTP GETting %s' % path)
+
+ url = self._make_url(path)
+
+ try:
+ rsp = urllib2.urlopen(url).read()
+ self.cookie_jar.save(self.cookie_file)
+ return rsp
+ except urllib2.HTTPError, e:
+ print "Unable to access %s (%s). The host path may be invalid" % \
+ (url, e.code)
+ try:
+ debug(e.read())
+ except AttributeError:
+ pass
+ die()
+
+ def _make_url(self, path):
+ """Given a path on the server returns a full http:// style url"""
+ app = urlparse(self.url)[2]
+ if path[0] == '/':
+ url = urljoin(self.url, app[:-1] + path)
+ else:
+ url = urljoin(self.url, app + path)
+
+ if not url.startswith('http'):
+ url = 'http://%s' % url
+ return url
+
+ def api_get(self, path):
+ """
+ Performs an API call using HTTP GET at the specified path.
+ """
+ return self.process_json(self.http_get(path))
+
+ def http_post(self, path, fields, files=None):
+ """
+ Performs an HTTP POST on the specified path, storing any cookies that
+ were set.
+ """
+ if fields:
+ debug_fields = fields.copy()
+ else:
+ debug_fields = {}
+
+ if 'password' in debug_fields:
+ debug_fields["password"] = "**************"
+ url = self._make_url(path)
+ debug('HTTP POSTing to %s: %s' % (url, debug_fields))
+
+ content_type, body = self._encode_multipart_formdata(fields, files)
+ headers = {
+ 'Content-Type': content_type,
+ 'Content-Length': str(len(body))
+ }
+
+ try:
+ r = urllib2.Request(url, body, headers)
+ data = urllib2.urlopen(r).read()
+ self.cookie_jar.save(self.cookie_file)
+ return data
+ except urllib2.URLError, e:
+ try:
+ debug(e.read())
+ except AttributeError:
+ pass
+
+ die("Unable to access %s. The host path may be invalid\n%s" % \
+ (url, e))
+ except urllib2.HTTPError, e:
+ die("Unable to access %s (%s). The host path may be invalid\n%s" % \
+ (url, e.code, e.read()))
+
+ def api_post(self, path, fields=None, files=None):
+ """
+ Performs an API call using HTTP POST at the specified path.
+ """
+ return self.process_json(self.http_post(path, fields, files))
+
+ def _encode_multipart_formdata(self, fields, files):
+ """
+ Encodes data for use in an HTTP POST.
+ """
+ BOUNDARY = mimetools.choose_boundary()
+ content = ""
+
+ fields = fields or {}
+ files = files or {}
+
+ for key in fields:
+ content += "--" + BOUNDARY + "\r\n"
+ content += "Content-Disposition: form-data; name=\"%s\"\r\n" % key
+ content += "\r\n"
+ content += fields[key] + "\r\n"
+
+ for key in files:
+ filename = files[key]['filename']
+ value = files[key]['content']
+ content += "--" + BOUNDARY + "\r\n"
+ content += "Content-Disposition: form-data; name=\"%s\"; " % key
+ content += "filename=\"%s\"\r\n" % filename
+ content += "\r\n"
+ content += value + "\r\n"
+
+ content += "--" + BOUNDARY + "--\r\n"
+ content += "\r\n"
+
+ content_type = "multipart/form-data; boundary=%s" % BOUNDARY
+
+ return content_type, content
+
+
+class SCMClient(object):
+ """
+ A base representation of an SCM tool for fetching repository information
+ and generating diffs.
+ """
+ def get_repository_info(self):
+ return None
+
+ def scan_for_server(self, repository_info):
+ """
+ Scans the current directory on up to find a .reviewboard file
+ containing the server path.
+ """
+ server_url = self._get_server_from_config(user_config, repository_info)
+ if server_url:
+ return server_url
+
+ for path in walk_parents(os.getcwd()):
+ filename = os.path.join(path, ".reviewboardrc")
+ if os.path.exists(filename):
+ config = load_config_file(filename)
+ server_url = self._get_server_from_config(config,
+ repository_info)
+ if server_url:
+ return server_url
+
+ return None
+
+ def diff(self, args):
+ """
+ Returns the generated diff and optional parent diff for this
+ repository.
+
+ The returned tuple is (diff_string, parent_diff_string)
+ """
+ return (None, None)
+
+ def diff_between_revisions(self, revision_range, args, repository_info):
+ """
+ Returns the generated diff between revisions in the repository.
+ """
+ return None
+
+ def _get_server_from_config(self, config, repository_info):
+ if 'REVIEWBOARD_URL' in config:
+ return config['REVIEWBOARD_URL']
+ elif 'TREES' in config:
+ trees = config['TREES']
+ if not isinstance(trees, dict):
+ die("Warning: 'TREES' in config file is not a dict!")
+
+ if repository_info.path in trees and \
+ 'REVIEWBOARD_URL' in trees[repository_info.path]:
+ return trees[repository_info.path]['REVIEWBOARD_URL']
+
+ return None
+
+
+class CVSClient(SCMClient):
+ """
+ A wrapper around the cvs tool that fetches repository
+ information and generates compatible diffs.
+ """
+ def get_repository_info(self):
+ if not check_install("cvs"):
+ return None
+
+ cvsroot_path = os.path.join("CVS", "Root")
+
+ if not os.path.exists(cvsroot_path):
+ return None
+
+ fp = open(cvsroot_path, "r")
+ repository_path = fp.read().strip()
+ fp.close()
+
+ i = repository_path.find("@")
+ if i != -1:
+ repository_path = repository_path[i + 1:]
+
+ i = repository_path.find(":")
+ if i != -1:
+ host = repository_path[:i]
+ try:
+ canon = socket.getfqdn(host)
+ repository_path = repository_path.replace('%s:' % host,
+ '%s:' % canon)
+ except socket.error, msg:
+ debug("failed to get fqdn for %s, msg=%s" % (host, msg))
+
+ return RepositoryInfo(path=repository_path)
+
+ def diff(self, files):
+ """
+ Performs a diff across all modified files in a CVS repository.
+
+ CVS repositories do not support branches of branches in a way that
+ makes parent diffs possible, so we never return a parent diff
+ (the second value in the tuple).
+ """
+ return (self.do_diff(files), None)
+
+ def diff_between_revisions(self, revision_range, args, repository_info):
+ """
+ Performs a diff between 2 revisions of a CVS repository.
+ """
+ revs = []
+
+ for rev in revision_range.split(":"):
+ revs += ["-r", rev]
+
+ return self.do_diff(revs)
+
+ def do_diff(self, params):
+ """
+ Performs the actual diff operation through cvs diff, handling
+ fake errors generated by CVS.
+ """
+ # Diff returns "1" if differences were found.
+ return execute(["cvs", "diff", "-uN"] + params,
+ extra_ignore_errors=(1,))
+
+
+class ClearCaseClient(SCMClient):
+ """
+ A wrapper around the clearcase tool that fetches repository
+ information and generates compatible diffs.
+ This client assumes that cygwin is installed on windows.
+ """
+ ccroot_path = "/view/reviewboard.diffview/vobs/"
+ viewinfo = ""
+ viewtype = "snapshot"
+
+ def get_filename_hash(self, fname):
+        # Hash the filename string so it's easy to find the file later on.
+ return md5(fname).hexdigest()
+
+ def get_repository_info(self):
+ if not check_install('cleartool help'):
+ return None
+
+ # We must be running this from inside a view.
+ # Otherwise it doesn't make sense.
+ self.viewinfo = execute(["cleartool", "pwv", "-short"])
+ if self.viewinfo.startswith('\*\* NONE'):
+ return None
+
+ # Returning the hardcoded clearcase root path to match the server
+        # repository path.
+ # There is no reason to have a dynamic path unless you have
+ # multiple clearcase repositories. This should be implemented.
+ return RepositoryInfo(path=self.ccroot_path,
+ base_path=self.ccroot_path,
+ supports_parent_diffs=False)
+
+ def get_previous_version(self, files):
+ file = []
+ curdir = os.getcwd()
+
+ # Cygwin case must transform a linux-like path to windows like path
+ # including drive letter.
+ if 'cygdrive' in curdir:
+ where = curdir.index('cygdrive') + 9
+ drive_letter = curdir[where:where+1]
+ curdir = drive_letter + ":\\" + curdir[where+2:len(curdir)]
+
+ for key in files:
+ # Sometimes there is a quote in the filename. It must be removed.
+ key = key.replace('\'', '')
+ elem_path = cpath.normpath(os.path.join(curdir, key))
+
+ # Removing anything before the last /vobs
+ # because it may be repeated.
+ elem_path_idx = elem_path.rfind("/vobs")
+ if elem_path_idx != -1:
+ elem_path = elem_path[elem_path_idx:len(elem_path)].strip("\"")
+
+ # Call cleartool to get this version and the previous version
+ # of the element.
+ curr_version, pre_version = execute(
+ ["cleartool", "desc", "-pre", elem_path])
+ curr_version = cpath.normpath(curr_version)
+ pre_version = pre_version.split(':')[1].strip()
+
+ # If a specific version was given, remove it from the path
+ # to avoid version duplication
+ if "@@" in elem_path:
+ elem_path = elem_path[:elem_path.rfind("@@")]
+ file.append(elem_path + "@@" + pre_version)
+ file.append(curr_version)
+
+        # Determine if the view type is snapshot or dynamic.
+ if os.path.exists(file[0]):
+ self.viewtype = "dynamic"
+
+ return file
+
+ def get_extended_namespace(self, files):
+ """
+ Parses the file path to get the extended namespace
+ """
+ versions = self.get_previous_version(files)
+
+ evfiles = []
+ hlist = []
+
+ for vkey in versions:
+ # Verify if it is a checkedout file.
+ if "CHECKEDOUT" in vkey:
+ # For checkedout files just add it to the file list
+ # since it cannot be accessed outside the view.
+ splversions = vkey[:vkey.rfind("@@")]
+ evfiles.append(splversions)
+ else:
+ # For checkedin files.
+ ext_path = []
+ ver = []
+ fname = "" # fname holds the file name without the version.
+ (bpath, fpath) = cpath.splitdrive(vkey)
+ if bpath :
+ # Windows.
+ # The version (if specified like file.c@@/main/1)
+ # should be kept as a single string
+ # so split the path and concat the file name
+ # and version in the last position of the list.
+ ver = fpath.split("@@")
+ splversions = fpath[:vkey.rfind("@@")].split("\\")
+ fname = splversions.pop()
+ splversions.append(fname + ver[1])
+ else :
+ # Linux.
+ bpath = vkey[:vkey.rfind("vobs")+4]
+ fpath = vkey[vkey.rfind("vobs")+5:]
+ ver = fpath.split("@@")
+ splversions = ver[0][:vkey.rfind("@@")].split("/")
+ fname = splversions.pop()
+ splversions.append(fname + ver[1])
+
+ filename = splversions.pop()
+ bpath = cpath.normpath(bpath + "/")
+ elem_path = bpath
+
+ for key in splversions:
+ # For each element (directory) in the path,
+ # get its version from clearcase.
+ elem_path = cpath.join(elem_path, key)
+
+ # This is the version to be appended to the extended
+ # path list.
+ this_version = execute(
+ ["cleartool", "desc", "-fmt", "%Vn",
+ cpath.normpath(elem_path)])
+ if this_version:
+ ext_path.append(key + "/@@" + this_version + "/")
+ else:
+ ext_path.append(key + "/")
+
+ # This must be done in case we haven't specified
+ # the version on the command line.
+ ext_path.append(cpath.normpath(fname + "/@@" +
+ vkey[vkey.rfind("@@")+2:len(vkey)]))
+ epstr = cpath.join(bpath, cpath.normpath(''.join(ext_path)))
+ evfiles.append(epstr)
+
+ """
+ In windows, there is a problem with long names(> 254).
+ In this case, we hash the string and copy the unextended
+ filename to a temp file whose name is the hash.
+ This way we can get the file later on for diff.
+ The same problem applies to snapshot views where the
+ extended name isn't available.
+ The previous file must be copied from the CC server
+ to a local dir.
+ """
+ if cpath.exists(epstr) :
+ pass
+ else:
+ if len(epstr) > 254 or self.viewtype == "snapshot":
+ name = self.get_filename_hash(epstr)
+ # Check if this hash is already in the list
+ try:
+ i = hlist.index(name)
+ die("ERROR: duplicate value %s : %s" %
+ (name, epstr))
+ except ValueError:
+ hlist.append(name)
+
+ normkey = cpath.normpath(vkey)
+ td = tempfile.gettempdir()
+ # Cygwin case must transform a linux-like path to
+ # windows like path including drive letter
+ if 'cygdrive' in td:
+ where = td.index('cygdrive') + 9
+ drive_letter = td[where:where+1] + ":"
+ td = cpath.join(drive_letter, td[where+1:])
+ tf = cpath.normpath(cpath.join(td, name))
+ if cpath.exists(tf):
+ debug("WARNING: FILE EXISTS")
+ os.unlink(tf)
+ execute(["cleartool", "get", "-to", tf, normkey])
+ else:
+ die("ERROR: FILE NOT FOUND : %s" % epstr)
+
+ return evfiles
+
+ def get_files_from_label(self, label):
+ voblist=[]
+ # Get the list of vobs for the current view
+ allvoblist = execute(["cleartool", "lsvob", "-short"]).split()
+ # For each vob, find if the label is present
+ for vob in allvoblist:
+ try:
+ execute(["cleartool", "describe", "-local",
+ "lbtype:%s@%s" % (label, vob)]).split()
+ voblist.append(vob)
+ except:
+ pass
+
+ filelist=[]
+ # For each vob containing the label, get the file list
+ for vob in voblist:
+ try:
+ res = execute(["cleartool", "find", vob, "-all", "-version",
+ "lbtype(%s)" % label, "-print"])
+ filelist.extend(res.split())
+ except :
+ pass
+
+        # Return only the unique items
+ return set(filelist)
+
+ def diff(self, files):
+ """
+ Performs a diff of the specified file and its previous version.
+ """
+ # We must be running this from inside a view.
+ # Otherwise it doesn't make sense.
+ return self.do_diff(self.get_extended_namespace(files))
+
+ def diff_label(self, label):
+ """
+ Get the files that are attached to a label and diff them
+ TODO
+ """
+ return self.diff(self.get_files_from_label(label))
+
+ def diff_between_revisions(self, revision_range, args, repository_info):
+ """
+ Performs a diff between 2 revisions of a CC repository.
+ """
+ rev_str = ''
+
+ for rev in revision_range.split(":"):
+ rev_str += "-r %s " % rev
+
+ return self.do_diff(rev_str)
+
+ def do_diff(self, params):
+ # Diff returns "1" if differences were found.
+ # Add the view name and view type to the description
+ if options.description:
+ options.description = ("VIEW: " + self.viewinfo +
+ "VIEWTYPE: " + self.viewtype + "\n" + options.description)
+ else:
+ options.description = (self.viewinfo +
+ "VIEWTYPE: " + self.viewtype + "\n")
+
+ o = []
+ Feol = False
+ while len(params) > 0:
+ # Read both original and modified files.
+ onam = params.pop(0)
+ mnam = params.pop(0)
+ file_data = []
+ do_rem = False
+ # If the filename length is greater than 254 char for windows,
+ # we copied the file to a temp file
+ # because the open will not work for path greater than 254.
+ # This is valid for the original and
+ # modified files if the name size is > 254.
+ for filenam in (onam, mnam) :
+ if cpath.exists(filenam) and self.viewtype == "dynamic":
+ do_rem = False
+ fn = filenam
+ elif len(filenam) > 254 or self.viewtype == "snapshot":
+ fn = self.get_filename_hash(filenam)
+ fn = cpath.join(tempfile.gettempdir(), fn)
+ do_rem = True
+ fd = open(cpath.normpath(fn))
+ fdata = fd.readlines()
+ fd.close()
+ file_data.append(fdata)
+ # If the file was temp, it should be removed.
+ if do_rem:
+ os.remove(filenam)
+
+ modi = file_data.pop()
+ orig = file_data.pop()
+
+ # For snapshot views, the local directories must be removed because
+ # they will break the diff on the server. Just replacing
+ # everything before the view name (including the view name) for
+ # vobs do the work.
+ if (self.viewtype == "snapshot"
+ and (sys.platform.startswith('win')
+ or sys.platform.startswith('cygwin'))):
+ vinfo = self.viewinfo.rstrip("\r\n")
+ mnam = "c:\\\\vobs" + mnam[mnam.rfind(vinfo) + len(vinfo):]
+ onam = "c:\\\\vobs" + onam[onam.rfind(vinfo) + len(vinfo):]
+ # Call the diff lib to generate a diff.
+            # The dates are bogus, since they don't matter anyway.
+ # The only thing is that two spaces are needed to the server
+            # so it can identify the headers correctly.
+ diff = difflib.unified_diff(orig, modi, onam, mnam,
+ ' 2002-02-21 23:30:39.942229878 -0800',
+ ' 2002-02-21 23:30:50.442260588 -0800', lineterm=' \n')
+ # Transform the generator output into a string output
+ # Use a comprehension instead of a generator,
+ # so 2.3.x doesn't fail to interpret.
+ diffstr = ''.join([str(l) for l in diff])
+ # Workaround for the difflib no new line at end of file
+ # problem.
+ if not diffstr.endswith('\n'):
+ diffstr = diffstr + ("\n\\ No newline at end of file\n")
+ o.append(diffstr)
+
+ ostr = ''.join(o)
+ return (ostr, None) # diff, parent_diff (not supported)
+
+
+class SVNClient(SCMClient):
+ """
+ A wrapper around the svn Subversion tool that fetches repository
+ information and generates compatible diffs.
+ """
+ def get_repository_info(self):
+ if not check_install('svn help'):
+ return None
+
+ # Get the SVN repository path (either via a working copy or
+ # a supplied URI)
+ svn_info_params = ["svn", "info"]
+ if options.repository_url:
+ svn_info_params.append(options.repository_url)
+ data = execute(svn_info_params,
+ ignore_errors=True)
+ m = re.search(r'^Repository Root: (.+)$', data, re.M)
+ if not m:
+ return None
+
+ path = m.group(1)
+
+ m = re.search(r'^URL: (.+)$', data, re.M)
+ if not m:
+ return None
+
+ base_path = m.group(1)[len(path):] or "/"
+
+ m = re.search(r'^Repository UUID: (.+)$', data, re.M)
+ if not m:
+ return None
+
+ return SvnRepositoryInfo(path, base_path, m.group(1))
+
+ def scan_for_server(self, repository_info):
+ # Scan first for dot files, since it's faster and will cover the
+ # user's $HOME/.reviewboardrc
+ server_url = super(SVNClient, self).scan_for_server(repository_info)
+ if server_url:
+ return server_url
+
+ return self.scan_for_server_property(repository_info)
+
+ def scan_for_server_property(self, repository_info):
+ def get_url_prop(path):
+ url = execute(["svn", "propget", "reviewboard:url", path]).strip()
+ return url or None
+
+ for path in walk_parents(os.getcwd()):
+ if not os.path.exists(os.path.join(path, ".svn")):
+ break
+
+ prop = get_url_prop(path)
+ if prop:
+ return prop
+
+ return get_url_prop(repository_info.path)
+
+ def diff(self, files):
+ """
+ Performs a diff across all modified files in a Subversion repository.
+
+ SVN repositories do not support branches of branches in a way that
+ makes parent diffs possible, so we never return a parent diff
+ (the second value in the tuple).
+ """
+ return (self.do_diff(["svn", "diff", "--diff-cmd=diff"] + files),
+ None)
+
+ def diff_between_revisions(self, revision_range, args, repository_info):
+ """
+ Performs a diff between 2 revisions of a Subversion repository.
+ """
+ if options.repository_url:
+ revisions = revision_range.split(':')
+ if len(revisions) < 1:
+ return None
+ elif len(revisions) == 1:
+ revisions.append('HEAD')
+
+ # if a new path was supplied at the command line, set it
+ if len(args):
+ repository_info.set_base_path(args[0])
+
+ url = repository_info.path + repository_info.base_path
+
+ old_url = url + '@' + revisions[0]
+ new_url = url + '@' + revisions[1]
+
+ return self.do_diff(["svn", "diff", "--diff-cmd=diff", old_url,
+ new_url],
+ repository_info)
+ # Otherwise, perform the revision range diff using a working copy
+ else:
+ return self.do_diff(["svn", "diff", "--diff-cmd=diff", "-r",
+ revision_range],
+ repository_info)
+
+ def do_diff(self, cmd, repository_info=None):
+ """
+ Performs the actual diff operation, handling renames and converting
+ paths to absolute.
+ """
+ diff = execute(cmd, split_lines=True)
+ diff = self.handle_renames(diff)
+ diff = self.convert_to_absolute_paths(diff, repository_info)
+
+ return ''.join(diff)
+
+ def handle_renames(self, diff_content):
+ """
+ The output of svn diff is incorrect when the file in question came
+        into being via svn mv/cp. Although the patch for these files is
+        relative to its parent, the diff header doesn't reflect this.
+ This function fixes the relevant section headers of the patch to
+ portray this relationship.
+ """
+
+ # svn diff against a repository URL on two revisions appears to
+ # handle moved files properly, so only adjust the diff file names
+ # if they were created using a working copy.
+ if options.repository_url:
+ return diff_content
+
+ result = []
+
+ from_line = ""
+ for line in diff_content:
+ if line.startswith('--- '):
+ from_line = line
+ continue
+
+            # This is where we decide how to mangle the previous '--- '
+ if line.startswith('+++ '):
+ to_file, _ = self.parse_filename_header(line[4:])
+ info = self.svn_info(to_file)
+ if info.has_key("Copied From URL"):
+ url = info["Copied From URL"]
+ root = info["Repository Root"]
+ from_file = urllib.unquote(url[len(root):])
+ result.append(from_line.replace(to_file, from_file))
+ else:
+ result.append(from_line) #as is, no copy performed
+
+ # We only mangle '---' lines. All others get added straight to
+ # the output.
+ result.append(line)
+
+ return result
+
+
+ def convert_to_absolute_paths(self, diff_content, repository_info):
+ """
+ Converts relative paths in a diff output to absolute paths.
+ This handles paths that have been svn switched to other parts of the
+ repository.
+ """
+
+ result = []
+
+ for line in diff_content:
+ front = None
+ if line.startswith('+++ ') or line.startswith('--- ') or line.startswith('Index: '):
+ front, line = line.split(" ", 1)
+
+ if front:
+ if line.startswith('/'): #already absolute
+ line = front + " " + line
+ else:
+ # filename and rest of line (usually the revision
+ # component)
+ file, rest = self.parse_filename_header(line)
+
+ # If working with a diff generated outside of a working
+ # copy, then file paths are already absolute, so just
+ # add initial slash.
+ if options.repository_url:
+ path = urllib.unquote(
+ "%s/%s" % (repository_info.base_path, file))
+ else:
+ info = self.svn_info(file)
+ url = info["URL"]
+ root = info["Repository Root"]
+ path = urllib.unquote(url[len(root):])
+
+ line = front + " " + path + rest
+
+ result.append(line)
+
+ return result
+
+ def svn_info(self, path):
+ """Return a dict which is the result of 'svn info' at a given path."""
+ svninfo = {}
+ for info in execute(["svn", "info", path],
+ split_lines=True):
+ parts = info.strip().split(": ", 1)
+ if len(parts) == 2:
+ key, value = parts
+ svninfo[key] = value
+
+ return svninfo
+
+ # Adapted from server code parser.py
+ def parse_filename_header(self, s):
+ parts = None
+ if "\t" in s:
+ # There's a \t separating the filename and info. This is the
+ # best case scenario, since it allows for filenames with spaces
+ # without much work.
+ parts = s.split("\t")
+
+ # There's spaces being used to separate the filename and info.
+ # This is technically wrong, so all we can do is assume that
+ # 1) the filename won't have multiple consecutive spaces, and
+ # 2) there's at least 2 spaces separating the filename and info.
+ if " " in s:
+ parts = re.split(r" +", s)
+
+ if parts:
+ parts[1] = '\t' + parts[1]
+ return parts
+
+ # strip off ending newline, and return it as the second component
+ return [s.split('\n')[0], '\n']
+
+
+class PerforceClient(SCMClient):
+ """
+ A wrapper around the p4 Perforce tool that fetches repository information
+ and generates compatible diffs.
+ """
+ def get_repository_info(self):
+ if not check_install('p4 help'):
+ return None
+
+ data = execute(["p4", "info"], ignore_errors=True)
+
+ m = re.search(r'^Server address: (.+)$', data, re.M)
+ if not m:
+ return None
+
+ repository_path = m.group(1).strip()
+
+ try:
+ hostname, port = repository_path.split(":")
+ info = socket.gethostbyaddr(hostname)
+ repository_path = "%s:%s" % (info[0], port)
+ except (socket.gaierror, socket.herror):
+ pass
+
+ return RepositoryInfo(path=repository_path, supports_changesets=True)
+
+ def scan_for_server(self, repository_info):
+ # Scan first for dot files, since it's faster and will cover the
+ # user's $HOME/.reviewboardrc
+ server_url = \
+ super(PerforceClient, self).scan_for_server(repository_info)
+
+ if server_url:
+ return server_url
+
+ return self.scan_for_server_counter(repository_info)
+
+ def scan_for_server_counter(self, repository_info):
+ """
+ Checks the Perforce counters to see if the Review Board server's url
+ is specified. Since Perforce only started supporting non-numeric
+ counter values in server version 2008.1, we support both a normal
+ counter 'reviewboard.url' with a string value and embedding the url in
+ a counter name like 'reviewboard.url.http:||reviewboard.example.com'.
+ Note that forward slashes aren't allowed in counter names, so
+ pipe ('|') characters should be used. These should be safe because they
+ should not be used unencoded in urls.
+ """
+
+ counters_text = execute(["p4", "counters"])
+
+ # Try for a "reviewboard.url" counter first.
+ m = re.search(r'^reviewboard.url = (\S+)', counters_text, re.M)
+
+ if m:
+ return m.group(1)
+
+ # Next try for a counter of the form:
+        # reviewboard.url.http:||reviewboard.example.com
+ m2 = re.search(r'^reviewboard.url\.(\S+)', counters_text, re.M)
+
+ if m2:
+ return m2.group(1).replace('|', '/')
+
+ return None
+
+ def get_changenum(self, args):
+ if len(args) == 1:
+ try:
+ return str(int(args[0]))
+ except ValueError:
+ pass
+ return None
+
+ def diff(self, args):
+ """
+ Goes through the hard work of generating a diff on Perforce in order
+ to take into account adds/deletes and to provide the necessary
+ revision information.
+ """
+        # set the P4 environment:
+ if options.p4_client:
+ os.environ['P4CLIENT'] = options.p4_client
+
+ if options.p4_port:
+ os.environ['P4PORT'] = options.p4_port
+
+ changenum = self.get_changenum(args)
+ if changenum is None:
+ return self._path_diff(args)
+ else:
+ return self._changenum_diff(changenum)
+
+
+ def _path_diff(self, args):
+ """
+ Process a path-style diff. See _changenum_diff for the alternate
+ version that handles specific change numbers.
+
+ Multiple paths may be specified in `args`. The path styles supported
+ are:
+
+ //path/to/file
+ Upload file as a "new" file.
+
+ //path/to/dir/...
+ Upload all files as "new" files.
+
+ //path/to/file[@#]rev
+ Upload file from that rev as a "new" file.
+
+ //path/to/file[@#]rev,[@#]rev
+ Upload a diff between revs.
+
+ //path/to/dir/...[@#]rev,[@#]rev
+ Upload a diff of all files between revs in that directory.
+ """
+ r_revision_range = re.compile(r'^(?P<path>//[^@#]+)' +
+ r'(?P<revision1>[#@][^,]+)?' +
+ r'(?P<revision2>,[#@][^,]+)?$')
+
+ empty_filename = make_tempfile()
+ tmp_diff_from_filename = make_tempfile()
+ tmp_diff_to_filename = make_tempfile()
+
+ diff_lines = []
+
+ for path in args:
+ m = r_revision_range.match(path)
+
+ if not m:
+ die('Path %r does not match a valid Perforce path.' % (path,))
+ revision1 = m.group('revision1')
+ revision2 = m.group('revision2')
+ first_rev_path = m.group('path')
+
+ if revision1:
+ first_rev_path += revision1
+ records = self._run_p4(['files', first_rev_path])
+
+ # Make a map for convenience.
+ files = {}
+
+ # Records are:
+ # 'rev': '1'
+ # 'func': '...'
+ # 'time': '1214418871'
+ # 'action': 'edit'
+ # 'type': 'ktext'
+ # 'depotFile': '...'
+ # 'change': '123456'
+ for record in records:
+ if record['action'] != 'delete':
+ if revision2:
+ files[record['depotFile']] = [record, None]
+ else:
+ files[record['depotFile']] = [None, record]
+
+ if revision2:
+ # [1:] to skip the comma.
+ second_rev_path = m.group('path') + revision2[1:]
+ records = self._run_p4(['files', second_rev_path])
+ for record in records:
+ if record['action'] != 'delete':
+ try:
+ m = files[record['depotFile']]
+ m[1] = record
+ except KeyError:
+ files[record['depotFile']] = [None, record]
+
+ old_file = new_file = empty_filename
+ changetype_short = None
+
+ for depot_path, (first_record, second_record) in files.items():
+ old_file = new_file = empty_filename
+ if first_record is None:
+ self._write_file(depot_path + '#' + second_record['rev'],
+ tmp_diff_to_filename)
+ new_file = tmp_diff_to_filename
+ changetype_short = 'A'
+ base_revision = 0
+ elif second_record is None:
+ self._write_file(depot_path + '#' + first_record['rev'],
+ tmp_diff_from_filename)
+ old_file = tmp_diff_from_filename
+ changetype_short = 'D'
+ base_revision = int(first_record['rev'])
+ else:
+ self._write_file(depot_path + '#' + first_record['rev'],
+ tmp_diff_from_filename)
+ self._write_file(depot_path + '#' + second_record['rev'],
+ tmp_diff_to_filename)
+ new_file = tmp_diff_to_filename
+ old_file = tmp_diff_from_filename
+ changetype_short = 'M'
+ base_revision = int(first_record['rev'])
+
+ dl = self._do_diff(old_file, new_file, depot_path,
+ base_revision, changetype_short,
+ ignore_unmodified=True)
+ diff_lines += dl
+
+ os.unlink(empty_filename)
+ os.unlink(tmp_diff_from_filename)
+ os.unlink(tmp_diff_to_filename)
+ return (''.join(diff_lines), None)
+
+ def _run_p4(self, command):
+ """Execute a perforce command using the python marshal API.
+
+ - command: A list of strings of the command to execute.
+
+ The return type depends on the command being run.
+ """
+ command = ['p4', '-G'] + command
+ p = subprocess.Popen(command, stdout=subprocess.PIPE)
+ result = []
+ has_error = False
+
+ while 1:
+ try:
+ data = marshal.load(p.stdout)
+ except EOFError:
+ break
+ else:
+ result.append(data)
+ if data.get('code', None) == 'error':
+ has_error = True
+
+ rc = p.wait()
+
+ if rc or has_error:
+ for record in result:
+ if 'data' in record:
+ print record['data']
+ die('Failed to execute command: %s\n' % (command,))
+
+ return result
+
+ def _changenum_diff(self, changenum):
+ """
+ Process a diff for a particular change number. This handles both
+ pending and submitted changelists.
+
+ See _path_diff for the alternate version that does diffs of depot
+ paths.
+ """
+ # TODO: It might be a good idea to enhance PerforceDiffParser to
+ # understand that newFile could include a revision tag for post-submit
+ # reviewing.
+ cl_is_pending = False
+
+ debug("Generating diff for changenum %s" % changenum)
+
+ description = execute(["p4", "describe", "-s", changenum],
+ split_lines=True)
+
+ if '*pending*' in description[0]:
+ cl_is_pending = True
+
+ # Get the file list
+ for line_num, line in enumerate(description):
+ if 'Affected files ...' in line:
+ break
+ else:
+ # Got to the end of all the description lines and didn't find
+ # what we were looking for.
+ die("Couldn't find any affected files for this change.")
+
+ description = description[line_num+2:]
+
+ diff_lines = []
+
+ empty_filename = make_tempfile()
+ tmp_diff_from_filename = make_tempfile()
+ tmp_diff_to_filename = make_tempfile()
+
+ for line in description:
+ line = line.strip()
+ if not line:
+ continue
+
+ m = re.search(r'\.\.\. ([^#]+)#(\d+) (add|edit|delete|integrate|branch)', line)
+ if not m:
+ die("Unsupported line from p4 opened: %s" % line)
+
+ depot_path = m.group(1)
+ base_revision = int(m.group(2))
+ if not cl_is_pending:
+ # If the changelist is pending our base revision is the one that's
+ # currently in the depot. If we're not pending the base revision is
+ # actually the revision prior to this one
+ base_revision -= 1
+
+ changetype = m.group(3)
+
+ debug('Processing %s of %s' % (changetype, depot_path))
+
+ old_file = new_file = empty_filename
+ old_depot_path = new_depot_path = None
+ changetype_short = None
+
+ if changetype == 'edit' or changetype == 'integrate':
+ # A big assumption
+ new_revision = base_revision + 1
+
+ # We have an old file, get p4 to take this old version from the
+ # depot and put it into a plain old temp file for us
+ old_depot_path = "%s#%s" % (depot_path, base_revision)
+ self._write_file(old_depot_path, tmp_diff_from_filename)
+ old_file = tmp_diff_from_filename
+
+ # Also print out the new file into a tmpfile
+ if cl_is_pending:
+ new_file = self._depot_to_local(depot_path)
+ else:
+ new_depot_path = "%s#%s" %(depot_path, new_revision)
+ self._write_file(new_depot_path, tmp_diff_to_filename)
+ new_file = tmp_diff_to_filename
+
+ changetype_short = "M"
+
+ elif changetype == 'add' or changetype == 'branch':
+ # We have a new file, get p4 to put this new file into a pretty
+ # temp file for us. No old file to worry about here.
+ if cl_is_pending:
+ new_file = self._depot_to_local(depot_path)
+ else:
+ self._write_file(depot_path, tmp_diff_to_filename)
+ new_file = tmp_diff_to_filename
+ changetype_short = "A"
+
+ elif changetype == 'delete':
+ # We've deleted a file, get p4 to put the deleted file into a temp
+ # file for us. The new file remains the empty file.
+ old_depot_path = "%s#%s" % (depot_path, base_revision)
+ self._write_file(old_depot_path, tmp_diff_from_filename)
+ old_file = tmp_diff_from_filename
+ changetype_short = "D"
+ else:
+ die("Unknown change type '%s' for %s" % (changetype, depot_path))
+
+ dl = self._do_diff(old_file, new_file, depot_path, base_revision, changetype_short)
+ diff_lines += dl
+
+ os.unlink(empty_filename)
+ os.unlink(tmp_diff_from_filename)
+ os.unlink(tmp_diff_to_filename)
+ return (''.join(diff_lines), None)
+
+ def _do_diff(self, old_file, new_file, depot_path, base_revision,
+ changetype_short, ignore_unmodified=False):
+ """
+ Do the work of producing a diff for Perforce.
+
+ old_file - The absolute path to the "old" file.
+ new_file - The absolute path to the "new" file.
+ depot_path - The depot path in Perforce for this file.
+ base_revision - The base perforce revision number of the old file as
+ an integer.
+ changetype_short - The change type as a single character string.
+ ignore_unmodified - If True, will return an empty list if the file
+ is not changed.
+
+ Returns a list of strings of diff lines.
+ """
+ if hasattr(os, 'uname') and os.uname()[0] == 'SunOS':
+ diff_cmd = ["gdiff", "-urNp", old_file, new_file]
+ else:
+ diff_cmd = ["diff", "-urNp", old_file, new_file]
+ # Diff returns "1" if differences were found.
+ dl = execute(diff_cmd, extra_ignore_errors=(1,2),
+ translate_newlines=False)
+
+ # If the input file has ^M characters at end of line, lets ignore them.
+ dl = dl.replace('\r\r\n', '\r\n')
+ dl = dl.splitlines(True)
+
+ cwd = os.getcwd()
+ if depot_path.startswith(cwd):
+ local_path = depot_path[len(cwd) + 1:]
+ else:
+ local_path = depot_path
+
+ # Special handling for the output of the diff tool on binary files:
+ # diff outputs "Files a and b differ"
+ # and the code below expects the output to start with
+ # "Binary files "
+ if len(dl) == 1 and \
+ dl[0] == ('Files %s and %s differ'% (old_file, new_file)):
+ dl = ['Binary files %s and %s differ'% (old_file, new_file)]
+
+ if dl == [] or dl[0].startswith("Binary files "):
+ if dl == []:
+ if ignore_unmodified:
+ return []
+ else:
+ print "Warning: %s in your changeset is unmodified" % \
+ local_path
+
+ dl.insert(0, "==== %s#%s ==%s== %s ====\n" % \
+ (depot_path, base_revision, changetype_short, local_path))
+ dl.append('\n')
+ else:
+ m = re.search(r'(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d)', dl[1])
+ if m:
+ timestamp = m.group(1)
+ else:
+ # Thu Sep 3 11:24:48 2007
+ m = re.search(r'(\w+)\s+(\w+)\s+(\d+)\s+(\d\d:\d\d:\d\d)\s+(\d\d\d\d)', dl[1])
+ if not m:
+ die("Unable to parse diff header: %s" % dl[1])
+
+ month_map = {
+ "Jan": "01",
+ "Feb": "02",
+ "Mar": "03",
+ "Apr": "04",
+ "May": "05",
+ "Jun": "06",
+ "Jul": "07",
+ "Aug": "08",
+ "Sep": "09",
+ "Oct": "10",
+ "Nov": "11",
+ "Dec": "12",
+ }
+ month = month_map[m.group(2)]
+ day = m.group(3)
+ timestamp = m.group(4)
+ year = m.group(5)
+
+ timestamp = "%s-%s-%s %s" % (year, month, day, timestamp)
+
+ dl[0] = "--- %s\t%s#%s\n" % (local_path, depot_path, base_revision)
+ dl[1] = "+++ %s\t%s\n" % (local_path, timestamp)
+
+ return dl
+
+ def _write_file(self, depot_path, tmpfile):
+ """
+ Grabs a file from Perforce and writes it to a temp file. p4 print sets
+        the file readonly and that causes a later call to unlink to fail. So we
+ make the file read/write.
+ """
+ debug('Writing "%s" to "%s"' % (depot_path, tmpfile))
+ execute(["p4", "print", "-o", tmpfile, "-q", depot_path])
+ os.chmod(tmpfile, stat.S_IREAD | stat.S_IWRITE)
+
+ def _depot_to_local(self, depot_path):
+ """
+ Given a path in the depot return the path on the local filesystem to
+ the same file. If there are multiple results, take only the last
+ result from the where command.
+ """
+ where_output = self._run_p4(['where', depot_path])
+ return where_output[-1]['path']
+
+
+class MercurialClient(SCMClient):
+ """
+ A wrapper around the hg Mercurial tool that fetches repository
+ information and generates compatible diffs.
+ """
+ def get_repository_info(self):
+ if not check_install('hg --help'):
+ return None
+
+ data = execute(["hg", "root"], ignore_errors=True)
+ if data.startswith('abort:'):
+ # hg aborted => no mercurial repository here.
+ return None
+
+        # Otherwise, hg root output gives us the repository path.
+
+ # We save data here to use it as a fallback. See below
+ local_data = data.strip()
+
+ svn = execute(["hg", "svn", "info", ], ignore_errors=True)
+
+ if (not svn.startswith('abort:') and
+ not svn.startswith("hg: unknown command")):
+ self.type = 'svn'
+ m = re.search(r'^Repository Root: (.+)$', svn, re.M)
+
+ if not m:
+ return None
+
+ path = m.group(1)
+ m2 = re.match(r'^(svn\+ssh|http|https)://([-a-zA-Z0-9.]*@)(.*)$',
+ path)
+ if m2:
+ path = '%s://%s' % (m2.group(1), m2.group(3))
+
+ m = re.search(r'^URL: (.+)$', svn, re.M)
+
+ if not m:
+ return None
+
+ base_path = m.group(1)[len(path):] or "/"
+ return RepositoryInfo(path=path,
+ base_path=base_path,
+ supports_parent_diffs=True)
+
+ self.type = 'hg'
+
+ # We are going to search .hg/hgrc for the default path.
+ file_name = os.path.join(local_data,'.hg', 'hgrc')
+
+ if not os.path.exists(file_name):
+ return RepositoryInfo(path=local_data, base_path='/',
+ supports_parent_diffs=True)
+
+ f = open(file_name)
+ data = f.read()
+ f.close()
+
+ m = re.search(r'^default\s+=\s+(.+)$', data, re.M)
+
+ if not m:
+ # Return the local path, if no default value is found.
+ return RepositoryInfo(path=local_data, base_path='/',
+ supports_parent_diffs=True)
+
+ path = m.group(1).strip()
+
+ return RepositoryInfo(path=path, base_path='',
+ supports_parent_diffs=True)
+
+ def diff(self, files):
+ """
+ Performs a diff across all modified files in a Mercurial repository.
+ """
+ # We don't support parent diffs with Mercurial yet, so we always
+ # return None for the parent diff.
+ if self.type == 'svn':
+ parent = execute(['hg', 'parent', '--svn', '--template',
+ '{node}\n']).strip()
+
+ if options.parent_branch:
+ parent = options.parent_branch
+
+ if options.guess_summary and not options.summary:
+ options.summary = execute(['hg', 'log', '-r.', '--template',
+ r'{desc|firstline}\n'])
+
+ if options.guess_description and not options.description:
+ numrevs = len(execute(['hg', 'log', '-r.:%s' % parent,
+ '--follow', '--template',
+ r'{rev}\n']).strip().split('\n'))
+ options.description = execute(['hg', 'log', '-r.:%s' % parent,
+ '--follow', '--template',
+ r'{desc}\n\n', '--limit',
+ str(numrevs-1)]).strip()
+
+ return (execute(["hg", "diff", "--svn", '-r%s:.' % parent]), None)
+
+ return (execute(["hg", "diff"] + files), None)
+
+ def diff_between_revisions(self, revision_range, args, repository_info):
+ """
+ Performs a diff between 2 revisions of a Mercurial repository.
+ """
+ if self.type != 'hg':
+ raise NotImplementedError
+
+ r1, r2 = revision_range.split(':')
+ return execute(["hg", "diff", "-r", r1, "-r", r2])
+
+
+class GitClient(SCMClient):
+ """
+ A wrapper around git that fetches repository information and generates
+ compatible diffs. This will attempt to generate a diff suitable for the
+ remote repository, whether git, SVN or Perforce.
+ """
+ def get_repository_info(self):
+ if not check_install('git --help'):
+ return None
+
+ git_dir = execute(["git", "rev-parse", "--git-dir"],
+ ignore_errors=True).strip()
+
+ if git_dir.startswith("fatal:") or not os.path.isdir(git_dir):
+ return None
+
+ # post-review in directories other than the top level of
+        # a work-tree would result in broken diffs on the server
+ os.chdir(os.path.dirname(os.path.abspath(git_dir)))
+
+ # We know we have something we can work with. Let's find out
+ # what it is. We'll try SVN first.
+ data = execute(["git", "svn", "info"], ignore_errors=True)
+
+ m = re.search(r'^Repository Root: (.+)$', data, re.M)
+ if m:
+ path = m.group(1)
+ m = re.search(r'^URL: (.+)$', data, re.M)
+
+ if m:
+ base_path = m.group(1)[len(path):] or "/"
+ m = re.search(r'^Repository UUID: (.+)$', data, re.M)
+
+ if m:
+ uuid = m.group(1)
+ self.type = "svn"
+
+ return SvnRepositoryInfo(path=path, base_path=base_path,
+ uuid=uuid,
+ supports_parent_diffs=True)
+ else:
+ # Versions of git-svn before 1.5.4 don't (appear to) support
+ # 'git svn info'. If we fail because of an older git install,
+ # here, figure out what version of git is installed and give
+ # the user a hint about what to do next.
+ version = execute(["git", "svn", "--version"], ignore_errors=True)
+ version_parts = re.search('version (\d+)\.(\d+)\.(\d+)',
+ version)
+ svn_remote = execute(["git", "config", "--get",
+ "svn-remote.svn.url"], ignore_errors=True)
+
+ if (version_parts and
+ not self.is_valid_version((int(version_parts.group(1)),
+ int(version_parts.group(2)),
+ int(version_parts.group(3))),
+ (1, 5, 4)) and
+ svn_remote):
+ die("Your installation of git-svn must be upgraded to " + \
+ "version 1.5.4 or later")
+
+ # Okay, maybe Perforce.
+ # TODO
+
+ # Nope, it's git then.
+ origin = execute(["git", "remote", "show", "origin"])
+ m = re.search(r'URL: (.+)', origin)
+ if m:
+ url = m.group(1).rstrip('/')
+ if url:
+ self.type = "git"
+ return RepositoryInfo(path=url, base_path='',
+ supports_parent_diffs=True)
+
+ return None
+
+ def is_valid_version(self, actual, expected):
+ """
+ Takes two tuples, both in the form:
+ (major_version, minor_version, micro_version)
+ Returns true if the actual version is greater than or equal to
+ the expected version, and false otherwise.
+ """
+ return (actual[0] > expected[0]) or \
+ (actual[0] == expected[0] and actual[1] > expected[1]) or \
+ (actual[0] == expected[0] and actual[1] == expected[1] and \
+ actual[2] >= expected[2])
+
+ def scan_for_server(self, repository_info):
+ # Scan first for dot files, since it's faster and will cover the
+ # user's $HOME/.reviewboardrc
+ server_url = super(GitClient, self).scan_for_server(repository_info)
+
+ if server_url:
+ return server_url
+
+ # TODO: Maybe support a server per remote later? Is that useful?
+ url = execute(["git", "config", "--get", "reviewboard.url"],
+ ignore_errors=True).strip()
+ if url:
+ return url
+
+ if self.type == "svn":
+ # Try using the reviewboard:url property on the SVN repo, if it
+ # exists.
+ prop = SVNClient().scan_for_server_property(repository_info)
+
+ if prop:
+ return prop
+
+ return None
+
+ def diff(self, args):
+ """
+ Performs a diff across all modified files in the branch, taking into
+ account a parent branch.
+ """
+ parent_branch = options.parent_branch or "master"
+
+ diff_lines = self.make_diff(parent_branch)
+
+ if parent_branch != "master":
+ parent_diff_lines = self.make_diff("master", parent_branch)
+ else:
+ parent_diff_lines = None
+
+ if options.guess_summary and not options.summary:
+ options.summary = execute(["git", "log", "--pretty=format:%s",
+ "HEAD^.."], ignore_errors=True).strip()
+
+ if options.guess_description and not options.description:
+ options.description = execute(
+ ["git", "log", "--pretty=format:%s%n%n%b", parent_branch + ".."],
+ ignore_errors=True).strip()
+
+ return (diff_lines, parent_diff_lines)
+
+ def make_diff(self, parent_branch, source_branch=""):
+ """
+ Performs a diff on a particular branch range.
+ """
+ if self.type == "svn":
+ diff_lines = execute(["git", "diff", "--no-color", "--no-prefix",
+ "-r", "-u", "%s..%s" % (parent_branch,
+ source_branch)],
+ split_lines=True)
+ return self.make_svn_diff(parent_branch, diff_lines)
+ elif self.type == "git":
+ return execute(["git", "diff", "--no-color", "--full-index",
+ parent_branch])
+
+ return None
+
+ def make_svn_diff(self, parent_branch, diff_lines):
+ """
+ Formats the output of git diff such that it's in a form that
+ svn diff would generate. This is needed so the SVNTool in Review
+ Board can properly parse this diff.
+ """
+ rev = execute(["git", "svn", "find-rev", "master"]).strip()
+
+ if not rev:
+ return None
+
+ diff_data = ""
+ filename = ""
+ revision = ""
+ newfile = False
+
+ for line in diff_lines:
+ if line.startswith("diff "):
+ # Grab the filename and then filter this out.
+ # This will be in the format of:
+ #
+ # diff --git a/path/to/file b/path/to/file
+ info = line.split(" ")
+ diff_data += "Index: %s\n" % info[2]
+ diff_data += "=" * 67
+ diff_data += "\n"
+ elif line.startswith("index "):
+ # Filter this out.
+ pass
+ elif line.strip() == "--- /dev/null":
+ # New file
+ newfile = True
+ elif line.startswith("--- "):
+ newfile = False
+ diff_data += "--- %s\t(revision %s)\n" % \
+ (line[4:].strip(), rev)
+ elif line.startswith("+++ "):
+ filename = line[4:].strip()
+ if newfile:
+ diff_data += "--- %s\t(revision 0)\n" % filename
+ diff_data += "+++ %s\t(revision 0)\n" % filename
+ else:
+ # We already printed the "--- " line.
+ diff_data += "+++ %s\t(working copy)\n" % filename
+ else:
+ diff_data += line
+
+ return diff_data
+
+ def diff_between_revisions(self, revision_range, args, repository_info):
+ pass
+
+
+SCMCLIENTS = (
+ SVNClient(),
+ CVSClient(),
+ GitClient(),
+ MercurialClient(),
+ PerforceClient(),
+ ClearCaseClient(),
+)
+
+def debug(s):
+ """
+ Prints debugging information if post-review was run with --debug
+ """
+ if DEBUG or options and options.debug:
+ print ">>> %s" % s
+
+
+def make_tempfile():
+ """
+ Creates a temporary file and returns the path. The path is stored
+    in a list for later cleanup.
+ """
+ fd, tmpfile = mkstemp()
+ os.close(fd)
+ tempfiles.append(tmpfile)
+ return tmpfile
+
+
+def check_install(command):
+ """
+ Try executing an external command and return a boolean indicating whether
+ that command is installed or not. The 'command' argument should be
+ something that executes quickly, without hitting the network (for
+ instance, 'svn help' or 'git --version').
+ """
+ try:
+ p = subprocess.Popen(command.split(' '),
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ return True
+ except OSError:
+ return False
+
+
+def execute(command, env=None, split_lines=False, ignore_errors=False,
+ extra_ignore_errors=(), translate_newlines=True):
+ """
+ Utility function to execute a command and return the output.
+ """
+ if isinstance(command, list):
+ debug(subprocess.list2cmdline(command))
+ else:
+ debug(command)
+
+ if env:
+ env.update(os.environ)
+ else:
+ env = os.environ.copy()
+
+ env['LC_ALL'] = 'en_US.UTF-8'
+ env['LANGUAGE'] = 'en_US.UTF-8'
+
+ if sys.platform.startswith('win'):
+ p = subprocess.Popen(command,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ shell=False,
+ universal_newlines=translate_newlines,
+ env=env)
+ else:
+ p = subprocess.Popen(command,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ shell=False,
+ close_fds=True,
+ universal_newlines=translate_newlines,
+ env=env)
+ if split_lines:
+ data = p.stdout.readlines()
+ else:
+ data = p.stdout.read()
+ rc = p.wait()
+ if rc and not ignore_errors and rc not in extra_ignore_errors:
+ die('Failed to execute command: %s\n%s' % (command, data))
+
+ return data
+
+
+def die(msg=None):
+ """
+ Cleanly exits the program with an error message. Erases all remaining
+ temporary files.
+ """
+ for tmpfile in tempfiles:
+ try:
+ os.unlink(tmpfile)
+ except:
+ pass
+
+ if msg:
+ print msg
+
+ sys.exit(1)
+
+
+def walk_parents(path):
+ """
+ Walks up the tree to the root directory.
+ """
+ while os.path.splitdrive(path)[1] != os.sep:
+ yield path
+ path = os.path.dirname(path)
+
+
+def load_config_file(filename):
+ """
+ Loads data from a config file.
+ """
+ config = {
+ 'TREES': {},
+ }
+
+ if os.path.exists(filename):
+ try:
+ execfile(filename, config)
+ except:
+ pass
+
+ return config
+
+
+def tempt_fate(server, tool, changenum, diff_content=None,
+ parent_diff_content=None, submit_as=None, retries=3):
+ """
+ Attempts to create a review request on a Review Board server and upload
+ a diff. On success, the review request path is displayed.
+ """
+ try:
+ save_draft = False
+
+ if options.rid:
+ review_request = server.get_review_request(options.rid)
+ else:
+ review_request = server.new_review_request(changenum, submit_as)
+
+ if options.target_groups:
+ server.set_review_request_field(review_request, 'target_groups',
+ options.target_groups)
+ save_draft = True
+
+ if options.target_people:
+ server.set_review_request_field(review_request, 'target_people',
+ options.target_people)
+ save_draft = True
+
+ if options.summary:
+ server.set_review_request_field(review_request, 'summary',
+ options.summary)
+ save_draft = True
+
+ if options.branch:
+ server.set_review_request_field(review_request, 'branch',
+ options.branch)
+ save_draft = True
+
+ if options.bugs_closed:
+ server.set_review_request_field(review_request, 'bugs_closed',
+ options.bugs_closed)
+ save_draft = True
+
+ if options.description:
+ server.set_review_request_field(review_request, 'description',
+ options.description)
+ save_draft = True
+
+ if options.testing_done:
+ server.set_review_request_field(review_request, 'testing_done',
+ options.testing_done)
+ save_draft = True
+
+ if save_draft:
+ server.save_draft(review_request)
+ except APIError, e:
+ rsp, = e.args
+ if rsp['err']['code'] == 103: # Not logged in
+ retries = retries - 1
+
+ # We had an odd issue where the server ended up a couple of
+ # years in the future. Login succeeds but the cookie date was
+ # "odd" so use of the cookie appeared to fail and eventually
+ # ended up at max recursion depth :-(. Check for a maximum
+ # number of retries.
+ if retries >= 0:
+ server.login(force=True)
+ tempt_fate(server, tool, changenum, diff_content,
+ parent_diff_content, submit_as, retries=retries)
+ return
+
+ if options.rid:
+ die("Error getting review request %s: %s (code %s)" % \
+ (options.rid, rsp['err']['msg'], rsp['err']['code']))
+ else:
+ die("Error creating review request: %s (code %s)" % \
+ (rsp['err']['msg'], rsp['err']['code']))
+
+
+ if not server.info.supports_changesets or not options.change_only:
+ try:
+ server.upload_diff(review_request, diff_content,
+ parent_diff_content)
+ except APIError, e:
+ rsp, = e.args
+ print "Error uploading diff: %s (%s)" % (rsp['err']['msg'],
+ rsp['err']['code'])
+ debug(rsp)
+ die("Your review request still exists, but the diff is not " +
+ "attached.")
+
+ if options.publish:
+ server.publish(review_request)
+
+ request_url = 'r/' + str(review_request['id'])
+ review_url = urljoin(server.url, request_url)
+
+ if not review_url.startswith('http'):
+ review_url = 'http://%s' % review_url
+
+ print "Review request #%s posted." % (review_request['id'],)
+ print
+ print review_url
+
+ return review_url
+
+
+def parse_options(args):
+    """Parse post-review's command-line arguments.
+
+    Stores the parsed options in the module-level ``options`` global (via
+    ``globals()``) so the rest of the script can read them, and returns the
+    remaining positional arguments.  Exits with status 1 on mutually
+    exclusive option combinations or missing input files.
+    """
+    parser = OptionParser(usage="%prog [-pond] [-r review_id] [changenum]",
+                          version="%prog " + VERSION)
+
+    parser.add_option("-p", "--publish",
+                      dest="publish", action="store_true", default=PUBLISH,
+                      help="publish the review request immediately after "
+                           "submitting")
+    parser.add_option("-r", "--review-request-id",
+                      dest="rid", metavar="ID", default=None,
+                      help="existing review request ID to update")
+    parser.add_option("-o", "--open",
+                      dest="open_browser", action="store_true",
+                      default=OPEN_BROWSER,
+                      help="open a web browser to the review request page")
+    parser.add_option("-n", "--output-diff",
+                      dest="output_diff_only", action="store_true",
+                      default=False,
+                      help="outputs a diff to the console and exits. "
+                           "Does not post")
+    parser.add_option("--server",
+                      dest="server", default=REVIEWBOARD_URL,
+                      metavar="SERVER",
+                      help="specify a different Review Board server "
+                           "to use")
+    parser.add_option("--diff-only",
+                      dest="diff_only", action="store_true", default=False,
+                      help="uploads a new diff, but does not update "
+                           "info from changelist")
+    parser.add_option("--target-groups",
+                      dest="target_groups", default=TARGET_GROUPS,
+                      help="names of the groups who will perform "
+                           "the review")
+    parser.add_option("--target-people",
+                      dest="target_people", default=TARGET_PEOPLE,
+                      help="names of the people who will perform "
+                           "the review")
+    parser.add_option("--summary",
+                      dest="summary", default=None,
+                      help="summary of the review ")
+    parser.add_option("--description",
+                      dest="description", default=None,
+                      help="description of the review ")
+    parser.add_option("--description-file",
+                      dest="description_file", default=None,
+                      help="text file containing a description of the review")
+    parser.add_option("--guess-summary",
+                      dest="guess_summary", action="store_true",
+                      default=False,
+                      help="guess summary from the latest commit (git/"
+                           "hgsubversion only)")
+    parser.add_option("--guess-description",
+                      dest="guess_description", action="store_true",
+                      default=False,
+                      help="guess description based on commits on this branch "
+                           "(git/hgsubversion only)")
+    parser.add_option("--testing-done",
+                      dest="testing_done", default=None,
+                      help="details of testing done ")
+    parser.add_option("--testing-done-file",
+                      dest="testing_file", default=None,
+                      help="text file containing details of testing done ")
+    parser.add_option("--branch",
+                      dest="branch", default=None,
+                      help="affected branch ")
+    parser.add_option("--bugs-closed",
+                      dest="bugs_closed", default=None,
+                      help="list of bugs closed ")
+    parser.add_option("--revision-range",
+                      dest="revision_range", default=None,
+                      help="generate the diff for review based on given "
+                           "revision range")
+    parser.add_option("--label",
+                      dest="label", default=None,
+                      help="label (ClearCase Only) ")
+    parser.add_option("--submit-as",
+                      dest="submit_as", default=SUBMIT_AS, metavar="USERNAME",
+                      help="user name to be recorded as the author of the "
+                           "review request, instead of the logged in user")
+    parser.add_option("--username",
+                      dest="username", default=None, metavar="USERNAME",
+                      help="user name to be supplied to the reviewboard server")
+    parser.add_option("--password",
+                      dest="password", default=None, metavar="PASSWORD",
+                      help="password to be supplied to the reviewboard server")
+    parser.add_option("--change-only",
+                      dest="change_only", action="store_true",
+                      default=False,
+                      help="updates info from changelist, but does "
+                           "not upload a new diff (only available if your "
+                           "repository supports changesets)")
+    parser.add_option("--parent",
+                      dest="parent_branch", default=None,
+                      metavar="PARENT_BRANCH",
+                      help="the parent branch this diff should be against "
+                           "(only available if your repository supports "
+                           "parent diffs)")
+    parser.add_option("--p4-client",
+                      dest="p4_client", default=None,
+                      help="the Perforce client name that the review is in")
+    parser.add_option("--p4-port",
+                      dest="p4_port", default=None,
+                      help="the Perforce servers IP address that the review is on")
+    parser.add_option("--repository-url",
+                      dest="repository_url", default=None,
+                      help="the url for a repository for creating a diff "
+                           "outside of a working copy (currently only supported "
+                           "by Subversion). Requires --revision-range")
+    parser.add_option("-d", "--debug",
+                      action="store_true", dest="debug", default=DEBUG,
+                      help="display debug output")
+
+    # Publish the parsed options as a module-level global; the rest of the
+    # script (tempt_fate, determine_client, main) reads "options" directly.
+    (globals()["options"], args) = parser.parse_args(args)
+
+    # --description and --description-file are two sources for the same
+    # field; accept at most one, and load the file contents if given.
+    if options.description and options.description_file:
+        sys.stderr.write("The --description and --description-file options "
+                         "are mutually exclusive.\n")
+        sys.exit(1)
+
+    if options.description_file:
+        if os.path.exists(options.description_file):
+            fp = open(options.description_file, "r")
+            options.description = fp.read()
+            fp.close()
+        else:
+            sys.stderr.write("The description file %s does not exist.\n" %
+                             options.description_file)
+            sys.exit(1)
+
+    # Same pattern for --testing-done / --testing-done-file.
+    if options.testing_done and options.testing_file:
+        sys.stderr.write("The --testing-done and --testing-done-file options "
+                         "are mutually exclusive.\n")
+        sys.exit(1)
+
+    if options.testing_file:
+        if os.path.exists(options.testing_file):
+            fp = open(options.testing_file, "r")
+            options.testing_done = fp.read()
+            fp.close()
+        else:
+            sys.stderr.write("The testing file %s does not exist.\n" %
+                             options.testing_file)
+            sys.exit(1)
+
+    # A detached-repository diff has no working copy to infer revisions
+    # from, so an explicit revision range is mandatory.
+    if options.repository_url and not options.revision_range:
+        sys.stderr.write("The --repository-url option requires the "
+                         "--revision-range option.\n")
+        sys.exit(1)
+
+    return args
+
+def determine_client():
+
+ repository_info = None
+ tool = None
+
+ # Try to find the SCM Client we're going to be working with.
+ for tool in SCMCLIENTS:
+ repository_info = tool.get_repository_info()
+
+ if repository_info:
+ break
+
+ if not repository_info:
+ if options.repository_url:
+ print "No supported repository could be access at the supplied url."
+ else:
+ print "The current directory does not contain a checkout from a"
+ print "supported source code repository."
+ sys.exit(1)
+
+ # Verify that options specific to an SCM Client have not been mis-used.
+ if options.change_only and not repository_info.supports_changesets:
+ sys.stderr.write("The --change-only option is not valid for the "
+ "current SCM client.\n")
+ sys.exit(1)
+
+ if options.parent_branch and not repository_info.supports_parent_diffs:
+ sys.stderr.write("The --parent option is not valid for the "
+ "current SCM client.\n")
+ sys.exit(1)
+
+ if ((options.p4_client or options.p4_port) and \
+ not isinstance(tool, PerforceClient)):
+ sys.stderr.write("The --p4-client and --p4-port options are not valid "
+ "for the current SCM client.\n")
+ sys.exit(1)
+
+ return (repository_info, tool)
+
+def main():
+    """Entry point: post a review request for the current change.
+
+    Locates the user's config and cookie files, parses command-line
+    options, detects the SCM client and Review Board server, generates the
+    diff, posts it via tempt_fate(), and optionally opens the resulting
+    review request in a web browser.
+    """
+    # Pick a per-user directory for .reviewboardrc and the cookie file:
+    # Windows profile first, then POSIX $HOME, else the current directory.
+    if 'USERPROFILE' in os.environ:
+        homepath = os.path.join(os.environ["USERPROFILE"], "Local Settings",
+                                "Application Data")
+    elif 'HOME' in os.environ:
+        homepath = os.environ["HOME"]
+    else:
+        homepath = ''
+
+    # Load the config and cookie files
+    globals()['user_config'] = \
+        load_config_file(os.path.join(homepath, ".reviewboardrc"))
+    cookie_file = os.path.join(homepath, ".post-review-cookies.txt")
+
+    args = parse_options(sys.argv[1:])
+
+    repository_info, tool = determine_client()
+
+    # Try to find a valid Review Board server to use.
+    if options.server:
+        server_url = options.server
+    else:
+        server_url = tool.scan_for_server(repository_info)
+
+    if not server_url:
+        print "Unable to find a Review Board server for this source code tree."
+        sys.exit(1)
+
+    server = ReviewBoardServer(server_url, repository_info, cookie_file)
+
+    # Changeset-based repositories (e.g. Perforce) take a change number
+    # from the positional arguments instead of a generated diff range.
+    if repository_info.supports_changesets:
+        changenum = tool.get_changenum(args)
+    else:
+        changenum = None
+
+    # Three ways to produce the diff: explicit revision range, a ClearCase
+    # label, or the client's default diff of the working copy.
+    if options.revision_range:
+        diff = tool.diff_between_revisions(options.revision_range, args,
+                                           repository_info)
+        parent_diff = None
+    elif options.label and isinstance(tool, ClearCaseClient):
+        diff, parent_diff = tool.diff_label(options.label)
+    else:
+        diff, parent_diff = tool.diff(args)
+
+    if options.output_diff_only:
+        print diff
+        sys.exit(0)
+
+    # Let's begin.
+    server.login()
+
+    review_url = tempt_fate(server, tool, changenum, diff_content=diff,
+                            parent_diff_content=parent_diff,
+                            submit_as=options.submit_as)
+
+    # Load the review up in the browser if requested to:
+    if options.open_browser:
+        # Best-effort only: a browser failure must not fail the posting.
+        # NOTE(review): the bare "except:" below swallows every exception,
+        # including KeyboardInterrupt; consider "except Exception:".
+        try:
+            import webbrowser
+            if 'open_new_tab' in dir(webbrowser):
+                # open_new_tab is only in python 2.5+
+                webbrowser.open_new_tab(review_url)
+            elif 'open_new' in dir(webbrowser):
+                webbrowser.open_new(review_url)
+            else:
+                # Windows fallback: let the shell pick the default browser.
+                os.system( 'start %s' % review_url )
+        except:
+            print 'Error opening review URL: %s' % review_url
diff --git a/test/review b/test/review
new file mode 100755
index 0000000000..e1ccb9c0af
--- /dev/null
+++ b/test/review
@@ -0,0 +1,44 @@
+#!/bin/sh
+# Create a Review Board review request for a Scala SVN revision, or for
+# the current local changes, by driving postreview.py against the LAMP
+# Review Board server.
+
+# Quote "$1" so an absent, multi-word, or glob-like first argument cannot
+# break the test or be subject to word splitting.
+if [ -z "$1" ] || [ "$1" = "-h" ] || [ "$1" = "--help" ] || [ "$1" = "-help" ] || [ "$1" = "-?" ]; then
+    echo "Usage: `basename $0` [rev] [args]\n"
+    echo "  [rev]  : either the revision number without leading 'r' (post-commit),"
+    echo "           or '-loc' to create a review from current local changes (pre-commit)\n"
+    echo "  [args] : optional arguments:"
+    echo "      -r ID    existing review request ID to update\n"
+    exit 1
+fi
+
+POSTREVIEW=`dirname $0`/postreview.py
+
+if [ "$1" = "-loc" ]; then
+    echo "creating review request from local changes..."
+    REVARG=""
+    LOG=""
+    SUMMARY="local changes"
+    REPO=""
+else
+    REV=$1
+    PREV=`expr $REV - 1`
+    # NOTE: expr also exits 1 when its result is 0, so revision 1 would be
+    # rejected here even though it is a valid number.
+    if [ $? -ne 0 ]; then
+        echo "argument revision not a number: $REV"
+        exit 1
+    fi
+
+    echo "creating review request for changeset $REV..."
+
+    LOG="`svn log http://lampsvn.epfl.ch/svn-repos/scala -c $REV`"
+    if [ $? -ne 0 ]; then
+        echo "could not get svn log for revision $REV"
+        exit 1
+    fi
+
+    REVARG="--revision-range=$PREV:$REV"
+    SUMMARY="r$REV"
+    REPO="--repository-url=http://lampsvn.epfl.ch/svn-repos/scala"
+fi
+
+
+shift # remove parameter $1 (revision)
+
+# $REVARG and $REPO stay unquoted on purpose: when empty they must expand
+# to nothing, not to an empty argument.  "$@" is quoted so extra arguments
+# containing spaces survive intact.
+python "$POSTREVIEW" --server="https://chara2.epfl.ch" $REVARG --summary="$SUMMARY" --description="$LOG" $REPO -o "$@"
diff --git a/test/simplejson/__init__.py b/test/simplejson/__init__.py
new file mode 100644
index 0000000000..d5b4d39913
--- /dev/null
+++ b/test/simplejson/__init__.py
@@ -0,0 +1,318 @@
+r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
+JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
+interchange format.
+
+:mod:`simplejson` exposes an API familiar to users of the standard library
+:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
+version of the :mod:`json` library contained in Python 2.6, but maintains
+compatibility with Python 2.4 and Python 2.5 and (currently) has
+significant performance advantages, even without using the optional C
+extension for speedups.
+
+Encoding basic Python object hierarchies::
+
+ >>> import simplejson as json
+ >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
+ '["foo", {"bar": ["baz", null, 1.0, 2]}]'
+ >>> print json.dumps("\"foo\bar")
+ "\"foo\bar"
+ >>> print json.dumps(u'\u1234')
+ "\u1234"
+ >>> print json.dumps('\\')
+ "\\"
+ >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
+ {"a": 0, "b": 0, "c": 0}
+ >>> from StringIO import StringIO
+ >>> io = StringIO()
+ >>> json.dump(['streaming API'], io)
+ >>> io.getvalue()
+ '["streaming API"]'
+
+Compact encoding::
+
+ >>> import simplejson as json
+ >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
+ '[1,2,3,{"4":5,"6":7}]'
+
+Pretty printing::
+
+ >>> import simplejson as json
+ >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
+ >>> print '\n'.join([l.rstrip() for l in s.splitlines()])
+ {
+ "4": 5,
+ "6": 7
+ }
+
+Decoding JSON::
+
+ >>> import simplejson as json
+ >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
+ >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
+ True
+ >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
+ True
+ >>> from StringIO import StringIO
+ >>> io = StringIO('["streaming API"]')
+ >>> json.load(io)[0] == 'streaming API'
+ True
+
+Specializing JSON object decoding::
+
+ >>> import simplejson as json
+ >>> def as_complex(dct):
+ ... if '__complex__' in dct:
+ ... return complex(dct['real'], dct['imag'])
+ ... return dct
+ ...
+ >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
+ ... object_hook=as_complex)
+ (1+2j)
+ >>> import decimal
+ >>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
+ True
+
+Specializing JSON object encoding::
+
+ >>> import simplejson as json
+ >>> def encode_complex(obj):
+ ... if isinstance(obj, complex):
+ ... return [obj.real, obj.imag]
+    ...     raise TypeError(repr(obj) + " is not JSON serializable")
+ ...
+ >>> json.dumps(2 + 1j, default=encode_complex)
+ '[2.0, 1.0]'
+ >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
+ '[2.0, 1.0]'
+ >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
+ '[2.0, 1.0]'
+
+
+Using simplejson.tool from the shell to validate and pretty-print::
+
+ $ echo '{"json":"obj"}' | python -m simplejson.tool
+ {
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+ Expecting property name: line 1 column 2 (char 2)
+"""
+__version__ = '2.0.9'
+__all__ = [
+ 'dump', 'dumps', 'load', 'loads',
+ 'JSONDecoder', 'JSONEncoder',
+]
+
+__author__ = 'Bob Ippolito <bob@redivi.com>'
+
+from decoder import JSONDecoder
+from encoder import JSONEncoder
+
+# Shared encoder used by dump()/dumps() when they are called with
+# all-default arguments (the common case), avoiding per-call construction.
+_default_encoder = JSONEncoder(
+    skipkeys=False,
+    ensure_ascii=True,
+    check_circular=True,
+    allow_nan=True,
+    indent=None,
+    separators=None,
+    encoding='utf-8',
+    default=None,
+)
+
+def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
+        allow_nan=True, cls=None, indent=None, separators=None,
+        encoding='utf-8', default=None, **kw):
+    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
+    ``.write()``-supporting file-like object).
+
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+    will be skipped instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then some chunks written to ``fp``
+    may be ``unicode`` instances, subject to normal Python ``str`` to
+    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
+    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
+    to cause an error.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in an ``OverflowError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
+    in strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and object
+    members will be pretty-printed with that indent level. An indent level
+    of 0 will only insert newlines. ``None`` is the most compact representation.
+
+    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+    then it will be used instead of the default ``(', ', ': ')`` separators.
+    ``(',', ':')`` is the most compact JSON representation.
+
+    ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg.
+
+    """
+    # cached encoder: reuse the shared module-level instance when every
+    # argument is at its default, skipping encoder construction entirely.
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        encoding == 'utf-8' and default is None and not kw):
+        iterable = _default_encoder.iterencode(obj)
+    else:
+        if cls is None:
+            cls = JSONEncoder
+        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+            separators=separators, encoding=encoding,
+            default=default, **kw).iterencode(obj)
+    # could accelerate with writelines in some versions of Python, at
+    # a debuggability cost
+    for chunk in iterable:
+        fp.write(chunk)
+
+
+def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
+        allow_nan=True, cls=None, indent=None, separators=None,
+        encoding='utf-8', default=None, **kw):
+    """Serialize ``obj`` to a JSON formatted ``str``.
+
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+    will be skipped instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the return value will be a
+    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
+    coercion rules instead of being escaped to an ASCII ``str``.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in an ``OverflowError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+    strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and
+    object members will be pretty-printed with that indent level. An indent
+    level of 0 will only insert newlines. ``None`` is the most compact
+    representation.
+
+    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+    then it will be used instead of the default ``(', ', ': ')`` separators.
+    ``(',', ':')`` is the most compact JSON representation.
+
+    ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg.
+
+    """
+    # cached encoder: fast path sharing the module-level default instance
+    # when no customisation is requested (mirrors dump() above).
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        encoding == 'utf-8' and default is None and not kw):
+        return _default_encoder.encode(obj)
+    if cls is None:
+        cls = JSONEncoder
+    return cls(
+        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+        separators=separators, encoding=encoding, default=default,
+        **kw).encode(obj)
+
+
+# Shared decoder used by load()/loads() when no customisation is requested.
+_default_decoder = JSONDecoder(encoding=None, object_hook=None)
+
+
+def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
+        parse_int=None, parse_constant=None, **kw):
+    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
+    a JSON document) to a Python object.
+
+    If the contents of ``fp`` is encoded with an ASCII based encoding other
+    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
+    be specified. Encodings that are not ASCII based (such as UCS-2) are
+    not allowed, and should be wrapped with
+    ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
+    object and passed to ``loads()``
+
+    ``object_hook`` is an optional function that will be called with the
+    result of any object literal decode (a ``dict``). The return value of
+    ``object_hook`` will be used instead of the ``dict``. This feature
+    can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+    ``parse_float``, ``parse_int`` and ``parse_constant`` are forwarded
+    unchanged to ``loads()``; see its docstring for their meaning.
+
+    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+    kwarg.
+
+    """
+    # Read the whole stream and delegate every argument to loads().
+    return loads(fp.read(),
+        encoding=encoding, cls=cls, object_hook=object_hook,
+        parse_float=parse_float, parse_int=parse_int,
+        parse_constant=parse_constant, **kw)
+
+
+def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
+        parse_int=None, parse_constant=None, **kw):
+    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
+    document) to a Python object.
+
+    If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
+    other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
+    must be specified. Encodings that are not ASCII based (such as UCS-2)
+    are not allowed and should be decoded to ``unicode`` first.
+
+    ``object_hook`` is an optional function that will be called with the
+    result of any object literal decode (a ``dict``). The return value of
+    ``object_hook`` will be used instead of the ``dict``. This feature
+    can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+    ``parse_float``, if specified, will be called with the string
+    of every JSON float to be decoded. By default this is equivalent to
+    float(num_str). This can be used to use another datatype or parser
+    for JSON floats (e.g. decimal.Decimal).
+
+    ``parse_int``, if specified, will be called with the string
+    of every JSON int to be decoded. By default this is equivalent to
+    int(num_str). This can be used to use another datatype or parser
+    for JSON integers (e.g. float).
+
+    ``parse_constant``, if specified, will be called with one of the
+    following strings: -Infinity, Infinity, NaN, null, true, false.
+    This can be used to raise an exception if invalid JSON numbers
+    are encountered.
+
+    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+    kwarg.
+
+    """
+    # Fast path: reuse the shared default decoder when nothing is
+    # customised, avoiding a JSONDecoder construction per call.
+    if (cls is None and encoding is None and object_hook is None and
+            parse_int is None and parse_float is None and
+            parse_constant is None and not kw):
+        return _default_decoder.decode(s)
+    if cls is None:
+        cls = JSONDecoder
+    # Only forward hooks that were actually supplied, so the decoder's own
+    # defaults apply to the rest.
+    if object_hook is not None:
+        kw['object_hook'] = object_hook
+    if parse_float is not None:
+        kw['parse_float'] = parse_float
+    if parse_int is not None:
+        kw['parse_int'] = parse_int
+    if parse_constant is not None:
+        kw['parse_constant'] = parse_constant
+    return cls(encoding=encoding, **kw).decode(s)
diff --git a/test/simplejson/decoder.py b/test/simplejson/decoder.py
new file mode 100644
index 0000000000..b769ea486c
--- /dev/null
+++ b/test/simplejson/decoder.py
@@ -0,0 +1,354 @@
+"""Implementation of JSONDecoder
+"""
+import re
+import sys
+import struct
+
+from simplejson.scanner import make_scanner
+try:
+ from simplejson._speedups import scanstring as c_scanstring
+except ImportError:
+ c_scanstring = None
+
+__all__ = ['JSONDecoder']
+
+FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
+
+def _floatconstants():
+    """Return (nan, inf, -inf) built from IEEE-754 double bit patterns.
+
+    Python 2 only: relies on ``str.decode('hex')``.
+    """
+    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
+    # The hex pattern above is big-endian; reverse each 8-byte double on
+    # little-endian machines before unpacking.
+    if sys.byteorder != 'big':
+        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
+    nan, inf = struct.unpack('dd', _BYTES)
+    return nan, inf, -inf
+
+NaN, PosInf, NegInf = _floatconstants()
+
+
+def linecol(doc, pos):
+    """Return the 1-based line number and column of index ``pos`` in ``doc``."""
+    lineno = doc.count('\n', 0, pos) + 1
+    if lineno == 1:
+        colno = pos
+    else:
+        # Column is the offset from the last newline before pos.
+        colno = pos - doc.rindex('\n', 0, pos)
+    return lineno, colno
+
+
+def errmsg(msg, doc, pos, end=None):
+    """Format a decode error message with line/column (and optional range)
+    context for position ``pos`` in ``doc``."""
+    # Note that this function is called from _speedups
+    lineno, colno = linecol(doc, pos)
+    if end is None:
+        #fmt = '{0}: line {1} column {2} (char {3})'
+        #return fmt.format(msg, lineno, colno, pos)
+        fmt = '%s: line %d column %d (char %d)'
+        return fmt % (msg, lineno, colno, pos)
+    endlineno, endcolno = linecol(doc, end)
+    #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
+    #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
+    fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
+    return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
+
+
+_CONSTANTS = {
+ '-Infinity': NegInf,
+ 'Infinity': PosInf,
+ 'NaN': NaN,
+}
+
+STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
+BACKSLASH = {
+ '"': u'"', '\\': u'\\', '/': u'/',
+ 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
+}
+
+DEFAULT_ENCODING = "utf-8"
+
+def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
+    """Scan the string s for a JSON string. End is the index of the
+    character in s after the quote that started the JSON string.
+    Unescapes all valid JSON string escape sequences and raises ValueError
+    on attempt to decode an invalid string. If strict is False then literal
+    control characters are allowed in the string.
+
+    Returns a tuple of the decoded string and the index of the character in s
+    after the end quote."""
+    if encoding is None:
+        encoding = DEFAULT_ENCODING
+    chunks = []
+    _append = chunks.append
+    begin = end - 1
+    while 1:
+        chunk = _m(s, end)
+        if chunk is None:
+            raise ValueError(
+                errmsg("Unterminated string starting at", s, begin))
+        end = chunk.end()
+        content, terminator = chunk.groups()
+        # Content contains zero or more unescaped string characters
+        if content:
+            if not isinstance(content, unicode):
+                content = unicode(content, encoding)
+            _append(content)
+        # Terminator is the end of string, a literal control character,
+        # or a backslash denoting that an escape sequence follows
+        if terminator == '"':
+            break
+        elif terminator != '\\':
+            if strict:
+                msg = "Invalid control character %r at" % (terminator,)
+                #msg = "Invalid control character {0!r} at".format(terminator)
+                raise ValueError(errmsg(msg, s, end))
+            else:
+                # Non-strict mode: keep the literal control character.
+                _append(terminator)
+                continue
+        try:
+            esc = s[end]
+        except IndexError:
+            raise ValueError(
+                errmsg("Unterminated string starting at", s, begin))
+        # If not a unicode escape sequence, must be in the lookup table
+        if esc != 'u':
+            try:
+                char = _b[esc]
+            except KeyError:
+                msg = "Invalid \\escape: " + repr(esc)
+                raise ValueError(errmsg(msg, s, end))
+            end += 1
+        else:
+            # Unicode escape sequence
+            esc = s[end + 1:end + 5]
+            next_end = end + 5
+            if len(esc) != 4:
+                msg = "Invalid \\uXXXX escape"
+                raise ValueError(errmsg(msg, s, end))
+            uni = int(esc, 16)
+            # Check for surrogate pair on UCS-4 systems: a high surrogate
+            # must be followed by a \uXXXX low surrogate, and the pair is
+            # combined into a single code point above U+FFFF.
+            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
+                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
+                if not s[end + 5:end + 7] == '\\u':
+                    raise ValueError(errmsg(msg, s, end))
+                esc2 = s[end + 7:end + 11]
+                if len(esc2) != 4:
+                    raise ValueError(errmsg(msg, s, end))
+                uni2 = int(esc2, 16)
+                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
+                next_end += 6
+            char = unichr(uni)
+            end = next_end
+        # Append the unescaped character
+        _append(char)
+    return u''.join(chunks), end
+
+
+# Use speedup if available
+scanstring = c_scanstring or py_scanstring
+
+WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
+WHITESPACE_STR = ' \t\n\r'
+
+def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+    """Parse a JSON object from ``s`` starting just after its opening '{'.
+
+    Returns ``(pairs, end)`` where ``pairs`` is the decoded dict (or the
+    result of ``object_hook`` applied to it) and ``end`` is the index one
+    past the closing '}'.  Uses Python 2 tuple-parameter unpacking in the
+    signature, as required by the scanner.
+    """
+    pairs = {}
+    # Use a slice to prevent IndexError from being raised, the following
+    # check will raise a more specific ValueError if the string is empty
+    nextchar = s[end:end + 1]
+    # Normally we expect nextchar == '"'
+    if nextchar != '"':
+        if nextchar in _ws:
+            end = _w(s, end).end()
+            nextchar = s[end:end + 1]
+        # Trivial empty object
+        if nextchar == '}':
+            return pairs, end + 1
+        elif nextchar != '"':
+            raise ValueError(errmsg("Expecting property name", s, end))
+    end += 1
+    while True:
+        key, end = scanstring(s, end, encoding, strict)
+
+        # To skip some function call overhead we optimize the fast paths where
+        # the JSON key separator is ": " or just ":".
+        if s[end:end + 1] != ':':
+            end = _w(s, end).end()
+            if s[end:end + 1] != ':':
+                raise ValueError(errmsg("Expecting : delimiter", s, end))
+
+        end += 1
+
+        # Skip whitespace between ':' and the value; the one-character
+        # fast path avoids a regex match for the common single space.
+        try:
+            if s[end] in _ws:
+                end += 1
+                if s[end] in _ws:
+                    end = _w(s, end + 1).end()
+        except IndexError:
+            pass
+
+        try:
+            value, end = scan_once(s, end)
+        except StopIteration:
+            raise ValueError(errmsg("Expecting object", s, end))
+        pairs[key] = value
+
+        try:
+            nextchar = s[end]
+            if nextchar in _ws:
+                end = _w(s, end + 1).end()
+                nextchar = s[end]
+        except IndexError:
+            nextchar = ''
+        end += 1
+
+        if nextchar == '}':
+            break
+        elif nextchar != ',':
+            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
+
+        # After the comma, skip whitespace up to the next property name.
+        try:
+            nextchar = s[end]
+            if nextchar in _ws:
+                end += 1
+                nextchar = s[end]
+                if nextchar in _ws:
+                    end = _w(s, end + 1).end()
+                    nextchar = s[end]
+        except IndexError:
+            nextchar = ''
+
+        end += 1
+        if nextchar != '"':
+            raise ValueError(errmsg("Expecting property name", s, end - 1))
+
+    if object_hook is not None:
+        pairs = object_hook(pairs)
+    return pairs, end
+
+def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+    """Parse a JSON array from ``s`` starting just after its opening '['.
+
+    Returns ``(values, end)`` where ``values`` is the decoded list and
+    ``end`` is the index one past the closing ']'.  Uses Python 2
+    tuple-parameter unpacking in the signature, as required by the scanner.
+    """
+    values = []
+    nextchar = s[end:end + 1]
+    if nextchar in _ws:
+        end = _w(s, end + 1).end()
+        nextchar = s[end:end + 1]
+    # Look-ahead for trivial empty array
+    if nextchar == ']':
+        return values, end + 1
+    _append = values.append
+    while True:
+        try:
+            value, end = scan_once(s, end)
+        except StopIteration:
+            raise ValueError(errmsg("Expecting object", s, end))
+        _append(value)
+        nextchar = s[end:end + 1]
+        if nextchar in _ws:
+            end = _w(s, end + 1).end()
+            nextchar = s[end:end + 1]
+        end += 1
+        if nextchar == ']':
+            break
+        elif nextchar != ',':
+            raise ValueError(errmsg("Expecting , delimiter", s, end))
+
+        # Skip whitespace after the comma before the next element; the
+        # one-character fast path avoids a regex match for a single space.
+        try:
+            if s[end] in _ws:
+                end += 1
+                if s[end] in _ws:
+                    end = _w(s, end + 1).end()
+        except IndexError:
+            pass
+
+    return values, end
+
+class JSONDecoder(object):
+    """Simple JSON <http://json.org> decoder
+
+    Performs the following translations in decoding by default:
+
+    +---------------+-------------------+
+    | JSON          | Python            |
+    +===============+===================+
+    | object        | dict              |
+    +---------------+-------------------+
+    | array         | list              |
+    +---------------+-------------------+
+    | string        | unicode           |
+    +---------------+-------------------+
+    | number (int)  | int, long         |
+    +---------------+-------------------+
+    | number (real) | float             |
+    +---------------+-------------------+
+    | true          | True              |
+    +---------------+-------------------+
+    | false         | False             |
+    +---------------+-------------------+
+    | null          | None              |
+    +---------------+-------------------+
+
+    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
+    their corresponding ``float`` values, which is outside the JSON spec.
+
+    """
+
+    def __init__(self, encoding=None, object_hook=None, parse_float=None,
+            parse_int=None, parse_constant=None, strict=True):
+        """``encoding`` determines the encoding used to interpret any ``str``
+        objects decoded by this instance (utf-8 by default).  It has no
+        effect when decoding ``unicode`` objects.
+
+        Note that currently only encodings that are a superset of ASCII work,
+        strings of other encodings should be passed in as ``unicode``.
+
+        ``object_hook``, if specified, will be called with the result
+        of every JSON object decoded and its return value will be used in
+        place of the given ``dict``.  This can be used to provide custom
+        deserializations (e.g. to support JSON-RPC class hinting).
+
+        ``parse_float``, if specified, will be called with the string
+        of every JSON float to be decoded. By default this is equivalent to
+        float(num_str). This can be used to use another datatype or parser
+        for JSON floats (e.g. decimal.Decimal).
+
+        ``parse_int``, if specified, will be called with the string
+        of every JSON int to be decoded. By default this is equivalent to
+        int(num_str). This can be used to use another datatype or parser
+        for JSON integers (e.g. float).
+
+        ``parse_constant``, if specified, will be called with one of the
+        following strings: -Infinity, Infinity, NaN.
+        This can be used to raise an exception if invalid JSON numbers
+        are encountered.
+
+        """
+        self.encoding = encoding
+        self.object_hook = object_hook
+        self.parse_float = parse_float or float
+        self.parse_int = parse_int or int
+        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
+        self.strict = strict
+        self.parse_object = JSONObject
+        self.parse_array = JSONArray
+        self.parse_string = scanstring
+        # Build the scanner last: it closes over the parse_* attributes
+        # assigned above.
+        self.scan_once = make_scanner(self)
+
+    def decode(self, s, _w=WHITESPACE.match):
+        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
+        instance containing a JSON document)
+
+        """
+        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
+        end = _w(s, end).end()
+        # Reject any non-whitespace data after the document.
+        if end != len(s):
+            raise ValueError(errmsg("Extra data", s, end, len(s)))
+        return obj
+
+    def raw_decode(self, s, idx=0):
+        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
+        with a JSON document) and return a 2-tuple of the Python
+        representation and the index in ``s`` where the document ended.
+
+        This can be used to decode a JSON document from a string that may
+        have extraneous data at the end.
+
+        """
+        try:
+            obj, end = self.scan_once(s, idx)
+        except StopIteration:
+            raise ValueError("No JSON object could be decoded")
+        return obj, end
diff --git a/test/simplejson/encoder.py b/test/simplejson/encoder.py
new file mode 100644
index 0000000000..cf58290366
--- /dev/null
+++ b/test/simplejson/encoder.py
@@ -0,0 +1,440 @@
+"""Implementation of JSONEncoder
+"""
+import re
+
+try:
+ from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
+except ImportError:
+ c_encode_basestring_ascii = None
+try:
+ from simplejson._speedups import make_encoder as c_make_encoder
+except ImportError:
+ c_make_encoder = None
+
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Two-character escape sequences for the characters JSON names explicitly.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
# Every other control character falls back to a generic \uXXXX escape.
for i in range(0x20):
    if chr(i) not in ESCAPE_DCT:
        ESCAPE_DCT[chr(i)] = '\\u%04x' % (i,)

# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr

def encode_basestring(s):
    """Return a JSON representation of a Python string.

    Escapes the characters matched by ``ESCAPE`` and wraps the result in
    double quotes.
    """
    return '"' + ESCAPE.sub(lambda m: ESCAPE_DCT[m.group(0)], s) + '"'


def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string.

    Non-ASCII characters become \\uXXXX escapes; code points outside the
    BMP are emitted as UTF-16 surrogate pairs.
    """
    # Byte strings containing UTF-8 are decoded first so that multi-byte
    # sequences escape as a single code point (Python 2 behaviour).
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        ch = match.group(0)
        if ch in ESCAPE_DCT:
            return ESCAPE_DCT[ch]
        code = ord(ch)
        if code < 0x10000:
            return '\\u%04x' % (code,)
        # Non-BMP code point: split into a UTF-16 surrogate pair.
        code -= 0x10000
        lead = 0xd800 | ((code >> 10) & 0x3ff)
        trail = 0xdc00 | (code & 0x3ff)
        return '\\u%04x\\u%04x' % (lead, trail)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
+
+
+encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
+
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).

    """
    # Class-level defaults; instance attributes shadow these when the
    # ``separators`` constructor argument is supplied.
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is false, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None. If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is true, the output is guaranteed to be str
        objects with all incoming unicode characters escaped. If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is true, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is true, then NaN, Infinity, and -Infinity will be
        encoded as such. This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is true, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level. An indent level of 0 will only insert newlines.
        None is the most compact representation.

        If specified, separators should be a (item_separator, key_separator)
        tuple. The default is (', ', ': '). To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized. It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.

        """

        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            # Overrides the class-level item_separator / key_separator.
            self.item_separator, self.key_separator = separators
        if default is not None:
            # Shadow the ``default`` method with the supplied callable.
            self.default = default
        self.encoding = encoding

    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)

        """
        raise TypeError(repr(o) + " is not JSON serializable")

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'

        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    # Byte strings in other encodings are normalized to
                    # unicode before escaping.
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)

        """
        # ``markers`` maps id(container) -> container for cycle detection;
        # None disables the check entirely.
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # Wrap the chosen escaper so byte strings are decoded with the
            # configured encoding before being escaped.
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)

        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials. Note that this type of test is processor- and/or
            # platform-specific, so do tests which don't depend on the internals.

            if o != o:
                # Only NaN compares unequal to itself.
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)

            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " +
                    repr(o))

            return text


        # The C accelerator only handles the compact, unsorted, one-shot
        # case; anything else goes through the pure-Python generator.
        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
+
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Build the pure-Python ``_iterencode(o, _current_indent_level)``
    generator used when the C accelerator is unavailable or unsuitable
    (indenting, key sorting).

    ``markers`` is the shared dict used for circular-reference detection
    (or None to disable the check).  The builtin names bound as keyword
    defaults above become fast local lookups inside the closures.
    """

    def _iterencode_list(lst, _current_indent_level):
        # Yield the chunks of a JSON array.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Register this container so a self-reference raises below.
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # The '[' (plus any indent) has been consumed by the first
                # item; subsequent items are prefixed by the separator only.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                # Containers and default-encoded objects stream through the
                # appropriate sub-generator.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Yield the chunks of a JSON object.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            # True/False are tested before the int check because bool is a
            # subclass of int; otherwise they would serialize as 1/0.
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatch over the supported types.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            # Unknown type: let ``_default`` convert it, then re-encode
            # whatever it returned.
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]

    return _iterencode
diff --git a/test/simplejson/scanner.py b/test/simplejson/scanner.py
new file mode 100644
index 0000000000..adbc6ec979
--- /dev/null
+++ b/test/simplejson/scanner.py
@@ -0,0 +1,65 @@
+"""JSON token scanner
+"""
+import re
+try:
+ from simplejson._speedups import make_scanner as c_make_scanner
+except ImportError:
+ c_make_scanner = None
+
+__all__ = ['make_scanner']
+
+NUMBER_RE = re.compile(
+ r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
+ (re.VERBOSE | re.MULTILINE | re.DOTALL))
+
+def py_make_scanner(context):
+ parse_object = context.parse_object
+ parse_array = context.parse_array
+ parse_string = context.parse_string
+ match_number = NUMBER_RE.match
+ encoding = context.encoding
+ strict = context.strict
+ parse_float = context.parse_float
+ parse_int = context.parse_int
+ parse_constant = context.parse_constant
+ object_hook = context.object_hook
+
+ def _scan_once(string, idx):
+ try:
+ nextchar = string[idx]
+ except IndexError:
+ raise StopIteration
+
+ if nextchar == '"':
+ return parse_string(string, idx + 1, encoding, strict)
+ elif nextchar == '{':
+ return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
+ elif nextchar == '[':
+ return parse_array((string, idx + 1), _scan_once)
+ elif nextchar == 'n' and string[idx:idx + 4] == 'null':
+ return None, idx + 4
+ elif nextchar == 't' and string[idx:idx + 4] == 'true':
+ return True, idx + 4
+ elif nextchar == 'f' and string[idx:idx + 5] == 'false':
+ return False, idx + 5
+
+ m = match_number(string, idx)
+ if m is not None:
+ integer, frac, exp = m.groups()
+ if frac or exp:
+ res = parse_float(integer + (frac or '') + (exp or ''))
+ else:
+ res = parse_int(integer)
+ return res, m.end()
+ elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
+ return parse_constant('NaN'), idx + 3
+ elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
+ return parse_constant('Infinity'), idx + 8
+ elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
+ return parse_constant('-Infinity'), idx + 9
+ else:
+ raise StopIteration
+
+ return _scan_once
+
+make_scanner = c_make_scanner or py_make_scanner
diff --git a/test/simplejson/tool.py b/test/simplejson/tool.py
new file mode 100644
index 0000000000..90443317b2
--- /dev/null
+++ b/test/simplejson/tool.py
@@ -0,0 +1,37 @@
+r"""Command-line tool to validate and pretty-print JSON
+
+Usage::
+
+ $ echo '{"json":"obj"}' | python -m simplejson.tool
+ {
+ "json": "obj"
+ }
+ $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+ Expecting property name: line 1 column 2 (char 2)
+
+"""
+import sys
+import simplejson
+
+def main():
+ if len(sys.argv) == 1:
+ infile = sys.stdin
+ outfile = sys.stdout
+ elif len(sys.argv) == 2:
+ infile = open(sys.argv[1], 'rb')
+ outfile = sys.stdout
+ elif len(sys.argv) == 3:
+ infile = open(sys.argv[1], 'rb')
+ outfile = open(sys.argv[2], 'wb')
+ else:
+ raise SystemExit(sys.argv[0] + " [infile [outfile]]")
+ try:
+ obj = simplejson.load(infile)
+ except ValueError, e:
+ raise SystemExit(e)
+ simplejson.dump(obj, outfile, sort_keys=True, indent=4)
+ outfile.write('\n')
+
+
# Script entry point: supports ``python -m simplejson.tool`` and direct runs.
if __name__ == '__main__':
    main()