-rw-r--r--   docs/cross.md                                   162
-rw-r--r--   docs/extending.md                                 0
-rw-r--r--   docs/internals.md                               357
-rw-r--r--   docs/intro.md                                   578
-rw-r--r--   docs/modules.md                                 125
-rw-r--r--   docs/publishing.md                                0
-rw-r--r--   docs/tasks.md                                   310
-rw-r--r--   integration/test/resources/ammonite/build.sc     5
-rw-r--r--   integration/test/resources/jawn/build.sc         5
-rw-r--r--   readme.md                                       348
-rw-r--r--   scalalib/src/mill/scalalib/MiscModule.scala      79
11 files changed, 1586 insertions, 383 deletions
diff --git a/docs/cross.md b/docs/cross.md
new file mode 100644
index 00000000..9a2bbe92
--- /dev/null
+++ b/docs/cross.md
@@ -0,0 +1,162 @@
+Mill handles cross-building of all sorts via the `Cross[T]` module.
+
+
+## Defining Cross Modules
+
+You can use this as follows:
+
+```scala
+object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
+class FooModule(crossVersion: String) extends Module{
+ def suffix = T{ crossVersion }
+ def bigSuffix = T{ suffix().toUpperCase() }
+}
+```
+
+This defines three copies of `FooModule`: `"2.10"`, `"2.11"` and `"2.12"`, each
+of which has its own `suffix` target. You can then run them via
+
+```bash
+mill --show foo[2.10].suffix
+mill --show foo[2.10].bigSuffix
+mill --show foo[2.11].suffix
+mill --show foo[2.11].bigSuffix
+mill --show foo[2.12].suffix
+mill --show foo[2.12].bigSuffix
+```
+
+The modules each also have a `basePath` of
+
+```text
+foo/2.10
+foo/2.11
+foo/2.12
+```
+
+And the `suffix` targets will have the corresponding output paths for their
+metadata and files:
+
+```text
+foo/2.10/suffix
+foo/2.10/bigSuffix
+foo/2.11/suffix
+foo/2.11/bigSuffix
+foo/2.12/suffix
+foo/2.12/bigSuffix
+```
+
+You can also have a cross-build with multiple inputs:
+
+```scala
+val crossMatrix = for{
+ crossVersion <- Seq("210", "211", "212")
+ platform <- Seq("jvm", "js", "native")
+ if !(platform == "native" && crossVersion != "212")
+} yield (crossVersion, platform)
+
+object foo extends mill.Cross[FooModule](crossMatrix:_*)
+class FooModule(crossVersion: String, platform: String) extends Module{
+ def suffix = T{ crossVersion + "_" + platform }
+}
+```
+
+Here, we define our cross-values programmatically using a `for`-loop that spits
+out tuples instead of individual values. Our `FooModule` template class then
+takes two parameters instead of one. This creates the following modules each
+with their own `suffix` target:
+
+```bash
+mill --show foo[210,jvm].suffix
+mill --show foo[211,jvm].suffix
+mill --show foo[212,jvm].suffix
+mill --show foo[210,js].suffix
+mill --show foo[211,js].suffix
+mill --show foo[212,js].suffix
+mill --show foo[212,native].suffix
+```
+
+## Using Cross Modules from Outside
+
+You can refer to targets defined in cross-modules as follows:
+
+```scala
+object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
+class FooModule(crossVersion: String) extends Module{
+ def suffix = T{ crossVersion }
+}
+
+def bar = T{ "hello " + foo("2.10").suffix }
+```
+
+Here, `foo("2.10")` references the `"2.10"` instance of `FooModule`. You can
+refer to whatever versions of the cross-module you want, even using multiple
+versions of the cross-module in the same target:
+
+```scala
+object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
+class FooModule(crossVersion: String) extends Module{
+ def suffix = T{ crossVersion }
+}
+
+def bar = T{ "hello " + foo("2.10").suffix + " world " + foo("2.12").suffix }
+```
+
+## Using Cross Modules from other Cross Modules
+
+Targets in cross-modules can depend on one another the same way that external
+targets do:
+
+```scala
+object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
+class FooModule(crossVersion: String) extends Module{
+ def suffix = T{ crossVersion }
+}
+
+object bar extends mill.Cross[BarModule]("2.10", "2.11", "2.12")
+class BarModule(crossVersion: String) extends Module{
+ def bigSuffix = T{ foo(crossVersion).suffix().toUpperCase() }
+}
+```
+
+Here, you can run:
+
+```bash
+mill --show foo[2.10].suffix
+mill --show foo[2.11].suffix
+mill --show foo[2.12].suffix
+mill --show bar[2.10].bigSuffix
+mill --show bar[2.11].bigSuffix
+mill --show bar[2.12].bigSuffix
+```
+
+
+## Cross Resolvers
+
+You can define an implicit `mill.define.Cross.Resolve` within your
+cross-modules, which would let you use a shorthand `foo()` syntax when referring
+to other cross-modules with an identical set of cross values:
+
+```scala
+trait MyModule extends Module{
+ def crossVersion: String
+  implicit object resolver extends mill.define.Cross.Resolve[MyModule]{
+    def resolve[V <: MyModule](c: Cross[V]): V = c.itemMap(crossVersion)
+ }
+}
+
+object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
+class FooModule(crossVersion: String) extends MyModule{
+ def suffix = T{ crossVersion }
+}
+
+object bar extends mill.Cross[BarModule]("2.10", "2.11", "2.12")
+class BarModule(crossVersion: String) extends MyModule{
+ def bigSuffix = T{ foo().suffix().toUpperCase() }
+}
+```
+
+While the example `resolver` simply looks up the target `Cross` value for the
+cross-module instance with the same `crossVersion`, you can make the resolver
+arbitrarily complex, e.g. the `resolver` for `mill.scalalib.CrossSbtModule`
+looks for a cross-module instance whose `scalaVersion` is binary compatible
+(e.g. 2.10.5 is compatible with 2.10.3) with the current cross-module.
\ No newline at end of file
diff --git a/docs/extending.md b/docs/extending.md
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/docs/extending.md
diff --git a/docs/internals.md b/docs/internals.md
new file mode 100644
index 00000000..d10860dc
--- /dev/null
+++ b/docs/internals.md
@@ -0,0 +1,357 @@
+
+## Mill Design Principles
+
+A lot of Mill's design principles are intended to fix SBT's flaws, as described
+in the blog post
+[What's wrong with SBT](http://www.lihaoyi.com/post/SowhatswrongwithSBT.html),
+building on the best ideas from tools like [CBT](https://github.com/cvogt/cbt)
+and [Bazel](https://bazel.build/), and the ideas from my blog post
+[Build Tools as
+Pure Functional Programs](http://www.lihaoyi.com/post/BuildToolsasPureFunctionalPrograms.html).
+Before working on Mill, read through that post to understand where it is coming
+from!
+
+### Dependency graph first
+
+Mill's most important abstraction is the dependency graph of `Task`s.
+Constructed using the `T{...}` `T.task{...}` `T.command{...}` syntax, these
+track the dependencies between steps of a build, so those steps can be executed
+in the correct order, queried, or parallelized.
+
+While Mill provides helpers like `ScalaModule` and other things you can use to
+quickly instantiate a bunch of related tasks (resolve dependencies, find
+sources, compile, package into jar, ...) these are secondary. When Mill
+executes, the dependency graph is what matters: any other mode of organization
+(hierarchies, modules, inheritance, etc.) is only important to create this
+dependency graph of `Task`s.
+
+### Builds are hierarchical
+
+The syntax for running targets from the command line `mill Foo.bar.baz` is
+the same as referencing a target in Scala code, `Foo.bar.baz`.
+
+Everything that you can run from the command line lives in an object hierarchy
+in your `build.sc` file. Different parts of the hierarchy can have different
+`Target`s available: just add a new `def foo = T{...}` somewhere and you'll be
+able to run it.
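+
+For instance, a minimal sketch (module and target names are hypothetical) of
+how the object hierarchy maps to the command line:
+
+```scala
+import mill._
+
+object Foo extends Module{
+  object bar extends Module{
+    // runnable from the shell as `mill Foo.bar.baz`
+    def baz = T{ "hello" }
+  }
+}
+```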
+
+Cross builds, using the `Cross` data structure, are just another kind of node in
+the object hierarchy. The only difference is syntax: from the command line you'd
+run something via `mill core.cross[a].printIt` while from code you use
+`core.cross("a").printIt` due to different restrictions in Scala/Bash syntax.
+
+### Caching by default
+
+Every `Target` in a build, defined by `def foo = T{...}`, is cached by default.
+Currently this is done using a `foo/meta.json` file in the `out/` folder. The
+`Target` is also provided a `foo/` path on the filesystem dedicated to it, for
+it to store output files etc.
+
+This happens whether you want it to or not. Every `Target` is cached, not just
+the "slow" ones like `compile` or `assembly`.
+
+Caching is keyed on the `.hashCode` of the returned value. For `Target`s
+returning the contents of a file/folder on disk, they return `PathRef` instances
+whose hashcode is based on the hash of the disk contents. Serialization of the
+returned values is tentatively done using uPickle.
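+
+As an illustrative sketch (the target name and exact paths are hypothetical),
+a single cached target and the files Mill maintains for it:
+
+```scala
+def foo = T{ "hello" + " world" }
+// out/foo/meta.json  <- cached .hashCode & uPickle-serialized "hello world"
+// out/foo/dest/      <- folder reserved for foo's own output files
+```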
+
+### Short-lived build processes
+
+The Mill build process is meant to be run over and over, not only as a
+long-lived daemon/console. That means we must minimize the startup time of the
+process, and that a new process must be able to re-construct the in-memory data
+structures where a previous process left off, in order to continue the build.
+
+Re-construction is done via the hierarchical nature of the build: each `Target`
+`foo.bar.baz` has a fixed position in the build hierarchy, and thus a fixed
+position on disk `out/foo/bar/baz/meta.json`. When the old process dies and a
+new process starts, there will be a new instance of `Target` with the same
+implementation code and same position in the build hierarchy: this new `Target`
+can then load the `out/foo/bar/baz/meta.json` file and pick up where the
+previous process left off.
+
+Minimizing startup time means aggressive caching, as well as minimizing the
+total amount of bytecode used: Mill's current 1-2s startup time is dominated by
+JVM classloading. In future, we may have a long lived console or
+nailgun/drip-based server/client models to speed up interactive usage, but we
+should always keep "cold" startup as fast as possible.
+
+### Static dependency graph and Applicative tasks
+
+`Task`s are *Applicative*, not *Monadic*. There is `.map`, `.zip`, but no
+`.flatMap` operation. That means that we can know the structure of the entire
+dependency graph before we start executing `Task`s. This lets us perform all
+sorts of useful operations on the graph before running it:
+
+- Given a Target the user wants to run, pre-compute and display what targets
+ will be evaluated ("dry run"), without running them
+
+- Automatically parallelize different parts of the dependency graph that do not
+ depend on each other, perhaps even distributing it to different worker
+ machines like Bazel/Pants can
+
+- Visualize the dependency graph easily, e.g. by dumping to a DOT file
+
+- Query the graph, e.g. "why does this thing depend on that other thing?"
+
+- Avoid running tasks "halfway": if a Target's upstream Targets fail, we can
+ skip the Target completely rather than running halfway and then bailing out
+ with an exception
+
+In order to avoid making people use `.map` and `.zip` all over the place when
+defining their `Task`s, we use the `T{...}`/`T.task{...}`/`T.command{...}`
+macros which allow you to use `Task#apply()` within the block to "extract" a
+value.
+
+```scala
+def test() = T.command{
+  TestRunner.apply(
+    "mill.UTestFramework",
+    runDepClasspath().map(_.path) :+ compile().path,
+    Seq(compile().path)
+  )
+}
+```
+
+This is roughly equivalent to the following:
+
+```scala
+def test() = T.command{
+  T.zipMap(runDepClasspath, compile, compile){
+    (runDepClasspath1, compile2, compile3) =>
+      TestRunner.apply(
+        "mill.UTestFramework",
+        runDepClasspath1.map(_.path) :+ compile2.path,
+        Seq(compile3.path)
+      )
+  }
+}
+```
+
+This is similar to SBT's `:=`/`.value` macros, or `scala-async`'s
+`async`/`await`. Like those, the `T{...}` macro should let users program most of
+their code in a "direct" style and have it "automatically" lifted into a graph
+of `Task`s.
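+
+To illustrate the restriction, a hedged sketch (`pick` is hypothetical): with
+only `.map` and `.zip`, a task's dependencies are fixed before evaluation,
+whereas a `.flatMap` would make them depend on runtime values:
+
+```scala
+// Fine: `a` statically depends on exactly `b` and `c`
+val a = b.zip(c).map{ case (bv, cv) => bv + cv }
+
+// Deliberately unsupported: which task `pick(bv)` returns is only known
+// while running, so the full graph could not be known up-front
+// val a = b.flatMap(bv => pick(bv))
+```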
+
+## How Mill aims for Simple
+
+Why should you expect that the Mill build tool can achieve simple, easy &
+flexible, where other build tools in the past have failed?
+
+Build tools inherently encompass a huge number of different concepts:
+
+- What "Tasks" depends on what?
+- How do I define my own tasks?
+- Where do source files come from?
+- What needs to run in what order to do what I want?
+- What can be parallelized and what can't?
+- How do tasks pass data to each other? What data do they pass?
+- What tasks are cached? Where?
+- How are tasks run from the command line?
+- How do you deal with the repetition inherent in a build? (e.g. compile, run &
+ test tasks for every "module")
+- What is a "Module"? How do they relate to "Tasks"?
+- How do you configure a Module to do something different?
+- How are cross-builds (across different configurations) handled?
+
+These are a lot of questions to answer, and we haven't even started talking
+about actually compiling/running any code yet! If each such facet of a build
+was modelled separately, it's easy to have an explosion of different concepts
+that would make a build tool hard to understand.
+
+Before you continue, take a moment to think: how would you answer each of
+those questions using an existing build tool you are familiar with? Different
+tools like [SBT](http://www.scala-sbt.org/),
+[Fake](https://fake.build/legacy-index.html), [Gradle](https://gradle.org/) or
+[Grunt](https://gruntjs.com/) have very different answers.
+
+Mill aims to provide the answer to these questions using as few, and as
+familiar, core concepts as possible. The entire Mill build is oriented around a
+few concepts:
+
+- The Object Hierarchy
+- The Call Graph
+- Instantiating Traits & Classes
+
+These concepts are already familiar to anyone experienced in Scala (or any other
+programming language...), but are enough to answer all of the complicated
+build-related questions listed above.
+
+## The Object Hierarchy
+
+The module hierarchy is the graph of objects, starting from the root of the
+`build.sc` file, that extend `mill.Module`. At the leaves of the hierarchy are
+the `Target`s you can run.
+
+A `Target`'s position in the module hierarchy tells you many things. For
+example, a `Target` at position `core.test.compile` would:
+
+- Cache output metadata at `out/core/test/compile/meta.json`
+
+- Output files to the folder `out/core/test/compile/dest/`
+
+- Have its source files, by default, in a folder within `core/test/`:
+  `core/test/src/`
+
+- Be runnable from the command-line via `mill core.test.compile`
+
+- Be referenced programmatically (from other `Target`s) via `core.test.compile`
+
+From the position of any `Target` within the object hierarchy, you immediately
+know how to run it, find its output files, find any caches, or refer to it from
+other `Target`s. You know up-front where the `Target`'s data "lives" on disk, and
+are sure that it will never clash with any other `Target`'s data.
+
+## The Call Graph
+
+The Scala call graph of "which target references which other target" is core to
+how Mill operates. This graph is reified via the `T{...}` macro to make it
+available to the Mill execution engine at runtime. The call graph tells you:
+
+- Which `Target`s depend on which other `Target`s
+
+- For a given `Target` to be built, what other `Target`s need to be run and in
+ what order
+
+- Which `Target`s can be evaluated in parallel
+
+- What source files need to be watched when using `--watch` on a given target (by
+ tracing the call graph up to the `Source`s)
+
+- What a given `Target` makes available for other `Target`s to depend on (via
+ its return value)
+
+- Defining your own task that depends on others is as simple as `def foo =
+ T{...}`
+
+The call graph within your Scala code is essentially a data-flow graph: by
+defining a snippet of code:
+
+```scala
+val b = ...
+val c = ...
+val d = ...
+val a = f(b, c, d)
+```
+
+you are telling everyone that the value `a` depends on the values of `b`, `c` and
+`d`, processed by `f`. A build tool needs exactly the same data structure:
+knowing what `Target` depends on what other `Target`s, and what processing it
+does on its inputs!
+
+With Mill, you can take the Scala call graph, wrap everything in the `T{...}`
+macro, and get a `Target`-dependency graph that matches exactly the call-graph
+you already had:
+
+```scala
+val b = T{ ... }
+val c = T{ ... }
+val d = T{ ... }
+val a = T{ f(b(), c(), d()) }
+```
+
+Thus, if you are familiar with how data flows through a normal Scala program,
+you already know how data flows through a Mill build! The Mill build evaluation
+may be incremental, it may cache things, it may read and write from disk, but
+the fundamental syntax, and the data-flow that syntax represents, is unchanged
+from your normal Scala code.
+
+## Instantiating Traits & Classes
+
+Classes and traits are a common way of re-using common data structures in Scala:
+if you have a bunch of fields which are related and you want to make multiple
+copies of those fields, you put them in a class/trait and instantiate it over
+and over.
+
+In Mill, inheriting from traits is the primary way for re-using common parts of
+a build:
+
+- Scala "project"s with multiple related `Target`s within them, are just a
+ `Trait` you instantiate
+
+- Replacing the default `Target`s within a project, making them do new
+ things or depend on new `Target`s, is simply `override`-ing them during
+  inheritance.
+
+- Modifying the default `Target`s within a project, making use of the old value
+ to compute the new value, is simply `override`ing them and using `super.foo()`
+
+- Required configuration parameters within a `project` are `abstract` members.
+
+- Cross-builds are modelled as instantiating a (possibly anonymous) class
+ multiple times, each instance with its own distinct set of `Target`s
+
+In normal Scala, you bundle up common fields & functionality into a `class` you
+can instantiate over and over, and you can override the things you want to
+customize. Similarly, in Mill, you bundle up common parts of a build into
+`trait`s you can instantiate over and over, and you can override the things you
+want to customize. "Subprojects", "cross-builds", and many other concepts are
+reduced to simply instantiating a `trait` over and over, with tweaks.
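+
+A minimal sketch of these ideas (module and target names are hypothetical):
+
+```scala
+import mill._
+
+trait GreetModule extends Module{
+  def name: String                     // required configuration: abstract member
+  def greeting = T{ "hello " + name }  // default Target, free to override
+}
+
+object english extends GreetModule{
+  def name = "world"
+}
+object loud extends GreetModule{
+  def name = "world"
+  // modify the default, re-using the old value via `super`
+  def greeting = T{ super.greeting().toUpperCase }
+}
+```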
+
+## Prior Work
+
+### SBT
+
+Mill is built as a substitute for SBT, whose problems are
+[described here](http://www.lihaoyi.com/post/SowhatswrongwithSBT.html).
+Nevertheless, Mill takes on some parts of SBT (builds written in Scala, Task
+graph with an Applicative "idiom bracket" macro) where it makes sense.
+
+### Bazel
+
+Mill is largely inspired by [Bazel](https://bazel.build/). In particular, the
+single-build-hierarchy, where every Target has an on-disk-cache/output-directory
+according to their position in the hierarchy, comes from Bazel.
+
+Bazel is a bit odd in its own right. The underlying data model is good
+(hierarchy + cached dependency graph) but getting there is hell. It (like SBT)
+is also a 3-layer interpretation model, but layers 1 & 2 are almost exactly the
+same: mutable Python which performs global side effects (layer 3 is the same
+dependency-graph evaluator as SBT/Mill).
+
+You end up having to deal with a non-trivial python codebase where everything
+happens via
+
+```python
+do_something(name="blah")
+```
+
+or
+
+```python
+do_other_thing(dependencies=["blah"])
+```
+
+where `"blah"` is a global identifier that is often constructed programmatically
+via string concatenation and passed around. This is quite challenging.
+
+Having the two layers be “just Python” is great since people know Python, but I
+think it is unnecessary to have two layers ("evaluating macros" and "evaluating
+rule impls") that are almost exactly the same, and I think making them interact
+via return values rather than via a global namespace of
+programmatically-constructed strings would make it easier to follow.
+
+With Mill, I’m trying to collapse Bazel’s Python layer 1 & 2 into just 1 layer
+of Scala, and have it define its dependency graph/hierarchy by returning
+values, rather than by calling global-side-effecting APIs. I've had trouble
+trying to teach people how-to-bazel at work, and am pretty sure we can make
+something that's easier to use.
+
+### Scala.Rx
+
+Mill's "direct-style" applicative syntax is inspired by my old
+[Scala.Rx](https://github.com/lihaoyi/scala.rx) project. While there are
+differences (Mill captures the dependency graph lexically using macros, Scala.Rx
+captures it at runtime), they are pretty similar.
+
+The end-goal is the same: to write code in a "direct style" and have it
+automatically "lifted" into a dependency graph, which you can introspect and use
+for incremental updates at runtime.
+
+Scala.Rx is itself built upon the 2010 paper
+[Deprecating the Observer Pattern](https://infoscience.epfl.ch/record/148043/files/DeprecatingObserversTR2010.pdf).
+
+### CBT
+
+Mill looks a lot like [CBT](https://github.com/cvogt/cbt). The inheritance based
+model for customizing `Module`s/`ScalaModule`s comes straight from there, as
+does the "command line path matches Scala selector path" idea. Most other things
+are different though: the reified dependency graph, the execution model, and
+the caching model all follow Bazel more than they do CBT.
diff --git a/docs/intro.md b/docs/intro.md
new file mode 100644
index 00000000..bdd49bc4
--- /dev/null
+++ b/docs/intro.md
@@ -0,0 +1,578 @@
+Mill is a general-purpose build tool. It has built-in support for the
+[Scala](https://www.scala-lang.org/) programming language, and can serve as a
+replacement for [SBT](http://www.scala-sbt.org/), but can also be extended to
+support any other language or platform via modules (written in Java or Scala) or
+through external subprocesses. Mill aims for simplicity by re-using concepts you
+are already familiar with to let you define your project's build. Mill's
+`build.sc` files are Scala scripts.
+
+
+## Hello Mill
+
+The simplest Mill build for a Scala project looks as follows:
+
+```scala
+import mill._
+import mill.scalalib._
+
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+}
+```
+
+This would build a project laid out as follows:
+
+```
+build.sc
+foo/
+ src/
+ Main.scala
+ resources/
+ ...
+out/
+ foo/
+ ...
+```
+
+The source code for this module would live in the `foo/src/` folder, matching
+the name you assigned to the module. Output for this module (compiled files,
+resolved dependency lists, ...) would live in `out/foo/`.
+
+This can be run from the Bash shell via:
+
+```bash
+$ mill foo.compile # compile sources into classfiles
+
+$ mill foo.run # run the main method, if any
+
+$ mill foo.jar # bundle the classfiles into a jar
+
+$ mill foo.assembly # bundle the classfiles and all dependencies into a jar
+```
+
+The most common **tasks** that Mill can run are cached **targets**, such as
+`compile`, and un-cached **commands** such as `foo.run`. Targets do not
+re-evaluate unless one of their inputs changes, whereas commands re-run every
+time.
+
+### Watch and Re-evaluate
+
+You can use the `--watch` flag to make Mill watch a task's inputs, re-evaluating
+the task as necessary when the inputs change:
+
+```bash
+$ mill --watch foo.compile
+$ mill --watch foo.run
+```
+
+### Show Target Output
+
+By default, Mill does not print out the metadata from evaluating a task. Most
+people would not be interested in e.g. viewing the metadata related to
+incremental compilation: they just want to compile their code! However, if you
+want to inspect the build to debug problems, you can make Mill show you the
+metadata output for a task using the `--show` flag:
+
+```bash
+$ mill --show foo.compile
+{
+ "analysisFile": "/Users/lihaoyi/Dropbox/Github/test/out/foo/compile/dest/zinc",
+ "classes": {
+ "path": "/Users/lihaoyi/Dropbox/Github/test/out/foo/compile/dest/classes"
+ }
+}
+```
+
+This also applies to tasks which hold simple configurable values:
+
+```bash
+$ mill --show foo.sources
+[
+ {"path": "/Users/lihaoyi/Dropbox/Github/test/foo/src"}
+]
+
+$ mill --show foo.compileDepClasspath
+[
+ {"path": ".../org/scala-lang/scala-compiler/2.12.4/scala-compiler-2.12.4.jar"},
+ {"path": ".../org/scala-lang/scala-library/2.12.4/scala-library-2.12.4.jar"},
+ {"path": ".../org/scala-lang/scala-reflect/2.12.4/scala-reflect-2.12.4.jar"},
+ {"path": ".../org/scala-lang/modules/scala-xml_2.12/1.0.6/scala-xml_2.12-1.0.6.jar"}
+]
+```
+
+Any flags passed *before* the name of the task (e.g. `foo.compile`) are given to
+Mill, while any arguments passed *after* the task are given to the task itself.
+For example:
+
+```bash
+$ mill --watch foo.run
+```
+
+Makes Mill watch and re-evaluate the `foo.run` task, while `mill foo.run
+--watch` evaluates `foo.run` once and passes it the `--watch` flag. This matches
+the behavior of other executables such as `java` or `python`.
+
+### The Build Repl
+
+```bash
+$ mill
+Loading...
+@ foo
+res1: foo.type = ammonite.predef.build#foo:2
+Commands:
+ .runLocal(args: String*)()
+ .run(args: String*)()
+ .runMainLocal(mainClass: String, args: String*)()
+ .runMain(mainClass: String, args: String*)()
+ .console()()
+Targets:
+ .allSources()
+ .artifactId()
+ .artifactName()
+...
+
+@ foo.compile
+res3: mill.package.T[mill.scalalib.CompilationResult] = mill.scalalib.ScalaModule#compile:152
+Inputs:
+ foo.scalaVersion
+ foo.allSources
+ foo.compileDepClasspath
+...
+
+@ foo.compile()
+res2: mill.scalalib.CompilationResult = CompilationResult(
+ root/'Users/'lihaoyi/'Dropbox/'Github/'test/'out/'foo/'compile/'dest/'zinc,
+ PathRef(root/'Users/'lihaoyi/'Dropbox/'Github/'test/'out/'foo/'compile/'dest/'classes, false)
+)
+```
+
+You can run `mill` alone to open a build REPL; this is a Scala console with your
+`build.sc` loaded, which lets you run tasks interactively. The task-running
+syntax is slightly different from the command-line, but more in line with how
+you would depend on tasks from within your build file.
+
+You can use this REPL to run build commands more quickly, since the JVM stays warm
+between runs, or to interactively explore your build to see what is available.
+
+
+## Configuring Mill
+
+You can configure your Mill build in a number of ways:
+
+### Compilation & Execution Flags
+
+```scala
+import mill._
+import mill.scalalib._
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+
+ def scalacOptions = Seq("-Ydelambdafy:inline")
+
+ def forkArgs = Seq("-Xmx4g")
+
+ def forkEnv = Map("HELLO_MY_ENV_VAR" -> "WORLD")
+}
+```
+
+You can pass flags to the Scala compiler via `scalacOptions`. By default,
+`foo.run` runs the compiled code in a subprocess, and you can pass in JVM flags
+via `forkArgs` or environment-variables via `forkEnv`.
+
+You can also run your code via
+
+```bash
+mill foo.runLocal
+```
+
+Which runs it in-process within an isolated classloader. This may be faster
+since you avoid the JVM startup, but does not support `forkArgs` or `forkEnv`.
+
+### Adding Ivy Dependencies
+
+```scala
+import mill._
+import mill.scalalib._
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+ def ivyDeps = Agg(
+ ivy"com.lihaoyi::upickle:0.5.1",
+ ivy"com.lihaoyi::pprint:0.5.2",
+ ivy"com.lihaoyi::fansi:0.2.4"
+ )
+}
+```
+
+You can define the `ivyDeps` field to add ivy dependencies to your module. The
+`ivy"com.lihaoyi::upickle:0.5.1"` syntax (with `::`) represents Scala
+dependencies; for Java dependencies you would use a single `:` e.g.
+`ivy"com.lihaoyi:upickle:0.5.1"`.
+
+By default these are resolved from Maven Central, but you can add your own
+resolvers by overriding the `repositories` definition in the module:
+
+```scala
+def repositories = super.repositories ++ Seq(
+ MavenRepository("https://oss.sonatype.org/content/repositories/releases")
+)
+```
+
+### Adding a Test Suite
+
+```scala
+import mill._
+import mill.scalalib._
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+
+ object test extends Tests{
+ def ivyDeps = Agg(ivy"com.lihaoyi::utest:0.6.0")
+ def testFramework = "mill.UTestFramework"
+ }
+}
+```
+
+You can define a test suite by creating a nested module extending `Tests`, and
+specifying the ivy coordinates and name of your test framework. This expects the
+tests to be laid out as follows:
+
+```
+build.sc
+foo/
+ src/
+ Main.scala
+ resources/
+ ...
+ test/
+ src/
+ MainTest.scala
+ resources/
+ ...
+out/
+ foo/
+ ...
+ test/
+ ...
+```
+
+The above example can be run via
+
+```bash
+mill foo.test
+```
+
+By default, tests are run in a subprocess, and `forkArgs` and `forkEnv` can be
+overridden to pass JVM flags & environment variables. You can also use
+
+```bash
+mill foo.test.testLocal
+```
+
+To run tests in-process in an isolated classloader.
+
+You can define multiple test suites if you want, e.g.:
+
+```scala
+import mill._
+import mill.scalalib._
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+
+ object test extends Tests{
+ def ivyDeps = Agg(ivy"com.lihaoyi::utest:0.6.0")
+ def testFramework = "mill.UTestFramework"
+ }
+ object integration extends Tests{
+ def ivyDeps = Agg(ivy"com.lihaoyi::utest:0.6.0")
+ def testFramework = "mill.UTestFramework"
+ }
+}
+```
+
+Each of these expects its sources to be in the respective `foo/test` or
+`foo/integration` folder.
+
+`Tests` modules are `ScalaModule`s like any other, and all the same
+configuration options apply.
+
+### Multiple Modules
+
+```scala
+import mill._
+import mill.scalalib._
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+}
+object bar extends ScalaModule {
+ def moduleDeps = Seq(foo)
+ def scalaVersion = "2.12.4"
+}
+```
+
+You can define multiple modules the same way you define a single module, using
+`def moduleDeps` to define the relationship between them. The above build
+expects the following project layout:
+
+```
+build.sc
+foo/
+ src/
+ Main.scala
+ resources/
+ ...
+bar/
+ src/
+ Main2.scala
+ resources/
+ ...
+out/
+ foo/
+ ...
+ bar/
+ ...
+```
+
+Mill's evaluator will ensure that the modules are compiled in the right order,
+and re-compiled as necessary when source code in each module changes.
+
+Modules can also be nested:
+
+```scala
+import mill._
+import mill.scalalib._
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+ object bar extends ScalaModule {
+ def moduleDeps = Seq(foo)
+ def scalaVersion = "2.12.4"
+ }
+}
+```
+
+Which would result in a similarly nested project layout:
+
+```
+build.sc
+foo/
+ src/
+ Main.scala
+ resources/
+ ...
+ bar/
+ src/
+ Main2.scala
+ resources/
+ ...
+out/
+ foo/
+ ...
+ bar/
+ ...
+```
+
+### Scala Compiler Plugins
+
+```scala
+import mill._
+import mill.scalalib._
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+
+ def compileIvyDeps = Agg(ivy"com.lihaoyi::acyclic:0.1.7")
+ def scalacOptions = Seq("-P:acyclic:force")
+ def scalacPluginIvyDeps = Agg(ivy"com.lihaoyi::acyclic:0.1.7")
+}
+```
+
+You can use Scala compiler plugins by setting `scalacPluginIvyDeps`. The above
+example also adds the plugin to `compileIvyDeps`, since that plugin's artifact
+is needed on the compilation classpath (though not at runtime).
+
+### Common Configuration
+
+```scala
+import mill._
+import mill.scalalib._
+trait CommonModule extends ScalaModule{
+ def scalaVersion = "2.12.4"
+}
+
+object foo extends CommonModule
+object bar extends CommonModule {
+ def moduleDeps = Seq(foo)
+}
+```
+
+You can extract out configuration common to multiple modules into a `trait` that
+those modules extend. This is useful for providing convenience & ensuring
+consistent configuration: every module often has the same Scala version, uses
+the same testing framework, etc. and all that can be extracted out into the
+`trait`.
+
+### Custom Tasks
+
+```scala
+import mill._
+import mill.scalalib._
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+}
+
+def lineCount = T{
+ import ammonite.ops._
+ foo.sources().flatMap(ref => ls.rec(ref.path)).flatMap(read.lines).size
+}
+
+def printLineCount() = T.command{
+ println(lineCount())
+}
+```
+
+You can define new cached Targets using the `T{...}` syntax, depending on
+existing Targets e.g. `foo.sources` via the `foo.sources()` syntax to extract
+their current value, as shown in `lineCount` above. The return-type of a Target
+has to be JSON-serializable (using
+[uPickle](https://github.com/lihaoyi/upickle)) and the Target is cached when
+first run until its inputs change (in this case, if someone edits the
+`foo.sources` files which live in `foo/src`). Cached Targets cannot take
+parameters.
+
+You can print the value of your custom target using `--show`, e.g.
+
+```bash
+mill --show lineCount
+```
+
+You can define new un-cached Commands using the `T.command{...}` syntax. These
+are un-cached and re-evaluate every time you run them, but can take parameters.
+Their return type needs to be JSON-writable as well, or `Unit` if you want to
+return nothing.
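+
+For example, a hedged sketch of a parameterized command (the `greet` name is
+hypothetical; it re-uses the `lineCount` target defined above):
+
+```scala
+def greet(name: String) = T.command{
+  println("hello " + name + "! source lines: " + lineCount())
+}
+```
+
+Per the flag-ordering rule above, arguments after the task name would be passed
+to the command itself, e.g. something like `mill greet Alice`.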
+
+### Custom Modules
+
+```scala
+import mill._
+import mill.scalalib._
+object qux extends Module{
+ object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+ }
+ object bar extends ScalaModule {
+ def moduleDeps = Seq(foo)
+ def scalaVersion = "2.12.4"
+ }
+}
+```
+
+Not every Module needs to be a `ScalaModule`; sometimes you just want to group
+things together for neatness. In the above example, you can run `foo` and `bar`
+namespaced inside `qux`:
+
+```bash
+mill qux.foo.compile
+mill qux.bar.run
+```
+
+You can also define your own module traits, with their own set of custom tasks,
+to represent other things, e.g. JavaScript bundles, Docker image building, etc.:
+
+```scala
+trait MySpecialModule extends Module{
+ ...
+}
+object foo extends MySpecialModule
+object bar extends MySpecialModule
+```
+
+### Overriding Tasks
+
+```scala
+import mill._
+import mill.scalalib._
+
+object foo extends ScalaModule {
+ def scalaVersion = "2.12.4"
+ def compile = T{
+ println("Compiling...")
+ super.compile()
+ }
+ def run(args: String*) = T.command{
+ println("Running... + args.mkString(" "))
+ super.run(args:_*)
+ }
+}
+```
+
+You can re-define targets and commands to override them, and use `super` if you
+want to refer to the originally defined task. The above example shows how to
+override `compile` and `run` to add additional logging messages.
+
+In Mill builds the `override` keyword is optional.
+
+### Publishing Modules
+
+## Common Project Layouts
+
+Above, we have shown how to work with the Mill default Scala module layout. Here
+we will explore some other common project layouts that you may want in your
+Scala build:
+
+### Cross Scala-Version Modules
+
+### Scala.js Modules
+
+### Cross Scala-JVM/Scala.js Modules
+
+### Cross Scala-Version Scala-JVM/JS Modules
+
+### SBT-Compatible Modules
+
+### SBT-Compatible Cross Scala-Version Modules
+
+### SBT-Compatible Cross Scala-Version Scala-JVM/JS Modules
+
+
+
+## Example Builds
+
+Mill comes bundled with example builds for existing open-source projects, as
+integration tests and examples:
+
+
+### Acyclic
+
+- [Mill Build](https://github.com/lihaoyi/mill/blob/master/integration/test/resources/acyclic/build.sc#L1)
+
+A small single-module cross-build, with few sources and minimal dependencies.
+
+
+### Better-Files
+
+- [Mill Build](https://github.com/lihaoyi/mill/blob/master/integration/test/resources/better-files/build.sc#L1)
+
+A collection of small modules compiled for a single Scala version.
+
+Also demonstrates how to define shared configuration in a `trait`, enable Scala
+compiler flags, and download artifacts as part of the build.
+
+### Jawn
+
+- [Mill Build](https://github.com/lihaoyi/mill/blob/master/integration/test/resources/jawn/build.sc#L1)
+
+A collection of relatively small modules, all cross-built across the same few
+versions of Scala.
+
+
+### Ammonite
+
+- [Mill Build](https://github.com/lihaoyi/mill/blob/master/integration/test/resources/ammonite/build.sc#L1)
+
+A relatively complex build with numerous submodules, some cross-built across
+Scala major versions while others are cross-built against Scala minor versions.
+
+Also demonstrates how to pass one module's compiled artifacts to the
+`run`/`test` commands of another, via their `forkEnv`.
+
+## Extending Mill
\ No newline at end of file
diff --git a/docs/modules.md b/docs/modules.md
new file mode 100644
index 00000000..86f807de
--- /dev/null
+++ b/docs/modules.md
@@ -0,0 +1,125 @@
+Mill modules are `object`s extending `mill.Module`, and let you group related
+tasks together to keep things neat and organized. Mill comes with built-in
+modules such as `mill.scalalib.ScalaModule` and `mill.scalalib.CrossSbtModule`,
+but you can use modules for other purposes as well.
+
+## Using Modules
+
+The path to a Mill module from the root of your build file corresponds to the
+path you would use to run tasks within that module from the command line. e.g.
+for the following build:
+
+```scala
+object foo extends mill.Module{
+ def bar = T{ "hello" }
+ object baz extends mill.Module{
+ def qux = T{ "world" }
+ }
+}
+```
+
+You would be able to run the two targets via `mill foo.bar` or `mill
+foo.baz.qux`. You can use `mill --show foo.bar` or `mill --show foo.baz.qux` to
+make Mill echo out the string value being returned by each Target. The two
+targets will store their output metadata & files at `./out/foo/bar` and
+`./out/foo/baz/qux` respectively.
+
+Modules also provide a way to define and re-use common collections of tasks, via
+Scala `trait`s. For example, you can define your own `FooModule` trait:
+
+```scala
+trait FooModule extends mill.Module{
+ def bar = T{ "hello" }
+ def baz = T{ "world" }
+}
+```
+
+And use it to define multiple modules with the same `bar` and `baz` targets,
+along with any other customizations such as `qux`:
+
+```scala
+object foo1 extends FooModule
+object foo2 extends FooModule{
+ def qux = T{ "I am Cow" }
+}
+```
+
+This would make the following targets available from the command line
+
+- `mill --show foo1.bar`
+- `mill --show foo1.baz`
+- `mill --show foo2.bar`
+- `mill --show foo2.baz`
+- `mill --show foo2.qux`
+
+The built in `mill.scalalib` package uses this to define
+`mill.scalalib.ScalaModule`, `mill.scalalib.SbtModule` and
+`mill.scalalib.TestScalaModule`, all of which contain a set of "standard"
+operations such as `compile`, `jar` or `assembly` that you may expect from a
+typical Scala module.
+
+## Overriding Targets
+
+```scala
+trait BaseModule extends Module {
+ def foo = T{ Seq("base") }
+ def cmd(i: Int) = T.command{ Seq("base" + i) }
+}
+
+object canOverrideSuper extends BaseModule {
+ def foo = T{ super.foo() ++ Seq("object") }
+ def cmd(i: Int) = T.command{ super.cmd(i)() ++ Seq("object" + i) }
+}
+```
+
+You can override targets and commands to customize them or change what they do.
+The overridden version is available via `super`. You can omit the `override`
+keyword in Mill builds.
+
+## basePath
+
+Each Module has a `basePath` field that corresponds to the path where that
+module expects its input files to live on disk. Re-visiting our examples above:
+
+```scala
+object foo extends mill.Module{
+ def bar = T{ "hello" }
+ object baz extends mill.Module{
+ def qux = T{ "world" }
+ }
+}
+```
+
+The `foo` module has a `basePath` of `./foo`, while the `foo.baz` module has a
+`basePath` of `./foo/baz`.
+
+You can use `basePath` to automatically set the source directories of your
+modules to match the build structure. You are not forced to rigidly use
+`basePath` to define the source folders of all your code, but it can simplify
+the common case where you probably want your build layout and on-disk layout to
+be the same.
+
+e.g. for `mill.scalalib.ScalaModule`, the Scala source code is assumed by
+default to be in `basePath/"src"` while resources are automatically assumed to
+be in `basePath/"resources"`.
+
+You can override `basePath`:
+
+```scala
+object foo extends mill.Module{
+ def basePath = super.basePath / "lols"
+ def bar = T{ "hello" }
+ object baz extends mill.Module{
+ def qux = T{ "world" }
+ }
+}
+```
+
+And any overrides propagate down to the module's children: in the above example,
+module `foo` would have its `basePath` be `./foo/lols` while module `foo.baz`
+would have its `basePath` be `./foo/lols/baz`.
+
+Note that `basePath` is generally only used for a module's input source files.
+Output is always in the `out/` folder and cannot be changed, e.g. even with the
+overridden `basePath` the output paths are still the default `./out/foo/bar` and
+`./out/foo/baz/qux` folders.
\ No newline at end of file
diff --git a/docs/publishing.md b/docs/publishing.md
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/docs/publishing.md
diff --git a/docs/tasks.md b/docs/tasks.md
new file mode 100644
index 00000000..ef9270bb
--- /dev/null
+++ b/docs/tasks.md
@@ -0,0 +1,310 @@
+One of Mill's core abstractions is its *Task Graph*: this is how Mill defines,
+orders and caches work it needs to do, and exists independently of any support
+for building Scala.
+
+The following is a simple self-contained example using Mill to compile Java:
+
+```scala
+import ammonite.ops._, mill._
+
+def sourceRootPath = pwd / 'src
+def resourceRootPath = pwd / 'resources
+
+def sourceRoot = T.source{ sourceRootPath }
+
+def resourceRoot = T.source{ resourceRootPath }
+
+def allSources = T{ ls.rec(sourceRoot().path).map(PathRef(_)) }
+
+def classFiles = T{
+  mkdir(T.ctx().dest)
+  %("javac", allSources().map(_.path.toString()), "-d", T.ctx().dest)(wd = T.ctx().dest)
+  PathRef(T.ctx().dest)
+}
+
+def jar = T{ mill.modules.Jvm.createJar(Agg(resourceRoot().path, classFiles().path)) }
+
+def run(mainClsName: String) = T.command{
+ %%('java, "-cp", classFiles().path, mainClsName)
+}
+```
+
+Here, we have two `T.source`s, `sourceRoot` and `resourceRoot`, which act as the
+roots of our task graph. `allSources` depends on `sourceRoot` by calling
+`sourceRoot()` to extract its value, `classFiles` depends on `allSources` the
+same way, and `jar` depends on both `classFiles` and `resourceRoot`.
+
+Filesystem operations in Mill are done using the
+[Ammonite-Ops](http://ammonite.io/#Ammonite-Ops) library.
+
+The above build defines the following task graph:
+
+```
+sourceRoot -> allSources -> classFiles
+                                |
+                                v
+          resourceRoot ----> jar
+```
+
+When you first evaluate `jar` (e.g. via `mill jar` at the command line), it will
+evaluate all the defined targets: `sourceRoot`, `allSources`, `classFiles`,
+`resourceRoot` and `jar`.
+
+Subsequent `mill jar` runs will evaluate only as much as is necessary, depending on
+what input sources changed:
+
+- If the files in `sourceRoot` change, it will re-evaluate `allSources`,
+ compiling to `classFiles`, and building the `jar`
+
+- If the files in `resourceRoot` change, it will only re-evaluate `jar` and use
+ the cached output of `allSources` and `classFiles`
+
+Apart from the `foo()` call-sites which define what each target depends on, the
+code within each `T{...}` wrapper is arbitrary Scala code that can compute an
+arbitrary result from its inputs.
+
+## Different Kinds of Tasks
+
+There are three primary kinds of *Tasks* that you should care about:
+
+- [Targets](#targets), defined using `T{...}`
+- [Sources](#sources), defined using `T.source{...}`
+- [Commands](#commands), defined using `T.command{...}`
+
+### Targets
+
+```scala
+def allSources = T{ ls.rec(sourceRoot().path).map(PathRef(_)) }
+```
+
+`Target`s are defined using the `def foo = T{...}` syntax, and dependencies on
+other targets are defined using `foo()` to extract the value from them. Apart
+from the `foo()` calls, the `T{...}` block contains arbitrary code that does
+some work and returns a result.
+
+Each target e.g. `classFiles` is assigned a path on disk as scratch space & to
+store its output files at `out/classFiles/dest/`, and its returned metadata is
+automatically JSON-serialized and stored at `out/classFiles/meta.json`. The
+return-value of targets has to be JSON-serializable via
+[uPickle](https://github.com/lihaoyi/upickle).
+
+If you want to return a file or a set of files as the result of a `Target`,
+write them to disk within your `T.ctx().dest` available through the
+[Task Context API](#task-context-api) and return a `PathRef` to the files you
+wrote.
+
+If a target's inputs change but its output does not, e.g. someone changes a
+comment within the source files that doesn't affect the classfiles, then
+downstream targets do not re-evaluate. This is determined using the `.hashCode`
+of the Target's return value. For targets returning `ammonite.ops.Path`s that
+reference files on disk, you can wrap the `Path` in a `PathRef` (shown above)
+whose `.hashCode()` will include the hashes of all files on disk at time of
+creation.
+
+The graph of inter-dependent targets is evaluated in topological order; that
+means that the body of a target will not even begin to evaluate if one of its
+upstream dependencies has failed. This is unlike normal Scala functions: a plain
+old function `foo` would evaluate halfway and then blow up if one of `foo`'s
+dependencies throws an exception.
+
+Targets cannot take parameters and must be 0-argument `def`s defined directly
+within a `Module` body.
+
+### Sources
+
+```scala
+def sourceRootPath = pwd / 'src
+
+def sourceRoot = T.source{ sourceRootPath }
+```
+
+`Source`s are defined using `T.source{ ... }`, taking an `ammonite.ops.Path` as
+an input. A `Source` is a subclass of `Target[PathRef]`: this means that its
+build signature/`hashCode` depends not just on the path it refers to (e.g.
+`foo/bar/baz`) but also the MD5 hash of the filesystem tree under that path.
+
+### Commands
+
+```scala
+def run(mainClsName: String) = T.command{
+ %%('java, "-cp", classFiles().path, mainClsName)
+}
+```
+
+Defined using `T.command{ ... }` syntax, `Command`s can run arbitrary code, with
+dependencies declared using the same `foo()` syntax (e.g. `classFiles()` above).
+Commands can be parametrized, but their output is not cached, so they will
+re-evaluate every time even if none of their inputs have changed.
+
+Like [Targets](#targets), a command only evaluates after all its upstream
+dependencies have completed, and will not begin to run if any upstream
+dependency has failed.
+
+Commands are assigned the same scratch/output directory `out/run/dest/` as
+Targets are, and their returned metadata is stored at the same `out/run/meta.json`
+path for consumption by external tools.
+
+Commands can only be defined directly within a `Module` body.
+
+## Task Context API
+
+There are several APIs available to you within the body of a `T{...}` or
+`T.command{...}` block to help you write the code implementing your Target or
+Command:
+
+### mill.util.Ctx.DefCtx
+
+- `T.ctx().dest`
+- `implicitly[mill.util.Ctx.DefCtx]`
+
+This is the unique `out/classFiles/dest/` path or `out/run/dest/` path that is
+assigned to every Target or Command. It is cleared before your task runs, and
+you can use it as a scratch space for temporary files or a place to put returned
+artifacts. This is guaranteed to be unique for every `Target` or `Command`, so
+you can be sure that you will not collide or interfere with anyone else writing
+to those same paths.
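+
+A minimal sketch (the target name is hypothetical) of a target using its `dest`
+folder as scratch space and returning the result as a `PathRef`:
+
+```scala
+def versionFile = T{
+  import ammonite.ops._
+  mkdir(T.ctx().dest)                           // dest/ was cleared before this run
+  write(T.ctx().dest / "version.txt", "0.1.0")
+  PathRef(T.ctx().dest / "version.txt")
+}
+```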
+
+### mill.util.Ctx.LogCtx
+
+- `T.ctx().log`
+- `implicitly[mill.util.Ctx.LogCtx]`
+
+This is the default logger provided for every task. While your task is running,
+`System.out` and `System.err` are also redirected to this logger. The logs for a
+task are streamed to standard out/error as you would expect, but each task's
+specific output is also streamed to a log file on disk e.g. `out/run/log` or
+`out/classFiles/log` for you to inspect later.
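+
+A hedged sketch of using the logger (assuming it exposes an `info` method; the
+target name is hypothetical):
+
+```scala
+def noisy = T{
+  T.ctx().log.info("starting work")  // shown on the console and in out/noisy/log
+  1 + 1
+}
+```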
+
+## Other Tasks
+
+- [Anonymous Tasks](#anonymous-tasks), defined using `T.task{...}`
+- [Persistent Targets](#persistent-targets)
+- [Inputs](#inputs)
+- [Workers](#workers)
+
+
+### Anonymous Tasks
+
+```scala
+def foo(x: Int) = T.task{ ... x ... bar() ... }
+```
+
+You can define anonymous tasks using the `T.task{ ... }` syntax. These are not
+runnable from the command-line, but can be used to share common code you find
+yourself repeating in `Target`s and `Command`s.
+
+```scala
+def downstreamTarget = T{ ... foo(1)() ... }
+def downstreamCommand = T.command{ ... foo(1)() ... }
+```
+
+Anonymous tasks' output does not need to be JSON-serializable, their output is
+not cached, and they can be defined with or without arguments. Unlike
+[Targets](#targets) or [Commands](#commands), anonymous tasks can be defined
+anywhere and passed around any way you want, until you finally make use of them
+within a downstream target or command.
+
+While an anonymous task `foo`'s own output is not cached, if it is used in a
+downstream target `bar` and the upstream targets `baz` and `qux` haven't changed,
+`bar`'s cached output will be used and `foo`'s evaluation will be skipped
+altogether.
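+
+As a more concrete (hypothetical) sketch, a parameterized anonymous task shared
+between a target and a command:
+
+```scala
+def bar = T{ 10 }
+def plusBar(n: Int) = T.task{ n + bar() }  // not runnable or cached on its own
+
+def fromTarget = T{ plusBar(1)() + 100 }                 // used within a Target
+def fromCommand() = T.command{ println(plusBar(5)()) }   // or within a Command
+```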
+
+### Persistent Targets
+
+```scala
+def foo = T.persistent{ ... }
+```
+
+Identical to [Targets](#targets), except that the `dest/` directory is not
+cleared in between runs.
+
+This is useful if you are running external incremental-compilers, such as
+Scala's [Zinc](https://github.com/sbt/zinc) or JavaScript's
+[WebPack](https://webpack.js.org/), which rely on filesystem caches to speed up
+incremental execution of their particular build step.
+
+Since Mill no longer forces a "clean slate" re-evaluation of `T.persistent`
+targets, it is up to you to ensure your code (or the third-party incremental
+compilers you rely on!) are deterministic. They should always converge to the
+same outputs for a given set of inputs, regardless of what builds and what
+filesystem states existed before.
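+
+A hedged sketch (names hypothetical) of a persistent target that skips
+expensive work when its `dest/` already contains a previous result:
+
+```scala
+def bigFile = T.persistent{
+  import ammonite.ops._
+  mkdir(T.ctx().dest)
+  val cached = T.ctx().dest / "data.txt"
+  // dest/ survives between runs, so this branch only runs the first time
+  if (!exists(cached)) write(cached, "pretend this took minutes to produce")
+  PathRef(cached)
+}
+```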
+
+### Inputs
+
+```scala
+def foo = T.input{ ... }
+```
+
+A generalization of [Sources](#sources), `T.input`s are tasks that re-evaluate
+*every time* (unlike [Anonymous Tasks](#anonymous-tasks)), containing an
+arbitrary block of code.
+
+Inputs can be used to force re-evaluation of some external property that may
+affect your build. For example, if I have a [Target](#targets) `bar` that makes
+use of the current git version:
+
+```scala
+def bar = T{ ... %%("git", "rev-parse", "HEAD").out.string ... }
+```
+
+`bar` will not know that `git rev-parse` can change, and will
+not know to re-evaluate when your `git rev-parse HEAD` *does* change. This means
+`bar` will continue to use any previously cached value, and `bar`'s output will
+be out of date!
+
+To fix this, you can wrap your `git rev-parse HEAD` in a `T.input`:
+
+```scala
+def foo = T.input{ %%("git", "rev-parse", "HEAD").out.string }
+def bar = T{ ... foo() ... }
+```
+
+This makes `foo` re-evaluate on every build; if `git rev-parse HEAD`
+does not change, that will not invalidate `bar`'s caches. But if `git rev-parse
+HEAD` *does* change, `foo`'s output will change and `bar` will be correctly
+invalidated and re-compute using the new version of `foo`.
+
+Note that because `T.input`s re-evaluate every time, you should ensure that the
+code you put in `T.input` runs quickly. Ideally it should just be a simple check
+"did anything change?" and any heavy-lifting can be delegated to downstream
+targets.
+
+### Workers
+
+```scala
+def foo = T.worker{ ... }
+```
+
+Most tasks dispose of their in-memory return-value every evaluation; in the case
+of [Targets](#targets), this is stored on disk and loaded next time if
+necessary, while [Commands](#commands) just re-compute them each time. Even if
+you use `--watch` or the Build REPL to keep the Mill process running, all this
+state is still discarded and re-built every evaluation.
+
+Workers are unique in that they store their in-memory return-value between
+evaluations. This makes them useful for storing in-memory caches or references
+to long-lived external worker processes that you can re-use.
+
+Mill uses workers to manage long-lived instances of the
+[Zinc Incremental Scala Compiler](https://github.com/sbt/zinc) and the
+[Scala.js Optimizer](https://github.com/scala-js/scala-js). This lets us keep
+them in-memory with warm caches and fast incremental execution.
+
+Like [Persistent Targets](#persistent-targets), Workers inherently involve
+mutable state, and it is up to the implementation to ensure that this mutable
+state is only used for caching/performance and does not affect the
+externally-visible behavior of the worker.
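+
+A purely illustrative sketch (all names hypothetical) of a worker holding an
+in-memory cache that survives between evaluations:
+
+```scala
+// lives in memory for as long as the Mill process does
+class HashWorker{
+  val cache = collection.mutable.Map.empty[String, Int]
+  def hash(s: String) = cache.getOrElseUpdate(s, {
+    println("computing hash for " + s)  // only printed on a cache miss
+    s.hashCode
+  })
+}
+
+def hashWorker = T.worker{ new HashWorker }
+
+def hashed = T{ hashWorker().hash("hello") }
+```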
+
+## Cheat Sheet
+
+The following table might help you make sense of the small collection of
+different Task types:
+
+| | Target | Command | Source/Input | Anonymous Task | Persistent Target | Worker |
+|:-------------------------------|:-------|:--------|:-------------|:---------------|:------------------|:-------|
+| Cached on Disk | X | X | | | X | |
+| Must be JSON Writable | X | X | | | X | |
+| Must be JSON Readable | X | | | | X | |
+| Runnable from the Command Line | X | X | | | X | |
+| Can Take Arguments | | X | | X | | |
+| Cached between Evaluations | | | | | | X |
+
diff --git a/integration/test/resources/ammonite/build.sc b/integration/test/resources/ammonite/build.sc
index ebc264d4..4b8db461 100644
--- a/integration/test/resources/ammonite/build.sc
+++ b/integration/test/resources/ammonite/build.sc
@@ -150,11 +150,6 @@ class ShellModule(val crossScalaVersion: String) extends AmmModule{
object integration extends Cross[IntegrationModule](fullCrossScalaVersions:_*)
class IntegrationModule(val crossScalaVersion: String) extends AmmModule{
def moduleDeps = Seq(ops(), amm())
- // (test in Test) := (test in Test).dependsOn(integrationTasks:_*).value,
- // (run in Test) := (run in Test).dependsOn(integrationTasks:_*).evaluated,
- // (testOnly in Test) := (testOnly in Test).dependsOn(integrationTasks:_*).evaluated,
- // (console in Test) := (console in Test).dependsOn(integrationTasks:_*).value,
- // initialCommands in (Test, console) := "ammonite.integration.Main.main(null)"
object test extends Tests {
def forkEnv = super.forkEnv() ++ Seq(
"AMMONITE_TEST_SHELL" -> shell().jar().path.toString,
diff --git a/integration/test/resources/jawn/build.sc b/integration/test/resources/jawn/build.sc
index 4898a3cb..0c4bdfca 100644
--- a/integration/test/resources/jawn/build.sc
+++ b/integration/test/resources/jawn/build.sc
@@ -1,9 +1,10 @@
import mill.scalalib
import mill.Cross
import mill.scalalib.{Dep, TestModule, DepSyntax}
+import ammonite.ops.up
object jawn extends Cross[JawnModule]("2.10.6", "2.11.11", "2.12.3")
class JawnModule(crossVersion: String) extends mill.Module{
- override def basePath = super.basePath / ammonite.ops.up / ammonite.ops.up
+ override def basePath = super.basePath / up / up
trait JawnModule extends scalalib.SbtModule{
def scalaVersion = crossVersion
@@ -52,7 +53,7 @@ class JawnModule(crossVersion: String) extends mill.Module{
object rojoma extends Support(ivy"com.rojoma::rojoma-json:2.4.3")
object rojomaV3 extends Support(ivy"com.rojoma::rojoma-json-v3:3.7.2"){
- override def basePath = super.basePath / ammonite.ops.up / "rojoma-v3"
+ override def basePath = super.basePath / up / "rojoma-v3"
}
object spray extends Support(ivy"io.spray::spray-json:1.3.3")
}
diff --git a/readme.md b/readme.md
index b1ef0ff8..794ba567 100644
--- a/readme.md
+++ b/readme.md
@@ -328,354 +328,6 @@ restart from scratch removing the `out` directory:
rm -rf out/
```
-## Mill Design Principles
-
-A lot of mills design principles are intended to fix SBT's flaws, as described
-in http://www.lihaoyi.com/post/SowhatswrongwithSBT.html. Before working on Mill,
-read through that post to understand where it is coming from!
-
-### Dependency graph first
-
-Mill's most important abstraction is the dependency graph of `Task`s.
-Constructed using the `T{...}` `T.task{...}` `T.command{...}` syntax, these
-track the dependencies between steps of a build, so those steps can be executed
-in the correct order, queried, or parallelized.
-
-While Mill provides helpers like `ScalaModule` and other things you can use to
-quickly instantiate a bunch of related tasks (resolve dependencies, find
-sources, compile, package into jar, ...) these are secondary. When Mill
-executes, the dependency graph is what matters: any other mode of organization
-(hierarchies, modules, inheritence, etc.) is only important to create this
-dependency graph of `Task`s.
-
-### Builds are hierarchical
-
-The syntax for running targets from the command line `mill Foo.bar.baz` is
-the same as referencing a target in Scala code, `Foo.bar.baz`
-
-Everything that you can run from the command line lives in an object hierarchy
-in your `build.sc` file. Different parts of the hierarchy can have different
-`Target`s available: just add a new `def foo = T{...}` somewhere and you'll be
-able to run it.
-
-Cross builds, using the `Cross` data structure, are just another kind of node in
-the object hierarchy. The only difference is syntax: from the command line you'd
-run something via `mill core.cross[a].printIt` while from code you use
-`core.cross("a").printIt` due to different restrictions in Scala/Bash syntax.
-
-### Caching by default
-
-Every `Target` in a build, defined by `def foo = T{...}`, is cached by default.
-Currently this is done using a `foo/meta.json` file in the `out/` folder. The
-`Target` is also provided a `foo/` path on the filesystem dedicated to it, for
-it to store output files etc.
-
-This happens whether you want it to or not. Every `Target` is cached, not just
-the "slow" ones like `compile` or `assembly`.
-
-Caching is keyed on the `.hashCode` of the returned value. `Target`s that
-return the contents of a file/folder on disk return `PathRef` instances
-whose hashcode is based on the hash of the disk contents. Serialization of the
-returned values is tentatively done using uPickle.
-
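-As a concrete illustration, here is a minimal sketch of a cached `Target` (the
-`lineCount` name and logic are hypothetical, not from any real build):
-
-```scala
-import mill._
-import ammonite.ops._
-
-// Cached in out/lineCount/meta.json, with a dedicated out/lineCount/ folder
-// for output files; re-evaluated only when its inputs change
-def lineCount = T{
-  read(pwd / "readme.md").split('\n').length
-}
-```
-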
-### Short-lived build processes
-
-The Mill build process is meant to be run over and over, not only as a
-long-lived daemon/console. That means we must minimize the startup time of the
-process, and that a new process must be able to re-construct the in-memory data
-structures where a previous process left off, in order to continue the build.
-
-Re-construction is done via the hierarchical nature of the build: each `Target`
-`foo.bar.baz` has a fixed position in the build hierarchy, and thus a fixed
-position on disk `out/foo/bar/baz/meta.json`. When the old process dies and a
-new process starts, there will be a new instance of `Target` with the same
-implementation code and same position in the build hierarchy: this new `Target`
-can then load the `out/foo/bar/baz/meta.json` file and pick up where the
-previous process left off.
-
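-In other words, the on-disk location of a `Target`'s metadata is a pure
-function of its position in the hierarchy; a sketch of that mapping (segment
-names invented for illustration):
-
-```scala
-import ammonite.ops._
-
-val segments = Seq("foo", "bar", "baz")
-// => out/foo/bar/baz/meta.json, identical for every new build process
-val metaPath = segments.foldLeft(pwd / 'out)(_ / _) / "meta.json"
-```
-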
-Minimizing startup time means aggressive caching, as well as minimizing the
-total amount of bytecode used: Mill's current 1-2s startup time is dominated by
-JVM classloading. In future, we may have a long lived console or
-nailgun/drip-based server/client models to speed up interactive usage, but we
-should always keep "cold" startup as fast as possible.
-
-### Static dependency graph and Applicative tasks
-
-`Task`s are *Applicative*, not *Monadic*: there are `.map` and `.zip`, but no
-`.flatMap` operation. That means that we can know the structure of the entire
-dependency graph before we start executing `Task`s. This lets us perform all
-sorts of useful operations on the graph before running it:
-
-- Given a Target the user wants to run, pre-compute and display what targets
- will be evaluated ("dry run"), without running them
-
-- Automatically parallelize different parts of the dependency graph that do not
- depend on each other, perhaps even distributing it to different worker
- machines like Bazel/Pants can
-
-- Visualize the dependency graph easily, e.g. by dumping to a DOT file
-
-- Query the graph, e.g. "why does this thing depend on that other thing?"
-
-- Avoid running tasks "halfway": if a Target's upstream Targets fail, we can
- skip the Target completely rather than running halfway and then bailing out
- with an exception
-
-In order to avoid making people use `.map` and `.zip` all over the place when
-defining their `Task`s, we use the `T{...}`/`T.task{...}`/`T.command{...}`
-macros which allow you to use `Task#apply()` within the block to "extract" a
-value.
-
-```scala
-def test() = T.command{
- TestRunner.apply(
- "mill.UTestFramework",
- runDepClasspath().map(_.path) :+ compile().path,
-    Seq(compile().path)
-  )
-}
-```
-
-This is roughly equivalent to the following:
-
-```scala
-def test() = T.command{
-  T.zipMap(runDepClasspath, compile, compile){
-    (runDepClasspath1, compile2, compile3) =>
-      TestRunner.apply(
-        "mill.UTestFramework",
-        runDepClasspath1.map(_.path) :+ compile2.path,
-        Seq(compile3.path)
-      )
-  }
-}
-```
-
-This is similar to SBT's `:=`/`.value` macros, or `scala-async`'s
-`async`/`await`. Like those, the `T{...}` macro should let users program most of
-their code in a "direct" style and have it "automatically" lifted into a graph
-of `Task`s.
-
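-Although the `T{...}` macro means you rarely write them yourself, the
-underlying combinators are ordinary methods; a sketch (re-using the `compile`
-and `runDepClasspath` targets from the example above):
-
-```scala
-// .map transforms the result of a single upstream Task
-def jarName = compile.map(c => c.path.last + ".jar")
-// .zip pairs two upstream Tasks into one Task of a tuple
-def compileAndDeps = compile.zip(runDepClasspath)
-```
-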
-## How Mill aims for Simple
-
-Why should you expect that the Mill build tool can achieve simplicity, ease &
-flexibility, where other build tools in the past have failed?
-
-Build tools inherently encompass a huge number of different concepts:
-
-- What "Tasks" depends on what?
-- How do I define my own tasks?
-- What needs to run in what order to do what I want?
-- What can be parallelized and what can't?
-- How do tasks pass data to each other? What data do they pass?
-- What tasks are cached? Where?
-- How are tasks run from the command line?
-- How do you deal with the repetition inherent in a build? (e.g. compile, run &
- test tasks for every "module")
-- What is a "Module"? How do they relate to "Tasks"?
-- How do you configure a Module to do something different?
-- How are cross-builds (across different configurations) handled?
-
-These are a lot of questions to answer, and we haven't even started talking
-about actually compiling/running any code yet! If each such facet of a build
-were modelled separately, it would be easy to end up with an explosion of
-different concepts that would make a build tool hard to understand.
-
-Before you continue, take a moment to think: how would you answer each of
-those questions using an existing build tool you are familiar with? Different
-tools like [SBT](http://www.scala-sbt.org/),
-[Fake](https://fake.build/legacy-index.html), [Gradle](https://gradle.org/) or
-[Grunt](https://gruntjs.com/) have very different answers.
-
-Mill aims to answer these questions using as few, and as familiar, core
-concepts as possible. The entire Mill build is oriented around a few
-concepts:
-
-- The Object Hierarchy
-- The Call Graph
-- Instantiating Traits & Classes
-
-These concepts are already familiar to anyone experienced in Scala (or any other
-programming language...), but are enough to answer all of the complicated
-build-related questions listed above.
-
-## The Object Hierarchy
-
-The module hierarchy is the graph of objects, starting from the root of the
-`build.sc` file, that extend `mill.Module`. At the leaves of the hierarchy are
-the `Target`s you can run.
-
-A `Target`'s position in the module hierarchy tells you many things. For
-example, a `Target` at position `core.test.compile` would:
-
-- Cache output metadata at `out/core/test/compile/meta.json`
-
-- Output files to the folder `out/core/test/compile/dest/`
-
-- Be runnable from the command-line via `mill core.test.compile`
-
-- Be referenced programmatically (from other `Target`s) via `core.test.compile`
-
-From the position of any `Target` within the object hierarchy, you immediately
-know how to run it, find its output files, find any caches, or refer to it from
-other `Target`s. You know up-front where the `Target`'s data "lives" on disk, and
-are sure that it will never clash with any other `Target`'s data.
-
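-A hypothetical `build.sc` sketch of such a hierarchy (module and target names
-invented for illustration):
-
-```scala
-import mill._
-
-object core extends Module{
-  object test extends Module{
-    def compile = T{ "compiled!" }
-  }
-}
-// run from the command line:  mill core.test.compile
-// metadata cached at:         out/core/test/compile/meta.json
-// output files under:         out/core/test/compile/dest/
-```
-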
-## The Call Graph
-
-The Scala call graph of "which target references which other target" is core to
-how Mill operates. This graph is reified via the `T{...}` macro to make it
-available to the Mill execution engine at runtime. The call graph tells you:
-
-- Which `Target`s depend on which other `Target`s
-
-- For a given `Target` to be built, what other `Target`s need to be run and in
- what order
-
-- Which `Target`s can be evaluated in parallel
-
-- What source files need to be watched when using `--watch` on a given target (by
- tracing the call graph up to the `Source`s)
-
-- What a given `Target` makes available for other `Target`s to depend on (via
- its return value)
-
-- How to define your own task that depends on others: as simple as `def foo =
-  T{...}`
-
-The call graph within your Scala code is essentially a data-flow graph: by
-defining a snippet of code:
-
-```scala
-val b = ...
-val c = ...
-val d = ...
-val a = f(b, c, d)
-```
-
-you are telling everyone that the value `a` depends on the values of `b`, `c`
-and `d`, processed by `f`. A build tool needs exactly the same data structure:
-knowing what `Target` depends on what other `Target`s, and what processing it
-does on its inputs!
-
-With Mill, you can take the Scala call graph, wrap everything in the `T{...}`
-macro, and get a `Target`-dependency graph that matches exactly the call-graph
-you already had:
-
-```scala
-val b = T{ ... }
-val c = T{ ... }
-val d = T{ ... }
-val a = T{ f(b(), c(), d()) }
-```
-
-Thus, if you are familiar with how data flows through a normal Scala program,
-you already know how data flows through a Mill build! The Mill build evaluation
-may be incremental, it may cache things, it may read and write from disk, but
-the fundamental syntax, and the data-flow that syntax represents, is unchanged
-from your normal Scala code.
-
-## Instantiating Traits & Classes
-
-Classes and traits are a common way of re-using common data structures in Scala:
-if you have a bunch of fields which are related and you want to make multiple
-copies of those fields, you put them in a class/trait and instantiate it over
-and over.
-
-In Mill, inheriting from traits is the primary way for re-using common parts of
-a build:
-
-- Scala "project"s with multiple related `Target`s within them, are just a
- `Trait` you instantiate
-
-- Replacing the default `Target`s within a project, making them do new
- things or depend on new `Target`s, is simply `override`-ing them during
-  inheritance.
-
-- Modifying the default `Target`s within a project, making use of the old value
-  to compute the new value, is simply `override`-ing them and using `super.foo()`
-
-- Required configuration parameters within a `project` are `abstract` members.
-
-- Cross-builds are modelled as instantiating a (possibly anonymous) class
- multiple times, each instance with its own distinct set of `Target`s
-
-In normal Scala, you bundle up common fields & functionality into a `class` you
-can instantiate over and over, and you can override the things you want to
-customize. Similarly, in Mill, you bundle up common parts of a build into
-`trait`s you can instantiate over and over, and you can override the things you
-want to customize. "Subprojects", "cross-builds", and many other concepts are
-reduced to simply instantiating a `trait` over and over, with tweaks.
-
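-A hypothetical sketch of this pattern (trait, module and target names invented
-for illustration):
-
-```scala
-import mill._
-
-trait MyModule extends Module{
-  def name: String                          // required config: abstract member
-  def sources = T{ name + "/src" }
-  def compile = T{ "compiling " + sources() }
-}
-object foo extends MyModule{ def name = "foo" }
-object bar extends MyModule{
-  def name = "bar"
-  // modify the default Target, re-using the old value via super
-  override def compile = T{ super.compile() + " with tweaks" }
-}
-```
-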
-## Prior Work
-
-### SBT
-
-Mill is built as a substitute for SBT, whose problems are
-[described here](http://www.lihaoyi.com/post/SowhatswrongwithSBT.html).
-Nevertheless, Mill adopts some parts of SBT (builds written in Scala, a Task
-graph with an Applicative "idiom bracket" macro) where they make sense.
-
-### Bazel
-
-Mill is largely inspired by [Bazel](https://bazel.build/). In particular, the
-single-build-hierarchy, where every Target has an on-disk-cache/output-directory
-according to its position in the hierarchy, comes from Bazel.
-
-Bazel is a bit odd in its own right. The underlying data model is good
-(hierarchy + cached dependency graph), but getting there is hell: it (like SBT)
-is also a 3-layer interpretation model, but layers 1 & 2 are almost exactly the
-same: mutable Python which performs global side effects (layer 3 is the same
-dependency-graph evaluator as SBT/Mill).
-
-You end up having to deal with a non-trivial Python codebase where everything
-happens via
-
-```python
-do_something(name="blah")
-```
-
-or
-
-```python
-do_other_thing(dependencies=["blah"])
-```
-
-where `"blah"` is a global identifier that is often constructed programmatically
-via string concatenation and passed around. This is quite challenging.
-
-Having the two layers be “just Python” is great since people know Python, but I
-think it is unnecessary to have two layers ("evaluating macros" and "evaluating
-rule impls") that are almost exactly the same, and I think making them interact
-via return values rather than via a global namespace of
-programmatically-constructed strings would make it easier to follow.
-
-With Mill, I’m trying to collapse Bazel’s Python layers 1 & 2 into just 1 layer
-of Scala, and have it define its dependency graph/hierarchy by returning
-values, rather than by calling global-side-effecting APIs. I've had trouble
-trying to teach people how-to-bazel at work, and am pretty sure we can make
-something that's easier to use.
-
-### Scala.Rx
-
-Mill's "direct-style" applicative syntax is inspired by my old
-[Scala.Rx](https://github.com/lihaoyi/scala.rx) project. While there are
-differences (Mill captures the dependency graph lexically using macros, while
-Scala.Rx captures it at runtime), they are pretty similar.
-
-The end-goal is the same: to write code in a "direct style" and have it
-automatically "lifted" into a dependency graph, which you can introspect and use
-for incremental updates at runtime.
-
-Scala.Rx is itself built upon the 2010 paper
-[Deprecating the Observer Pattern](https://infoscience.epfl.ch/record/148043/files/DeprecatingObserversTR2010.pdf).
-
-### CBT
-
-Mill looks a lot like [CBT](https://github.com/cvogt/cbt). The inheritance-based
-model for customizing `Module`s/`ScalaModule`s comes straight from there, as
-does the "command line path matches Scala selector path" idea. Most other things
-are different though: the reified dependency graph, the execution model, and the
-caching model all follow Bazel more than they do CBT.
-
## Mill Goals and Roadmap
diff --git a/scalalib/src/mill/scalalib/MiscModule.scala b/scalalib/src/mill/scalalib/MiscModule.scala
index 502ba461..e20fe7be 100644
--- a/scalalib/src/mill/scalalib/MiscModule.scala
+++ b/scalalib/src/mill/scalalib/MiscModule.scala
@@ -1,11 +1,53 @@
package mill
package scalalib
+import ammonite.ops.{Path, RelPath}
import mill.define.Cross.Resolver
import mill.define.{Cross, Task}
import mill.eval.{PathRef, Result}
import mill.util.Loose.Agg
+object CrossModuleBase{
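+  // Given e.g. "2.12.3", yields PathRefs for f("2.12.3"), f("2.12") and f("2"),
+  // one per non-empty prefix of the version number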
+ def scalaVersionPaths(scalaVersion: String, f: String => Path) = {
+ for(segments <- scalaVersion.split('.').inits.filter(_.nonEmpty))
+ yield PathRef(f(segments.mkString(".")))
+ }
+}
+trait CrossModuleBase extends mill.Module{
+ def crossScalaVersion: String
+ override def basePath = super.basePath / ammonite.ops.up
+ implicit def crossSbtModuleResolver: Resolver[CrossModuleBase] = new Resolver[CrossModuleBase]{
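+    // Try the full version first, then major.minor, and resolve to the first
+    // cross-instance whose crossScalaVersion starts with that prefix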
+ def resolve[V <: CrossModuleBase](c: Cross[V]): V = {
+ crossScalaVersion.split('.')
+ .inits
+ .takeWhile(_.length > 1)
+ .flatMap( prefix =>
+ c.items.map(_._2).find(_.crossScalaVersion.split('.').startsWith(prefix))
+ )
+ .collectFirst{case x => x}
+ .getOrElse(
+ throw new Exception(
+ s"Unable to find compatible cross version between $crossScalaVersion and "+
+ c.items.map(_._2.crossScalaVersion).mkString(",")
+ )
+ )
+ }
+ }
+}
+trait CrossScalaModule extends ScalaModule with CrossModuleBase{ outer =>
+ override def sources = T.input{
+ super.sources() ++
+    CrossModuleBase.scalaVersionPaths(crossScalaVersion, s => basePath / s"src-$s")
+  }
+
+ trait Tests extends super.Tests {
+ override def sources = T.input{
+ super.sources() ++
+      CrossModuleBase.scalaVersionPaths(crossScalaVersion, s => basePath / s"src-$s")
+ }
+ }
+}
trait SbtModule extends ScalaModule { outer =>
override def sources = T.input{
@@ -27,45 +69,26 @@ trait SbtModule extends ScalaModule { outer =>
}
}
-trait CrossSbtModule extends SbtModule { outer =>
- override def basePath = super.basePath / ammonite.ops.up
- implicit def crossSbtModuleResolver: Resolver[CrossSbtModule] = new Resolver[CrossSbtModule]{
- def resolve[V <: CrossSbtModule](c: Cross[V]): V = {
- crossScalaVersion.split('.')
- .inits
- .takeWhile(_.length > 1)
- .flatMap( prefix =>
- c.items.map(_._2).find(_.crossScalaVersion.split('.').startsWith(prefix))
- )
- .collectFirst{case x => x}
- .getOrElse(
- throw new Exception(
- s"Unable to find compatible cross version between $crossScalaVersion and "+
- c.items.map(_._2.crossScalaVersion).mkString(",")
- )
- )
- }
- }
+trait CrossSbtModule extends SbtModule with CrossModuleBase{ outer =>
- def crossScalaVersion: String
def scalaVersion = crossScalaVersion
override def sources = T.input{
super.sources() ++
- crossScalaVersion.split('.').inits.filter(_.nonEmpty).map(_.mkString(".")).map{
- s => PathRef{ basePath / 'src / 'main / s"scala-$s" }
- }
+ CrossModuleBase.scalaVersionPaths(
+ crossScalaVersion,
+ s => basePath / 'src / 'main / s"scala-$s"
+ )
}
- override def resources = T.input{ Agg(PathRef(basePath / 'src / 'main / 'resources)) }
trait Tests extends super.Tests {
override def basePath = outer.basePath
override def sources = T.input{
super.sources() ++
- crossScalaVersion.split('.').inits.filter(_.nonEmpty).map(_.mkString(".")).map{
- s => PathRef{ basePath / 'src / 'test / s"scala-$s" }
- }
+ CrossModuleBase.scalaVersionPaths(
+ crossScalaVersion,
+        s => basePath / 'src / 'test / s"scala-$s"
+ )
}
- override def resources = T.input{ Agg(PathRef(basePath / 'src / 'test / 'resources)) }
}
}