author    Sakib Hadžiavdić <sake92@users.noreply.github.com>    2018-06-07 05:41:20 +0200
committer Li Haoyi <haoyi.sg@gmail.com>                         2018-06-06 20:41:20 -0700
commit    8d8c951d2716873f8b1a2dd00b56a8a73ff45e31 (patch)
tree      54cc9b0d8cfdf5359e80559b5286d5712f37aa2f /docs
parent    e34e67588be6978451fb198335ae938aa17e5964 (diff)
Fixed typos, wordings, formatted examples (#365)
Diffstat (limited to 'docs')
-rw-r--r--  docs/pages/1 - Intro to Mill.md           |  25
-rw-r--r--  docs/pages/2 - Configuring Mill.md        |  78
-rw-r--r--  docs/pages/3 - Common Project Layouts.md  |  24
-rw-r--r--  docs/pages/4 - Tasks.md                   | 106
-rw-r--r--  docs/pages/5 - Modules.md                 |  60
-rw-r--r--  docs/pages/6 - Cross Builds.md            |  50
-rw-r--r--  docs/pages/7 - Extending Mill.md          |  53
-rw-r--r--  docs/pages/8 - Mill Internals.md          |  46
8 files changed, 219 insertions, 223 deletions
diff --git a/docs/pages/1 - Intro to Mill.md b/docs/pages/1 - Intro to Mill.md
index 7aac0149..84a9bc5d 100644
--- a/docs/pages/1 - Intro to Mill.md
+++ b/docs/pages/1 - Intro to Mill.md
@@ -80,7 +80,7 @@ The simplest Mill build for a Java project looks as follows:
```scala
// build.sc
-import mill._, mill.scalalib._
+import mill._, scalalib._
object foo extends JavaModule {
@@ -91,8 +91,7 @@ The simplest Mill build for a Scala project looks as follows:
```scala
// build.sc
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
@@ -152,7 +151,7 @@ time.
## Output
-Mill puts all it's output in the top-level `out/` folder. The above commands
+Mill puts all its output in the top-level `out/` folder. The above commands
would end up in:
```text
@@ -168,9 +167,9 @@ Within the output folder for each task, there's a `meta.json` file containing
the metadata returned by that task, and a `dest/` folder containing any files
that the task generates. For example, `out/foo/compile/dest/` contains the
compiled classfiles, while `out/foo/assembly/dest/` contains the self-contained
-assembly with the project's classfiles jar-ed up with all it's dependencies.
+assembly with the project's classfiles jar-ed up with all its dependencies.
-Given a task `foo.bar`, all it's output and results can be found be within it's
+Given a task `foo.bar`, all its output and results can be found be within its
respective `out/foo/bar/` folder.
## Multiple Modules
@@ -178,7 +177,8 @@ respective `out/foo/bar/` folder.
### Java Example
```scala
// build.sc
-import mill._, mill.scalalib._
+import mill._, scalalib._
+
object foo extends JavaModule
object bar extends JavaModule {
def moduleDeps = Seq(foo)
@@ -188,7 +188,8 @@ object bar extends JavaModule {
### Scala Example
```scala
// build.sc
-import mill._, mill.scalalib._
+import mill._, scalalib._
+
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
}
@@ -244,8 +245,8 @@ Modules can also be nested:
```scala
// build.sc
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
+
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
object bar extends ScalaModule {
@@ -388,7 +389,7 @@ Inputs:
```
`inspect` is a more verbose version of [resolve](#resolve). In addition to
-printing out the name of one-or-more tasks, it also display's it's source
+printing out the name of one-or-more tasks, it also displays its source
location and a list of input tasks. This is very useful for debugging and
interactively exploring the structure of your build from the command line.
@@ -614,7 +615,7 @@ build:
// build.sc
import mill._, scalalib._
-object foo extends ScalaModule{
+object foo extends ScalaModule {
def scalaVersion = "2.12.4"
}
```
diff --git a/docs/pages/2 - Configuring Mill.md b/docs/pages/2 - Configuring Mill.md
index 03489f68..036fbe6b 100644
--- a/docs/pages/2 - Configuring Mill.md
+++ b/docs/pages/2 - Configuring Mill.md
@@ -3,8 +3,8 @@ You can configure your Mill build in a number of ways:
## Compilation & Execution Flags
```scala
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
+
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
@@ -40,8 +40,8 @@ mill foo.runLocal arg1 arg2 arg3
## Adding Ivy Dependencies
```scala
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
+
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
def ivyDeps = Agg(
@@ -77,6 +77,7 @@ custom `ScalaWorkerModule`, and override the `scalaWorker` method in your
```scala
import coursier.maven.MavenRepository
+
object CustomScalaWorkerModule extends ScalaWorkerModule {
def repositories() = super.repositories ++ Seq(
MavenRepository("https://oss.sonatype.org/content/repositories/releases")
@@ -92,12 +93,12 @@ object YourBuild extends ScalaModule {
## Adding a Test Suite
```scala
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
+
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
- object test extends Tests{
+ object test extends Tests {
def ivyDeps = Agg(ivy"com.lihaoyi::utest:0.6.0")
def testFrameworks = Seq("utest.runner.Framework")
}
@@ -154,16 +155,16 @@ You can define multiple test suites if you want, e.g.:
```scala
// build.sc
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
+
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
- object test extends Tests{
+ object test extends Tests {
def ivyDeps = Agg(ivy"com.lihaoyi::utest:0.6.0")
def testFrameworks = Seq("utest.runner.Framework")
}
- object integration extends Tests{
+ object integration extends Tests {
def ivyDeps = Agg(ivy"com.lihaoyi::utest:0.6.0")
def testFrameworks = Seq("utest.runner.Framework")
}
@@ -198,8 +199,8 @@ passing args to the test suite via `mill foo.test arg1 arg2 arg3`
```scala
// build.sc
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
+
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
@@ -221,9 +222,7 @@ To have a formatting per-module you need to make your module extend `mill.scalal
```scala
// build.sc
-import mill._
-import mill.scalalib._
-import mill.scalalib.scalafmt._
+import mill._, scalalib._, scalafmt._
object foo extends ScalaModule with ScalafmtModule {
def scalaVersion = "2.12.4"
@@ -239,9 +238,9 @@ It will reformat all sources that matches `__.sources` query.
```scala
// build.sc
-import mill._
-import mill.scalalib._
-trait CommonModule extends ScalaModule{
+import mill._, scalalib._
+
+trait CommonModule extends ScalaModule {
def scalaVersion = "2.12.4"
}
@@ -285,28 +284,28 @@ Everything declared in the above file will be available to any build you run.
```scala
// build.sc
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
+
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
}
-def lineCount = T{
+def lineCount = T {
import ammonite.ops._
foo.sources().flatMap(ref => ls.rec(ref.path)).filter(_.isFile).flatMap(read.lines).size
}
-def printLineCount() = T.command{
+def printLineCount() = T.command {
println(lineCount())
}
```
-You can define new cached Targets using the `T{...}` syntax, depending on
+You can define new cached Targets using the `T {...}` syntax, depending on
existing Targets e.g. `foo.sources` via the `foo.sources()` syntax to extract
their current value, as shown in `lineCount` above. The return-type of a Target
has to be JSON-serializable (using
[uPickle](https://github.com/lihaoyi/upickle)) and the Target is cached when
-first run until it's inputs change (in this case, if someone edits the
+first run until its inputs change (in this case, if someone edits the
`foo.sources` files which live in `foo/src`. Cached Targets cannot take
parameters.
@@ -316,12 +315,12 @@ You can print the value of your custom target using `show`, e.g.
mill run show lineCount
```
-You can define new un-cached Commands using the `T.command{...}` syntax. These
+You can define new un-cached Commands using the `T.command {...}` syntax. These
are un-cached and re-evaluate every time you run them, but can take parameters.
Their return type needs to be JSON-writable as well, or `(): Unit` if you want
to return nothing.
-Your custom targets can depend on each other using the `def bar = T{... foo()
+Your custom targets can depend on each other using the `def bar = T {... foo()
...}` syntax, and you can create arbitrarily long chains of dependent targets.
Mill will handle the re-evaluation and caching of the targets' output for you,
and will provide you a `T.ctx().dest` folder for you to use as scratch space or
@@ -332,15 +331,15 @@ download files (e.g. using `mill.modules.Util.download`), shell-out to Webpack
to compile some Javascript, generate sources to feed into a compiler, or create
some custom jar/zip assembly with the files you want (e.g. using
`mill.modules.Jvm.createJar`), all of these can simply be custom targets with
-your code running in the `T{...}` block.
+your code running in the `T {...}` block.
## Custom Modules
```scala
// build.sc
-import mill._
-import mill.scalalib._
-object qux extends Module{
+import mill._, scalalib._
+
+object qux extends Module {
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
}
@@ -365,7 +364,7 @@ to represent other things e.g. Javascript bundles, docker image building,:
```scala
// build.sc
-trait MySpecialModule extends Module{
+trait MySpecialModule extends Module {
...
}
object foo extends MySpecialModule
@@ -414,16 +413,15 @@ mill unhyphenatedModule.unhyphenated_target2
```scala
// build.sc
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
- def compile = T{
+ def compile = T {
println("Compiling...")
super.compile()
}
- def run(args: String*) = T.command{
+ def run(args: String*) = T.command {
println("Running..." + args.mkString(" "))
super.run(args:_*)
}
@@ -444,12 +442,11 @@ In Mill builds the `override` keyword is optional.
```scala
// build.sc
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
- def unmanagedClasspath = T{
+ def unmanagedClasspath = T {
if (!ammonite.ops.exists(millSourcePath / "lib")) Agg()
else Agg.from(ammonite.ops.ls(millSourcePath / "lib"))
}
@@ -481,8 +478,7 @@ main class to your `foo.jar` and `foo.assembly` jars.
```scala
// build.sc
-import mill._
-import mill.scalalib._
+import mill._, scalalib._
object foo extends ScalaModule {
def scalaVersion = "2.12.4"
diff --git a/docs/pages/3 - Common Project Layouts.md b/docs/pages/3 - Common Project Layouts.md
index a53fb1b1..a8c55a10 100644
--- a/docs/pages/3 - Common Project Layouts.md
+++ b/docs/pages/3 - Common Project Layouts.md
@@ -7,15 +7,15 @@ Scala build:
### Java Project with Test Suite
```scala
-trait JUnitTests extends TestModule{
+trait JUnitTests extends TestModule {
def testFrameworks = Seq("com.novocode.junit.JUnitFramework")
def ivyDeps = Agg(ivy"com.novocode:junit-interface:0.11")
}
-object core extends JavaModule{
+object core extends JavaModule {
object test extends Tests with JUnitTests
}
-object app extends JavaModule{
+object app extends JavaModule {
def moduleDeps = Seq(core)
object test extends Tests with JUnitTests
}
@@ -54,9 +54,9 @@ project:
import mill._
import mill.scalalib._
object foo extends Cross[FooModule]("2.10.6", "2.11.11", "2.12.4")
-class FooModule(val crossScalaVersion: String) extends CrossScalaModule{
+class FooModule(val crossScalaVersion: String) extends CrossScalaModule {
...
- object test extends Tests{
+ object test extends Tests {
...
}
}
@@ -135,9 +135,9 @@ to re-organize all your files
import mill._
import mill.scalalib._
object foo extends Cross[FooModule]("2.10.6", "2.11.11", "2.12.4")
-class FooModule(val crossScalaVersion: String) extends CrossSbtModule{
+class FooModule(val crossScalaVersion: String) extends CrossSbtModule {
...
- object test extends Tests{
+ object test extends Tests {
...
}
}
@@ -166,9 +166,9 @@ foo/
```scala
import mill._
import mill.scalalib._
-object foo extends ScalaModule with PublishModule{
+object foo extends ScalaModule with PublishModule {
def scalaVersion = "2.12.4"
- def publishVersion = "0.0.1
+ def publishVersion = "0.0.1"
def pomSettings = PomSettings(
description = "My first library",
organization = "com.lihaoyi",
@@ -232,8 +232,8 @@ integration tests and examples:
- [Mill Build](https://github.com/lihaoyi/mill/blob/master/integration/test/resources/acyclic/build.sc#L1)
-A small single-module cross-build, with few sources minimal dependencies, and
-wired up for publishing to Maven Central
+A small single-module cross-build, with few sources, minimal dependencies, and
+wired up for publishing to Maven Central.
### Better-Files
@@ -257,7 +257,7 @@ versions of Scala.
- [Mill Build](https://github.com/lihaoyi/mill/blob/master/integration/test/resources/upickle/build.sc#L1)
A single cross-platform Scala.js/Scala-JVM module cross-built against multiple
-versions of Scala, including the setup necessary for publishing to Maven Central
+versions of Scala, including the setup necessary for publishing to Maven Central.
### Ammonite
diff --git a/docs/pages/4 - Tasks.md b/docs/pages/4 - Tasks.md
index ef1ef054..d87b324b 100644
--- a/docs/pages/4 - Tasks.md
+++ b/docs/pages/4 - Tasks.md
@@ -1,4 +1,4 @@
-One of Mill's core abstractions is it's *Task Graph*: this is how Mill defines,
+One of Mill's core abstractions is its *Task Graph*: this is how Mill defines,
orders and caches work it needs to do, and exists independently of any support
for building Scala.
@@ -12,22 +12,22 @@ import ammonite.ops._, mill._
// v
// resourceRoot ----> jar
-def sourceRoot = T.sources{ pwd / 'src }
+def sourceRoot = T.sources { pwd / 'src }
-def resourceRoot = T.sources{ pwd / 'resources }
+def resourceRoot = T.sources { pwd / 'resources }
-def allSources = T{ sourceRoot().flatMap(p => ls.rec(p.path)).map(PathRef(_)) }
+def allSources = T { sourceRoot().flatMap(p => ls.rec(p.path)).map(PathRef(_)) }
-def classFiles = T{
+def classFiles = T {
mkdir(T.ctx().dest)
import ammonite.ops._
%("javac", sources().map(_.path.toString()), "-d", T.ctx().dest)(wd = T.ctx().dest)
PathRef(T.ctx().dest)
}
-def jar = T{ Jvm.createJar(Loose.Agg(classFiles().path) ++ resourceRoot().map(_.path)) }
+def jar = T { Jvm.createJar(Loose.Agg(classFiles().path) ++ resourceRoot().map(_.path)) }
-def run(mainClsName: String) = T.command{
+def run(mainClsName: String) = T.command {
%%('java, "-cp", classFiles().path, mainClsName)
}
```
@@ -35,7 +35,7 @@ def run(mainClsName: String) = T.command{
Here, we have two `T.sources`s, `sourceRoot` and `resourceRoot`, which act as the
roots of our task graph. `allSources` depends on `sourceRoot` by calling
-`sourceRoot()` to extract it's value, `classFiles` depends on `allSources` the
+`sourceRoot()` to extract its value, `classFiles` depends on `allSources` the
same way, and `jar` depends on both `classFiles` and `resourceRoot`.
Filesystem operations in Mill are done using the
@@ -54,7 +54,7 @@ When you first evaluate `jar` (e.g. via `mill jar` at the command line), it will
evaluate all the defined targets: `sourceRoot`, `allSources`, `classFiles`,
`resourceRoot` and `jar`.
-Subsequent `mill jars` will evaluate only as much as is necessary, depending on
+Subsequent `mill jar`s will evaluate only as much as is necessary, depending on
what input sources changed:
- If the files in `sourceRoot` change, it will re-evaluate `allSources`,
@@ -64,30 +64,30 @@ what input sources changed:
the cached output of `allSources` and `classFiles`
Apart from the `foo()` call-sites which define what each targets depend on, the
-code within each `T{...}` wrapper is arbirary Scala code that can compute an
-arbitrary result from it's inputs.
+code within each `T {...}` wrapper is arbitrary Scala code that can compute an
+arbitrary result from its inputs.
## Different Kinds of Tasks
There are four primary kinds of *Tasks* that you should care about:
-- [Targets](#targets), defined using `T{...}`
-- [Sources](#sources), defined using `T.sources{...}`
-- [Commands](#commands), defined using `T.command{...}`
+- [Targets](#targets), defined using `T {...}`
+- [Sources](#sources), defined using `T.sources {...}`
+- [Commands](#commands), defined using `T.command {...}`
### Targets
```scala
-def allSources = T{ ls.rec(sourceRoot().path).map(PathRef(_)) }
+def allSources = T { ls.rec(sourceRoot().path).map(PathRef(_)) }
```
-`Target`s are defined using the `def foo = T{...}` syntax, and dependencies on
+`Target`s are defined using the `def foo = T {...}` syntax, and dependencies on
other targets are defined using `foo()` to extract the value from them. Apart
-from the `foo()` calls, the `T{...}` block contains arbitrary code that does
+from the `foo()` calls, the `T {...}` block contains arbitrary code that does
some work and returns a result.
-Each target e.g. `classFiles` is assigned a path on disk as scratch space & to
-store it's output files at `out/classFiles/dest/`, and it's returned metadata is
+Each target, e.g. `classFiles`, is assigned a path on disk as scratch space & to
+store its output files at `out/classFiles/dest/`, and its returned metadata is
automatically JSON-serialized and stored at `out/classFiles/meta.json`. The
return-value of targets has to be JSON-serializable via
[uPickle](https://github.com/lihaoyi/upickle).
@@ -97,73 +97,73 @@ write them to disk within your `T.ctx().dest` available through the
[Task Context API](#task-context-api) and return a `PathRef` to the files you
wrote.
-If a target's inputs change but it's output does not, e.g. someone changes a
+If a target's inputs change but its output does not, e.g. someone changes a
comment within the source files that doesn't affect the classfiles, then
downstream targets do not re-evaluate. This is determined using the `.hashCode`
-of the Target's return value. For target's returning `ammonite.ops.Path`s that
+of the Target's return value. For targets returning `ammonite.ops.Path`s that
reference files on disk, you can wrap the `Path` in a `PathRef` (shown above)
whose `.hashCode()` will include the hashes of all files on disk at time of
creation.
The graph of inter-dependent targets is evaluated in topological order; that
-means that the body of a target will not even begin to evaluate if one of it's
+means that the body of a target will not even begin to evaluate if one of its
upstream dependencies has failed. This is unlike normal Scala functions: a plain
old function `foo` would evaluate halfway and then blow up if one of `foo`'s
dependencies throws an exception.
Targets cannot take parameters and must be 0-argument `def`s defined directly
-within a `Module` body
+within a `Module` body.
### Sources
```scala
def sourceRootPath = pwd / 'src
-def sourceRoots = T.sources{ sourceRootPath }
+def sourceRoots = T.sources { sourceRootPath }
```
-`Source`s are defined using `T.sources{ ... }`, taking one-or-more
+`Source`s are defined using `T.sources { ... }`, taking one-or-more
`ammonite.ops.Path`s as arguments. A `Source` is a subclass of
-`Target[Seq[PathRef]]`: this means that it's build signature/`hashCode` depends
+`Target[Seq[PathRef]]`: this means that its build signature/`hashCode` depends
not just on the path it refers to (e.g. `foo/bar/baz`) but also the MD5 hash of
the filesystem tree under that path.
`T.sources` also has an overload which takes `Seq[PathRef]`, to let you
-override-and-extend source lists the same way you would any other `T{...}`
+override-and-extend source lists the same way you would any other `T {...}`
definition:
```scala
-def additionalSources = T.sources{ pwd / 'additionalSources }
-def sourceRoots = T.sources{ super.sourceRoots() ++ additionalSources() }
+def additionalSources = T.sources { pwd / 'additionalSources }
+def sourceRoots = T.sources { super.sourceRoots() ++ additionalSources() }
```
### Commands
```scala
-def run(mainClsName: String) = T.command{
+def run(mainClsName: String) = T.command {
%%('java, "-cp", classFiles().path, mainClsName)
}
```
-Defined using `T.command{ ... }` syntax, `Command`s can run arbitrary code, with
+Defined using `T.command { ... }` syntax, `Command`s can run arbitrary code, with
dependencies declared using the same `foo()` syntax (e.g. `classFiles()` above).
Commands can be parametrized, but their output is not cached, so they will
re-evaluate every time even if none of their inputs have changed.
-Like [Targets](#targets), a command only evaluates after all it's upstream
+Like [Targets](#targets), a command only evaluates after all its upstream
dependencies have completed, and will not begin to run if any upstream
dependency has failed.
-Commands are assigned the same scratch/output directory `out/run/dest/` as
-Targets are, and it's returned metadata stored at the same `out/run/meta.json`
+Commands are assigned the same scratch/output folder `out/run/dest/` as
+Targets are, and its returned metadata stored at the same `out/run/meta.json`
path for consumption by external tools.
Commands can only be defined directly within a `Module` body.
## Task Context API
-There are several APIs available to you within the body of a `T{...}` or
-`T.command{...}` block to help your write the code implementing your Target or
+There are several APIs available to you within the body of a `T {...}` or
+`T.command {...}` block to help your write the code implementing your Target or
Command:
### mill.util.Ctx.Dest
@@ -186,7 +186,7 @@ to those same paths.
This is the default logger provided for every task. While your task is running,
`System.out` and `System.in` are also redirected to this logger. The logs for a
task are streamed to standard out/error as you would expect, but each task's
-specific output is also streamed to a log file on disk e.g. `out/run/log` or
+specific output is also streamed to a log file on disk, e.g. `out/run/log` or
`out/classFiles/log` for you to inspect later.
### mill.util.Ctx.Env
@@ -210,7 +210,7 @@ def envVar = T.input { T.ctx().env.get("ENV_VAR") }
## Other Tasks
-- [Anonymous Tasks](#anonymous-tasks), defined using `T.task{...}`
+- [Anonymous Tasks](#anonymous-tasks), defined using `T.task {...}`
- [Persistent Targets](#persistent-targets)
- [Inputs](#inputs)
- [Workers](#workers)
@@ -219,34 +219,34 @@ def envVar = T.input { T.ctx().env.get("ENV_VAR") }
### Anonymous Tasks
```scala
-def foo(x: Int) = T.task{ ... x ... bar() ... }
+def foo(x: Int) = T.task { ... x ... bar() ... }
```
-You can define anonymous tasks using the `T.task{ ... }` syntax. These are not
+You can define anonymous tasks using the `T.task { ... }` syntax. These are not
runnable from the command-line, but can be used to share common code you find
yourself repeating in `Target`s and `Command`s.
```scala
-def downstreamTarget = T{ ... foo() ... }
-def downstreamCommand = T.command{ ... foo() ... }
+def downstreamTarget = T { ... foo() ... }
+def downstreamCommand = T.command { ... foo() ... }
```
-Anonymous tasks's output does not need to be JSON-serializable, their output is
+Anonymous task's output does not need to be JSON-serializable, their output is
not cached, and they can be defined with or without arguments. Unlike
[Targets](#targets) or [Commands](#commands), anonymous tasks can be defined
anywhere and passed around any way you want, until you finally make use of them
within a downstream target or command.
While an anonymous task `foo`'s own output is not cached, if it is used in a
-downstream target `bar` and the upstream targets's `baz` `qux` haven't changed,
+downstream target `bar` and the upstream targets `baz` `qux` haven't changed,
`bar`'s cached output will be used and `foo`'s evaluation will be skipped
altogether.
### Persistent Targets
```scala
-def foo = T.persistent{ ... }
+def foo = T.persistent { ... }
```
-Identical to [Targets](#targets), except that the `dest/` directory is not
+Identical to [Targets](#targets), except that the `dest/` folder is not
cleared in between runs.
This is useful if you are running external incremental-compilers, such as
@@ -263,11 +263,11 @@ filesystem states existed before.
### Inputs
```scala
-def foo = T.input{ ... }
+def foo = T.input { ... }
```
A generalization of [Sources](#sources), `T.input`s are tasks that re-evaluate
-*every time* (Unlike [Anonymous Tasks](#anonymous-tasks)), containing an
+*every time* (unlike [Anonymous Tasks](#anonymous-tasks)), containing an
arbitrary block of code.
Inputs can be used to force re-evaluation of some external property that may
@@ -275,7 +275,7 @@ affect your build. For example, if I have a [Target](#targets) `bar` that makes
use of the current git version:
```scala
-def bar = T{ ... %%("git", "rev-parse", "HEAD").out.string ... }
+def bar = T { ... %%("git", "rev-parse", "HEAD").out.string ... }
```
`bar` will not know that `git rev-parse` can change, and will
@@ -286,8 +286,8 @@ be out of date!
To fix this, you can wrap your `git rev-parse HEAD` in a `T.input`:
```scala
-def foo = T.input{ %%("git", "rev-parse", "HEAD").out.string }
-def bar = T{ ... foo() ... }
+def foo = T.input { %%("git", "rev-parse", "HEAD").out.string }
+def bar = T { ... foo() ... }
```
This makes `foo` will always re-evaluate every build; if `git rev-parse HEAD`
@@ -303,7 +303,7 @@ targets.
### Workers
```scala
-def foo = T.worker{ ... }
+def foo = T.worker { ... }
```
Most tasks dispose of their in-memory return-value every evaluation; in the case
@@ -316,7 +316,7 @@ Workers are unique in that they store their in-memory return-value between
evaluations. This makes them useful for storing in-memory caches or references
to long-lived external worker processes that you can re-use.
-Mill uses workers to managed long-lived instances of the
+Mill uses workers to manage long-lived instances of the
[Zinc Incremental Scala Compiler](https://github.com/sbt/zinc) and the
[Scala.js Optimizer](https://github.com/scala-js/scala-js). This lets us keep
them in-memory with warm caches and fast incremental execution.
diff --git a/docs/pages/5 - Modules.md b/docs/pages/5 - Modules.md
index 1d8f3d87..2c13a3e9 100644
--- a/docs/pages/5 - Modules.md
+++ b/docs/pages/5 - Modules.md
@@ -10,10 +10,10 @@ path you would use to run tasks within that module from the command line. e.g.
for the following build:
```scala
-object foo extends mill.Module{
- def bar = T{ "hello" }
- object baz extends mill.Module{
- def qux = T{ "world" }
+object foo extends mill.Module {
+ def bar = T { "hello" }
+ object baz extends mill.Module {
+ def qux = T { "world" }
}
}
```
@@ -28,9 +28,9 @@ Modules also provide a way to define and re-use common collections of tasks, via
Scala `trait`s. For example, you can define your own `FooModule` trait:
```scala
-trait FooModule extends mill.Module{
- def bar = T{ "hello" }
- def baz = T{ "world" }
+trait FooModule extends mill.Module {
+ def bar = T { "hello" }
+ def baz = T { "world" }
}
```
@@ -39,8 +39,8 @@ along with any other customizations such as `qux`:
```scala
object foo1 extends FooModule
-object foo2 extends FooModule{
- def qux = T{ "I am Cow" }
+object foo2 extends FooModule {
+ def qux = T { "I am Cow" }
}
```
@@ -62,13 +62,13 @@ typical Scala module.
```scala
trait BaseModule extends Module {
- def foo = T{ Seq("base") }
- def cmd(i: Int) = T.command{ Seq("base" + i) }
+ def foo = T { Seq("base") }
+ def cmd(i: Int) = T.command { Seq("base" + i) }
}
object canOverrideSuper with BaseModule {
- def foo = T{ super.foo() ++ Seq("object") }
- def cmd(i: Int) = T.command{ super.cmd(i)() ++ Seq("object" + i) }
+ def foo = T { super.foo() ++ Seq("object") }
+ def cmd(i: Int) = T.command { super.cmd(i)() ++ Seq("object" + i) }
}
```
@@ -79,13 +79,13 @@ keyword in Mill builds.
## millSourcePath
Each Module has a `millSourcePath` field that corresponds to the path that module
-expects it's input files to be on disk. Re-visiting our examples above:
+expects its input files to be on disk. Re-visiting our examples above:
```scala
-object foo extends mill.Module{
- def bar = T{ "hello" }
- object baz extends mill.Module{
- def qux = T{ "world" }
+object foo extends mill.Module {
+ def bar = T { "hello" }
+ object baz extends mill.Module {
+ def qux = T { "world" }
}
}
```
@@ -93,31 +93,31 @@ object foo extends mill.Module{
The `foo` module has a `millSourcePath` of `./foo`, while the `foo.baz` module has a
`millSourcePath` of `./foo/baz`.
-You can use `millSourcePath` to automatically set the source directories of your
+You can use `millSourcePath` to automatically set the source folders of your
modules to match the build structure. You are not forced to rigidly use
`millSourcePath` to define the source folders of all your code, but it can simplify
-the common case where you probably want your build-layout on on-disk-layout to
+the common case where you probably want your build-layout and on-disk-layout to
be the same.
-e.g. for `mill.scalalib.ScalaModule`, the Scala source code is assumed by
+E.g. for `mill.scalalib.ScalaModule`, the Scala source code is assumed by
default to be in `millSourcePath/"src"` while resources are automatically assumed to
be in `millSourcePath/"resources"`.
You can override `millSourcePath`:
```scala
-object foo extends mill.Module{
+object foo extends mill.Module {
def millSourcePath = super.millSourcePath / "lols"
- def bar = T{ "hello" }
- object baz extends mill.Module{
- def qux = T{ "world" }
+ def bar = T { "hello" }
+ object baz extends mill.Module {
+ def qux = T { "world" }
}
}
```
And any overrides propagate down to the module's children: in the above example,
-module `foo` would have it's `millSourcePath` be `./foo/lols` while module` foo.baz`
-would have it's `millSourcePath` be `./foo/lols/baz`.
+module `foo` would have its `millSourcePath` be `./foo/lols` while module` foo.baz`
+would have its `millSourcePath` be `./foo/lols/baz`.
Note that `millSourcePath` is generally only used for a module's input source files.
Output is always in the `out/` folder and cannot be changed, e.g. even with the
@@ -134,8 +134,8 @@ package foo
import mill._
object Bar extends mill.define.ExternalModule {
- def baz = T{ 1 }
- def qux() = T.command{ println(baz() + 1) }
+ def baz = T { 1 }
+ def qux() = T.command { println(baz() + 1) }
lazy val millDiscover = mill.define.Discover[this.type]
}
@@ -159,7 +159,7 @@ needing to define your own `T.command` in your `build.sc` file
## Foreign Modules
-Mill can load other mill projects from external (or sub) directories,
+Mill can load other mill projects from external (or sub) folders,
using Ammonite's `$file` magic import, allowing to depend on foreign modules.
This allows, for instance, to depend on other projects' sources, or split
your build logic into smaller files.
diff --git a/docs/pages/6 - Cross Builds.md b/docs/pages/6 - Cross Builds.md
index f92678d5..98540fae 100644
--- a/docs/pages/6 - Cross Builds.md
+++ b/docs/pages/6 - Cross Builds.md
@@ -7,9 +7,9 @@ You can use this as follows:
```scala
object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
-class FooModule(crossVersion: String) extends Module{
- def suffix = T{ crossVersion }
- def bigSuffix = T{ suffix().toUpperCase() }
+class FooModule(crossVersion: String) extends Module {
+ def suffix = T { crossVersion }
+ def bigSuffix = T { suffix().toUpperCase() }
}
```
@@ -48,15 +48,15 @@ foo/2.12/bigSuffix
You can also have a cross-build with multiple inputs:
```scala
-val crossMatrix = for{
+val crossMatrix = for {
crossVersion <- Seq("210", "211", "212")
platform <- Seq("jvm", "js", "native")
if !(platform == "native" && crossVersion != "212")
} yield (crossVersion, platform)
object foo extends mill.Cross[FooModule](crossMatrix:_*)
-class FooModule(crossVersion: String, platform: String) extends Module{
- def suffix = T{ crossVersion + "_" + platform }
+class FooModule(crossVersion: String, platform: String) extends Module {
+ def suffix = T { crossVersion + "_" + platform }
}
```
@@ -81,11 +81,11 @@ You can refer to targets defined in cross-modules as follows:
```scala
object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
-class FooModule(crossVersion: String) extends Module{
- def suffix = T{ crossVersion }
+class FooModule(crossVersion: String) extends Module {
+ def suffix = T { crossVersion }
}
-def bar = T{ "hello " + foo("2.10").suffix }
+def bar = T { "hello " + foo("2.10").suffix }
```
Here, `foo("2.10")` references the `"2.10"` instance of `FooModule`. You can
@@ -94,11 +94,11 @@ versions of the cross-module in the same target:
```scala
object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
-class FooModule(crossVersion: String) extends Module{
- def suffix = T{ crossVersion }
+class FooModule(crossVersion: String) extends Module {
+ def suffix = T { crossVersion }
}
-def bar = T{ "hello " + foo("2.10").suffix + " world " + foo("2.12").suffix }
+def bar = T { "hello " + foo("2.10").suffix + " world " + foo("2.12").suffix }
```
## Using Cross Modules from other Cross Modules
@@ -108,13 +108,13 @@ targets:
```scala
object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
-class FooModule(crossVersion: String) extends Module{
- def suffix = T{ crossVersion }
+class FooModule(crossVersion: String) extends Module {
+ def suffix = T { crossVersion }
}
object bar extends mill.Cross[BarModule]("2.10", "2.11", "2.12")
-class BarModule(crossVersion: String) extends Module{
- def bigSuffix = T{ foo(crossVersion).suffix().toUpperCase() }
+class BarModule(crossVersion: String) extends Module {
+ def bigSuffix = T { foo(crossVersion).suffix().toUpperCase() }
}
```
@@ -132,31 +132,31 @@ mill show bar[2.12].bigSuffix
## Cross Resolvers
-You can define an implicit `mill.define.Cross.Resolve` within your
+You can define an implicit `mill.define.Cross.Resolver` within your
cross-modules, which would let you use a shorthand `foo()` syntax when referring
to other cross-modules with an identical set of cross values:
```scala
-trait MyModule extends Module{
+trait MyModule extends Module {
def crossVersion: String
- implicit object resolver extends mill.define.Cross.Resolver[MyModule]{
+ implicit object resolver extends mill.define.Cross.Resolver[MyModule] {
def resolve[V <: MyModule](c: Cross[V]): V = c.itemMap(List(crossVersion))
}
}
object foo extends mill.Cross[FooModule]("2.10", "2.11", "2.12")
-class FooModule(val crossVersion: String) extends MyModule{
- def suffix = T{ crossVersion }
+class FooModule(val crossVersion: String) extends MyModule {
+ def suffix = T { crossVersion }
}
object bar extends mill.Cross[BarModule]("2.10", "2.11", "2.12")
-class BarModule(val crossVersion: String) extends MyModule{
- def longSuffix = T{ "_" + foo().suffix() }
+class BarModule(val crossVersion: String) extends MyModule {
+ def longSuffix = T { "_" + foo().suffix() }
}
```
While the example `resolver` simply looks up the target `Cross` value for the
cross-module instance with the same `crossVersion`, you can make the resolver
-arbitrarily complex. e.g. the `resolver` for `mill.scalalib.CrossSbtModule`
+arbitrarily complex. E.g. the `resolver` for `mill.scalalib.CrossSbtModule`
looks for a cross-module instance whose `scalaVersion` is binary compatible
-(e.g. 2.10.5 is compatible with 2.10.3) with the current cross-module.
\ No newline at end of file
+(e.g. 2.10.5 is compatible with 2.10.3) with the current cross-module.
diff --git a/docs/pages/7 - Extending Mill.md b/docs/pages/7 - Extending Mill.md
index 75b7643a..533dcbd4 100644
--- a/docs/pages/7 - Extending Mill.md
+++ b/docs/pages/7 - Extending Mill.md
@@ -8,8 +8,8 @@ The simplest way of adding custom functionality to Mill is to define a custom
Target or Command:
```scala
-def foo = T{ ... }
-def bar(x: Int, s: String) = T.command{ ... }
+def foo = T { ... }
+def bar(x: Int, s: String) = T.command { ... }
```
These can depend on other Targets, contain arbitrary code, and be placed
@@ -20,7 +20,7 @@ you're done.
For subprocess/filesystem operations, you can use the
[Ammonite-Ops](http://ammonite.io/#Ammonite-Ops) library that comes bundled with
-Mill, or even plain `java.nio`/`java.lang.Process`. Each target gets it's own
+Mill, or even plain `java.nio`/`java.lang.Process`. Each target gets its own
[T.ctx().dest](http://www.lihaoyi.com/mill/page/tasks#millutilctxdestctx) folder
that you can use to place files without worrying about colliding with other
targets.
@@ -32,23 +32,23 @@ This covers use cases like:
```scala
def doWebpackStuff(sources: Seq[PathRef]): PathRef = ???
-def javascriptSources = T.sources{ millSourcePath / "js" }
-def compiledJavascript = T{ doWebpackStuff(javascriptSources()) }
-object foo extends ScalaModule{
- def runClasspath = T{ super.runClasspath() ++ compiledJavascript() }
+def javascriptSources = T.sources { millSourcePath / "js" }
+def compiledJavascript = T { doWebpackStuff(javascriptSources()) }
+object foo extends ScalaModule {
+ def runClasspath = T { super.runClasspath() ++ compiledJavascript() }
}
```
### Deploy your compiled assembly to AWS
```scala
-object foo extends ScalaModule{
+object foo extends ScalaModule {
}
def deploy(assembly: PathRef, credentials: String) = ???
-def deployFoo(credentials: String) = T.command{ deployFoo(foo.assembly()) }
+def deployFoo(credentials: String) = T.command { deployFoo(foo.assembly()) }
```
@@ -56,31 +56,31 @@ def deployFoo(credentials: String) = T.command{ deployFoo(foo.assembly()) }
[Custom Targets & Commands](#custom-targets--commands) are re-computed from
scratch each time; sometimes you want to keep values around in-memory when using
-`--watch` or the Build REPL. e.g. you may want to keep a webpack process running
+`--watch` or the Build REPL. E.g. you may want to keep a webpack process running
so webpack's own internal caches are hot and compilation is fast:
```scala
-def webpackWorker = T.worker{
+def webpackWorker = T.worker {
// Spawn a process using java.lang.Process and return it
}
-def javascriptSources = T.sources{ millSourcePath / "js" }
+def javascriptSources = T.sources { millSourcePath / "js" }
def doWebpackStuff(webpackProcess: Process, sources: Seq[PathRef]): PathRef = ???
-def compiledJavascript = T{ doWebpackStuff(webpackWorker(), javascriptSources()) }
+def compiledJavascript = T { doWebpackStuff(webpackWorker(), javascriptSources()) }
```
-Mill itself uses `T.worker`s for it's built-in Scala support: we keep the Scala
+Mill itself uses `T.worker`s for its built-in Scala support: we keep the Scala
compiler in memory between compilations, rather than discarding it each time, in
order to improve performance.
## Custom Modules
```scala
-trait FooModule extends mill.Module{
- def bar = T{ "hello" }
- def baz = T{ "world" }
+trait FooModule extends mill.Module {
+ def bar = T { "hello" }
+ def baz = T { "world" }
}
```
@@ -91,8 +91,8 @@ want in various `object`s:
```scala
object foo1 extends FooModule
-object foo2 extends FooModule{
- def qux = T{ "I am Cow" }
+object foo2 extends FooModule {
+ def qux = T { "I am Cow" }
}
```
@@ -100,9 +100,9 @@ You can also define a `trait` extending the built-in `ScalaModule` if you have
common configuration you want to apply to all your `ScalaModule`s:
```scala
-trait FooModule extends ScalaModule{
+trait FooModule extends ScalaModule {
def scalaVersion = "2.11.11"
- object test extends Tests{
+ object test extends Tests {
def ivyDeps = Agg(ivy"org.scalatest::scalatest:3.0.4")
def testFrameworks = Seq("org.scalatest.tools.Framework")
}
@@ -122,7 +122,7 @@ def fooValue() = 31337
```scala
// build.sc
import $file.foo
-def printFoo() = T.command{ println(foo.fooValue()) }
+def printFoo() = T.command { println(foo.fooValue()) }
```
Mill's `import $file` syntax supports the full functionality of
@@ -131,14 +131,13 @@ Mill's `import $file` syntax supports the full functionality of
## import $ivy
If you want to pull in artifacts from the public repositories (e.g. Maven
-Central) for use in your build, you can simple use `import $ivy`:
+Central) for use in your build, you can simply use `import $ivy`:
```scala
// build.sc
import $ivy.`com.lihaoyi::scalatags:0.6.2`
-
-def generatedHtml = T{
+def generatedHtml = T {
import scalatags.Text.all._
html(
head(),
@@ -157,7 +156,7 @@ If you want to publish re-usable libraries that *other* people can use in their
builds, simply publish your code as a library to maven central.
For more information, see Ammonite's
-[Ivy Dependencies documentation](http://ammonite.io/#import$ivy)
+[Ivy Dependencies documentation](http://ammonite.io/#import$ivy).
## Evaluator Commands
@@ -167,7 +166,7 @@ example, here is the `mill.scalalib.GenIdea/idea` command which uses this to
traverse the module-tree and generate an Intellij project config for your build.
```scala
-def idea(ev: Evaluator[Any]) = T.command{
+def idea(ev: Evaluator[Any]) = T.command {
mill.scalalib.GenIdea(
implicitly,
ev.rootModule,
diff --git a/docs/pages/8 - Mill Internals.md b/docs/pages/8 - Mill Internals.md
index 3700c9df..28a9a768 100644
--- a/docs/pages/8 - Mill Internals.md
+++ b/docs/pages/8 - Mill Internals.md
@@ -1,7 +1,7 @@
## Mill Design Principles
-A lot of mills design principles are intended to fix SBT's flaws, as described
+A lot of Mill's design principles are intended to fix SBT's flaws, as described
in the blog post
[What's wrong with SBT](http://www.lihaoyi.com/post/SowhatswrongwithSBT.html),
building on the best ideas from tools like [CBT](https://github.com/cvogt/cbt)
@@ -14,7 +14,7 @@ from!
### Dependency graph first
Mill's most important abstraction is the dependency graph of `Task`s.
-Constructed using the `T{...}` `T.task{...}` `T.command{...}` syntax, these
+Constructed using the `T {...}` `T.task {...}` `T.command {...}` syntax, these
track the dependencies between steps of a build, so those steps can be executed
in the correct order, queried, or parallelized.
@@ -22,7 +22,7 @@ While Mill provides helpers like `ScalaModule` and other things you can use to
quickly instantiate a bunch of related tasks (resolve dependencies, find
sources, compile, package into jar, ...) these are secondary. When Mill
executes, the dependency graph is what matters: any other mode of organization
-(hierarchies, modules, inheritence, etc.) is only important to create this
+(hierarchies, modules, inheritance, etc.) is only important to create this
dependency graph of `Task`s.
### Builds are hierarchical
@@ -32,7 +32,7 @@ the same as referencing a target in Scala code, `Foo.bar.baz`
Everything that you can run from the command line lives in an object hierarchy
in your `build.sc` file. Different parts of the hierarchy can have different
-`Target`s available: just add a new `def foo = T{...}` somewhere and you'll be
+`Target`s available: just add a new `def foo = T {...}` somewhere and you'll be
able to run it.
Cross builds, using the `Cross` data structure, are just another kind of node in
@@ -42,7 +42,7 @@ run something via `mill core.cross[a].printIt` while from code you use
### Caching by default
-Every `Target` in a build, defined by `def foo = T{...}`, is cached by default.
+Every `Target` in a build, defined by `def foo = T {...}`, is cached by default.
Currently this is done using a `foo/meta.json` file in the `out/` folder. The
`Target` is also provided a `foo/` path on the filesystem dedicated to it, for
it to store output files etc.
@@ -99,12 +99,12 @@ sorts of useful operations on the graph before running it:
with an exception
In order to avoid making people using `.map` and `.zip` all over the place when
-defining their `Task`s, we use the `T{...}`/`T.task{...}`/`T.command{...}`
+defining their `Task`s, we use the `T {...}`/`T.task {...}`/`T.command {...}`
macros which allow you to use `Task#apply()` within the block to "extract" a
value.
```scala
-def test() = T.command{
+def test() = T.command {
TestRunner.apply(
"mill.UTestFramework",
runDepClasspath().map(_.path) :+ compile().path,
@@ -113,10 +113,10 @@ def test() = T.command{
}
```
-This is roughly to the following:
+This is roughly equivalent to the following:
```scala
-def test() = T.command{ T.zipMap(runDepClasspath, compile, compile){
+def test() = T.command { T.zipMap(runDepClasspath, compile, compile) {
(runDepClasspath1, compile2, compile3) =>
TestRunner.apply(
"mill.UTestFramework",
@@ -127,7 +127,7 @@ def test() = T.command{ T.zipMap(runDepClasspath, compile, compile){
```
This is similar to SBT's `:=`/`.value` macros, or `scala-async`'s
-`async`/`await`. Like those, the `T{...}` macro should let users program most of
+`async`/`await`. Like those, the `T {...}` macro should let users program most of
their code in a "direct" style and have it "automatically" lifted into a graph
of `Task`s.
@@ -146,7 +146,7 @@ Build tools inherently encompass a huge number of different concepts:
- How do tasks pass data to each other? What data do they pass?
- What tasks are cached? Where?
- How are tasks run from the command line?
-- How do you deal with the repetition inherent a build? (e.g. compile, run &
+- How do you deal with the repetition inherent in a build? (e.g. compile, run &
test tasks for every "module")
- What is a "Module"? How do they relate to "Tasks"?
- How do you configure a Module to do something different?
@@ -202,7 +202,7 @@ are sure that it will never clash with any other `Target`'s data.
## The Call Graph
The Scala call graph of "which target references which other target" is core to
-how Mill operates. This graph is reified via the `T{...}` macro to make it
+how Mill operates. This graph is reified via the `T {...}` macro to make it
available to the Mill execution engine at runtime. The call graph tells you:
- Which `Target`s depend on which other `Target`s
@@ -219,7 +219,7 @@ available to the Mill execution engine at runtime. The call graph tells you:
its return value)
- Defining your own task that depends on others is as simple as `def foo =
- T{...}`
+ T {...}`
The call graph within your Scala code is essentially a data-flow graph: by
defining a snippet of code:
@@ -236,15 +236,15 @@ you are telling everyone that the value `a` depends on the values of `b` `c` and
knowing what `Target` depends on what other `Target`s, and what processing it
does on its inputs!
-With Mill, you can take the Scala call graph, wrap everything in the `T{...}`
+With Mill, you can take the Scala call graph, wrap everything in the `T {...}`
macro, and get a `Target`-dependency graph that matches exactly the call-graph
you already had:
```scala
-val b = T{ ... }
-val c = T{ ... }
-val d = T{ ... }
-val a = T{ f(b(), c(), d()) }
+val b = T { ... }
+val c = T { ... }
+val d = T { ... }
+val a = T { f(b(), c(), d()) }
```
Thus, if you are familiar with how data flows through a normal Scala program,
@@ -268,12 +268,12 @@ a build:
- Replacing the default `Target`s within a project, making them do new
things or depend on new `Target`s, is simply `override`-ing them during
- inheritence.
+ inheritance
- Modifying the default `Target`s within a project, making use of the old value
to compute the new value, is simply `override`ing them and using `super.foo()`
-- Required configuration parameters within a `project` are `abstract` members.
+- Required configuration parameters within a `project` are `abstract` members
- Cross-builds are modelled as instantiating a (possibly anonymous) class
multiple times, each instance with its own distinct set of `Target`s
@@ -297,10 +297,10 @@ graph with an Applicative "idiom bracket" macro) where it makes sense.
### Bazel
Mill is largely inspired by [Bazel](https://bazel.build/). In particular, the
-single-build-hierarchy, where every Target has an on-disk-cache/output-directory
+single-build-hierarchy, where every Target has an on-disk-cache/output-folder
according to their position in the hierarchy, comes from Bazel.
-Bazel is a bit odd in it’s own right. The underlying data model is good
+Bazel is a bit odd in its own right. The underlying data model is good
(hierarchy + cached dependency graph) but getting there is hell. It (like SBT) is
also a 3-layer interpretation model, but layers 1 & 2 are almost exactly the
same: mutable python which performs global side effects (layer 3 is the same
@@ -339,7 +339,7 @@ something that's easier to use.
Mill's "direct-style" applicative syntax is inspired by my old
[Scala.Rx](https://github.com/lihaoyi/scala.rx) project. While there are
differences (Mill captures the dependency graph lexically using Macros, Scala.Rx
-captures it at runtime, they are pretty similar.
+captures it at runtime), they are pretty similar.
The end-goal is the same: to write code in a "direct style" and have it
automatically "lifted" into a dependency graph, which you can introspect and use