From 8d8c951d2716873f8b1a2dd00b56a8a73ff45e31 Mon Sep 17 00:00:00 2001
From: Sakib Hadžiavdić
Date: Thu, 7 Jun 2018 05:41:20 +0200
Subject: Fixed typos, wordings, formatted examples (#365)

---
 docs/pages/4 - Tasks.md | 106 ++++++++++++++++++++++++------------------------
 1 file changed, 53 insertions(+), 53 deletions(-)

(limited to 'docs/pages/4 - Tasks.md')

diff --git a/docs/pages/4 - Tasks.md b/docs/pages/4 - Tasks.md
index ef1ef054..d87b324b 100644
--- a/docs/pages/4 - Tasks.md
+++ b/docs/pages/4 - Tasks.md
@@ -1,4 +1,4 @@
-One of Mill's core abstractions is it's *Task Graph*: this is how Mill defines,
+One of Mill's core abstractions is its *Task Graph*: this is how Mill defines,
 orders and caches work it needs to do, and exists independently of any support
 for building Scala.
 
@@ -12,22 +12,22 @@ import ammonite.ops._, mill._
 //                                 v
 //           resourceRoot ----> jar
 
-def sourceRoot = T.sources{ pwd / 'src }
+def sourceRoot = T.sources { pwd / 'src }
 
-def resourceRoot = T.sources{ pwd / 'resources }
+def resourceRoot = T.sources { pwd / 'resources }
 
-def allSources = T{ sourceRoot().flatMap(p => ls.rec(p.path)).map(PathRef(_)) }
+def allSources = T { sourceRoot().flatMap(p => ls.rec(p.path)).map(PathRef(_)) }
 
-def classFiles = T{
+def classFiles = T {
   mkdir(T.ctx().dest)
   import ammonite.ops._
   %("javac", allSources().map(_.path.toString()), "-d", T.ctx().dest)(wd = T.ctx().dest)
   PathRef(T.ctx().dest)
 }
 
-def jar = T{ Jvm.createJar(Loose.Agg(classFiles().path) ++ resourceRoot().map(_.path)) }
+def jar = T { Jvm.createJar(Loose.Agg(classFiles().path) ++ resourceRoot().map(_.path)) }
 
-def run(mainClsName: String) = T.command{
+def run(mainClsName: String) = T.command {
   %%('java, "-cp", classFiles().path, mainClsName)
 }
 ```
@@ -35,7 +35,7 @@
 
 Here, we have two `T.sources`s, `sourceRoot` and `resourceRoot`, which act as
 the roots of our task graph. `allSources` depends on `sourceRoot` by calling
-`sourceRoot()` to extract it's value, `classFiles` depends on `allSources` the
+`sourceRoot()` to extract its value, `classFiles` depends on `allSources` the
 same way, and `jar` depends on both `classFiles` and `resourceRoot`.
 
 Filesystem operations in Mill are done using the
@@ -54,7 +54,7 @@ When you first evaluate `jar` (e.g. via `mill jar` at the command line), it
 will evaluate all the defined targets: `sourceRoot`, `allSources`,
 `classFiles`, `resourceRoot` and `jar`.
 
-Subsequent `mill jars` will evaluate only as much as is necessary, depending on
+Subsequent `mill jar`s will evaluate only as much as is necessary, depending on
 what input sources changed:
 
 - If the files in `sourceRoot` change, it will re-evaluate `allSources`,
@@ -64,30 +64,30 @@ what input sources changed:
   the cached output of `allSources` and `classFiles`
 
 Apart from the `foo()` call-sites which define what each target depends on, the
-code within each `T{...}` wrapper is arbirary Scala code that can compute an
-arbitrary result from it's inputs.
+code within each `T {...}` wrapper is arbitrary Scala code that can compute an
+arbitrary result from its inputs.
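To make that last point concrete, here is a minimal sketch, assuming the same build file and imports as above (`lineCount` is a made-up target, not part of the original example): it runs ordinary Scala over the `allSources` value and caches the resulting number like any other target.

```scala
// Hypothetical derived target: arbitrary Scala code over an upstream value.
// It reads the .java files gathered by allSources and sums their line counts;
// the Int result is cached and only recomputed when the sources change.
def lineCount = T {
  allSources()
    .map(_.path)
    .filter(_.ext == "java")
    .map(p => read.lines(p).size)
    .sum
}
```

Because `lineCount` only depends on `allSources`, a subsequent `mill lineCount` re-uses the cached value unless the files under `sourceRoot` change.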
## Different Kinds of Tasks There are four primary kinds of *Tasks* that you should care about: -- [Targets](#targets), defined using `T{...}` -- [Sources](#sources), defined using `T.sources{...}` -- [Commands](#commands), defined using `T.command{...}` +- [Targets](#targets), defined using `T {...}` +- [Sources](#sources), defined using `T.sources {...}` +- [Commands](#commands), defined using `T.command {...}` ### Targets ```scala -def allSources = T{ ls.rec(sourceRoot().path).map(PathRef(_)) } +def allSources = T { ls.rec(sourceRoot().path).map(PathRef(_)) } ``` -`Target`s are defined using the `def foo = T{...}` syntax, and dependencies on +`Target`s are defined using the `def foo = T {...}` syntax, and dependencies on other targets are defined using `foo()` to extract the value from them. Apart -from the `foo()` calls, the `T{...}` block contains arbitrary code that does +from the `foo()` calls, the `T {...}` block contains arbitrary code that does some work and returns a result. -Each target e.g. `classFiles` is assigned a path on disk as scratch space & to -store it's output files at `out/classFiles/dest/`, and it's returned metadata is +Each target, e.g. `classFiles`, is assigned a path on disk as scratch space & to +store its output files at `out/classFiles/dest/`, and its returned metadata is automatically JSON-serialized and stored at `out/classFiles/meta.json`. The return-value of targets has to be JSON-serializable via [uPickle](https://github.com/lihaoyi/upickle). @@ -97,73 +97,73 @@ write them to disk within your `T.ctx().dest` available through the [Task Context API](#task-context-api) and return a `PathRef` to the files you wrote. -If a target's inputs change but it's output does not, e.g. someone changes a +If a target's inputs change but its output does not, e.g. someone changes a comment within the source files that doesn't affect the classfiles, then downstream targets do not re-evaluate. This is determined using the `.hashCode` -of the Target's return value. For target's returning `ammonite.ops.Path`s that +of the Target's return value. For targets returning `ammonite.ops.Path`s that reference files on disk, you can wrap the `Path` in a `PathRef` (shown above) whose `.hashCode()` will include the hashes of all files on disk at time of creation. The graph of inter-dependent targets is evaluated in topological order; that -means that the body of a target will not even begin to evaluate if one of it's +means that the body of a target will not even begin to evaluate if one of its upstream dependencies has failed. This is unlike normal Scala functions: a plain old function `foo` would evaluate halfway and then blow up if one of `foo`'s dependencies throws an exception. Targets cannot take parameters and must be 0-argument `def`s defined directly -within a `Module` body +within a `Module` body. ### Sources ```scala def sourceRootPath = pwd / 'src -def sourceRoots = T.sources{ sourceRootPath } +def sourceRoots = T.sources { sourceRootPath } ``` -`Source`s are defined using `T.sources{ ... }`, taking one-or-more +`Source`s are defined using `T.sources { ... }`, taking one-or-more `ammonite.ops.Path`s as arguments. A `Source` is a subclass of -`Target[Seq[PathRef]]`: this means that it's build signature/`hashCode` depends +`Target[Seq[PathRef]]`: this means that its build signature/`hashCode` depends not just on the path it refers to (e.g. `foo/bar/baz`) but also the MD5 hash of the filesystem tree under that path. 
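Since `T.sources` accepts one or more paths, a single source can also cover several directories. A minimal sketch, assuming the build above (`multiRoots` and the `gen` directory are made up for illustration):

```scala
// Hypothetical: one T.sources spanning two roots. Both directories end up in
// the resulting Seq[PathRef], and the signature covers the files under each.
def multiRoots = T.sources(pwd / 'src, pwd / 'gen)
```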
`T.sources` also has an overload which takes `Seq[PathRef]`, to let you -override-and-extend source lists the same way you would any other `T{...}` +override-and-extend source lists the same way you would any other `T {...}` definition: ```scala -def additionalSources = T.sources{ pwd / 'additionalSources } -def sourceRoots = T.sources{ super.sourceRoots() ++ additionalSources() } +def additionalSources = T.sources { pwd / 'additionalSources } +def sourceRoots = T.sources { super.sourceRoots() ++ additionalSources() } ``` ### Commands ```scala -def run(mainClsName: String) = T.command{ +def run(mainClsName: String) = T.command { %%('java, "-cp", classFiles().path, mainClsName) } ``` -Defined using `T.command{ ... }` syntax, `Command`s can run arbitrary code, with +Defined using `T.command { ... }` syntax, `Command`s can run arbitrary code, with dependencies declared using the same `foo()` syntax (e.g. `classFiles()` above). Commands can be parametrized, but their output is not cached, so they will re-evaluate every time even if none of their inputs have changed. -Like [Targets](#targets), a command only evaluates after all it's upstream +Like [Targets](#targets), a command only evaluates after all its upstream dependencies have completed, and will not begin to run if any upstream dependency has failed. -Commands are assigned the same scratch/output directory `out/run/dest/` as -Targets are, and it's returned metadata stored at the same `out/run/meta.json` +Commands are assigned the same scratch/output folder `out/run/dest/` as +Targets are, and its returned metadata stored at the same `out/run/meta.json` path for consumption by external tools. Commands can only be defined directly within a `Module` body. ## Task Context API -There are several APIs available to you within the body of a `T{...}` or -`T.command{...}` block to help your write the code implementing your Target or +There are several APIs available to you within the body of a `T {...}` or +`T.command {...}` block to help your write the code implementing your Target or Command: ### mill.util.Ctx.Dest @@ -186,7 +186,7 @@ to those same paths. This is the default logger provided for every task. While your task is running, `System.out` and `System.in` are also redirected to this logger. The logs for a task are streamed to standard out/error as you would expect, but each task's -specific output is also streamed to a log file on disk e.g. `out/run/log` or +specific output is also streamed to a log file on disk, e.g. `out/run/log` or `out/classFiles/log` for you to inspect later. ### mill.util.Ctx.Env @@ -210,7 +210,7 @@ def envVar = T.input { T.ctx().env.get("ENV_VAR") } ## Other Tasks -- [Anonymous Tasks](#anonymous-tasks), defined using `T.task{...}` +- [Anonymous Tasks](#anonymous-tasks), defined using `T.task {...}` - [Persistent Targets](#persistent-targets) - [Inputs](#inputs) - [Workers](#workers) @@ -219,34 +219,34 @@ def envVar = T.input { T.ctx().env.get("ENV_VAR") } ### Anonymous Tasks ```scala -def foo(x: Int) = T.task{ ... x ... bar() ... } +def foo(x: Int) = T.task { ... x ... bar() ... } ``` -You can define anonymous tasks using the `T.task{ ... }` syntax. These are not +You can define anonymous tasks using the `T.task { ... }` syntax. These are not runnable from the command-line, but can be used to share common code you find yourself repeating in `Target`s and `Command`s. ```scala -def downstreamTarget = T{ ... foo() ... } -def downstreamCommand = T.command{ ... foo() ... } +def downstreamTarget = T { ... foo() ... 
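  // Note: the `foo(...)` call above simply runs as part of this target's own
  // evaluation; the anonymous task's result is never cached separately.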
} +def downstreamCommand = T.command { ... foo() ... } ``` -Anonymous tasks's output does not need to be JSON-serializable, their output is +Anonymous task's output does not need to be JSON-serializable, their output is not cached, and they can be defined with or without arguments. Unlike [Targets](#targets) or [Commands](#commands), anonymous tasks can be defined anywhere and passed around any way you want, until you finally make use of them within a downstream target or command. While an anonymous task `foo`'s own output is not cached, if it is used in a -downstream target `bar` and the upstream targets's `baz` `qux` haven't changed, +downstream target `bar` and the upstream targets `baz` `qux` haven't changed, `bar`'s cached output will be used and `foo`'s evaluation will be skipped altogether. ### Persistent Targets ```scala -def foo = T.persistent{ ... } +def foo = T.persistent { ... } ``` -Identical to [Targets](#targets), except that the `dest/` directory is not +Identical to [Targets](#targets), except that the `dest/` folder is not cleared in between runs. This is useful if you are running external incremental-compilers, such as @@ -263,11 +263,11 @@ filesystem states existed before. ### Inputs ```scala -def foo = T.input{ ... } +def foo = T.input { ... } ``` A generalization of [Sources](#sources), `T.input`s are tasks that re-evaluate -*every time* (Unlike [Anonymous Tasks](#anonymous-tasks)), containing an +*every time* (unlike [Anonymous Tasks](#anonymous-tasks)), containing an arbitrary block of code. Inputs can be used to force re-evaluation of some external property that may @@ -275,7 +275,7 @@ affect your build. For example, if I have a [Target](#targets) `bar` that makes use of the current git version: ```scala -def bar = T{ ... %%("git", "rev-parse", "HEAD").out.string ... } +def bar = T { ... %%("git", "rev-parse", "HEAD").out.string ... } ``` `bar` will not know that `git rev-parse` can change, and will @@ -286,8 +286,8 @@ be out of date! To fix this, you can wrap your `git rev-parse HEAD` in a `T.input`: ```scala -def foo = T.input{ %%("git", "rev-parse", "HEAD").out.string } -def bar = T{ ... foo() ... } +def foo = T.input { %%("git", "rev-parse", "HEAD").out.string } +def bar = T { ... foo() ... } ``` This makes `foo` will always re-evaluate every build; if `git rev-parse HEAD` @@ -303,7 +303,7 @@ targets. ### Workers ```scala -def foo = T.worker{ ... } +def foo = T.worker { ... } ``` Most tasks dispose of their in-memory return-value every evaluation; in the case @@ -316,7 +316,7 @@ Workers are unique in that they store their in-memory return-value between evaluations. This makes them useful for storing in-memory caches or references to long-lived external worker processes that you can re-use. -Mill uses workers to managed long-lived instances of the +Mill uses workers to manage long-lived instances of the [Zinc Incremental Scala Compiler](https://github.com/sbt/zinc) and the [Scala.js Optimizer](https://github.com/scala-js/scala-js). This lets us keep them in-memory with warm caches and fast incremental execution. -- cgit v1.2.3
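To round off the Workers section above with a concrete example, here is a minimal sketch, assuming the same build file as in the earlier examples; `Compiler`, `compilerWorker` and `compiled` are made-up names, and `Compiler` merely stands in for an expensive, long-lived object such as a Zinc compiler instance:

```scala
// Hypothetical long-lived object: with T.worker it is constructed once and
// then kept in memory across evaluations instead of being rebuilt each time.
class Compiler {
  def compile(sources: Seq[PathRef]): Int = sources.length // stand-in for real work
}

def compilerWorker = T.worker { new Compiler }

// Downstream targets obtain the live instance via compilerWorker() as usual.
def compiled = T { compilerWorker().compile(allSources()) }
```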