about summary refs log tree commit diff
path: root/graphx/src
diff options
context:
space:
mode:
authorDongjoon Hyun <dongjoon@apache.org>2016-03-14 09:07:39 +0000
committerSean Owen <sowen@cloudera.com>2016-03-14 09:07:39 +0000
commitacdf21970334cea9d6cfc287e4ccb8e72de9dee1 (patch)
treedf8bcf3d80dc92ad74c5c27bd3618397205bcc86 /graphx/src
parente58fa19d17db5dd8a00551e20b46921f98b958f7 (diff)
downloadspark-acdf21970334cea9d6cfc287e4ccb8e72de9dee1.tar.gz
spark-acdf21970334cea9d6cfc287e4ccb8e72de9dee1.tar.bz2
spark-acdf21970334cea9d6cfc287e4ccb8e72de9dee1.zip
[MINOR][DOCS] Fix more typos in comments/strings.
## What changes were proposed in this pull request?

This PR fixes 135 typos over 107 files:
* 121 typos in comments
* 11 typos in testcase name
* 3 typos in log messages

## How was this patch tested?

Manual.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11689 from dongjoon-hyun/fix_more_typos.
Diffstat (limited to 'graphx/src')
-rw-r--r--graphx/src/main/scala/org/apache/spark/graphx/Graph.scala2
-rw-r--r--graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala4
-rw-r--r--graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala2
-rw-r--r--graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala4
-rw-r--r--graphx/src/main/scala/org/apache/spark/graphx/package.scala2
-rw-r--r--graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala2
-rw-r--r--graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala2
7 files changed, 9 insertions, 9 deletions
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/Graph.scala b/graphx/src/main/scala/org/apache/spark/graphx/Graph.scala
index fe884d0022..5485e30f5a 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/Graph.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/Graph.scala
@@ -297,7 +297,7 @@ abstract class Graph[VD: ClassTag, ED: ClassTag] protected () extends Serializab
/**
* Restricts the graph to only the vertices and edges satisfying the predicates. The resulting
- * subgraph satisifies
+ * subgraph satisfies
*
* {{{
* V' = {v : for all v in V where vpred(v)}
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala b/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala
index d537b6141c..fcb1b5999f 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/GraphOps.scala
@@ -236,11 +236,11 @@ class GraphOps[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED]) extends Seriali
* @param preprocess a function to compute new vertex and edge data before filtering
* @param epred edge pred to filter on after preprocess, see more details under
* [[org.apache.spark.graphx.Graph#subgraph]]
- * @param vpred vertex pred to filter on after prerocess, see more details under
+ * @param vpred vertex pred to filter on after preprocess, see more details under
* [[org.apache.spark.graphx.Graph#subgraph]]
* @tparam VD2 vertex type the vpred operates on
* @tparam ED2 edge type the epred operates on
- * @return a subgraph of the orginal graph, with its data unchanged
+ * @return a subgraph of the original graph, with its data unchanged
*
* @example This function can be used to filter the graph based on some property, without
* changing the vertex and edge values in your program. For example, we could remove the vertices
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala b/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala
index 6dab465fb9..a4e293d74a 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/impl/ShippableVertexPartition.scala
@@ -49,7 +49,7 @@ object ShippableVertexPartition {
/**
* Construct a `ShippableVertexPartition` from the given vertices with the specified routing
* table, filling in missing vertices mentioned in the routing table using `defaultVal`,
- * and merging duplicate vertex atrribute with mergeFunc.
+ * and merging duplicate vertex attribute with mergeFunc.
*/
def apply[VD: ClassTag](
iter: Iterator[(VertexId, VD)], routingTable: RoutingTablePartition, defaultVal: VD,
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala b/graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala
index 46faad2e68..00ba358a9b 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala
@@ -54,7 +54,7 @@ import org.apache.spark.graphx._
* }}}
*
* `alpha` is the random reset probability (typically 0.15), `inNbrs[i]` is the set of
- * neighbors whick link to `i` and `outDeg[j]` is the out degree of vertex `j`.
+ * neighbors which link to `i` and `outDeg[j]` is the out degree of vertex `j`.
*
* Note that this is not the "normalized" PageRank and as a consequence pages that have no
* inlinks will have a PageRank of alpha.
@@ -209,7 +209,7 @@ object PageRank extends Logging {
}
// Set the weight on the edges based on the degree
.mapTriplets( e => 1.0 / e.srcAttr )
- // Set the vertex attributes to (initalPR, delta = 0)
+ // Set the vertex attributes to (initialPR, delta = 0)
.mapVertices { (id, attr) =>
if (id == src) (resetProb, Double.NegativeInfinity) else (0.0, 0.0)
}
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/package.scala b/graphx/src/main/scala/org/apache/spark/graphx/package.scala
index 6aab28ff05..dde25b9659 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/package.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/package.scala
@@ -30,7 +30,7 @@ package object graphx {
*/
type VertexId = Long
- /** Integer identifer of a graph partition. Must be less than 2^30. */
+ /** Integer identifier of a graph partition. Must be less than 2^30. */
// TODO: Consider using Char.
type PartitionID = Int
diff --git a/graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala b/graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala
index a6d0cb6409..d76e84ed8c 100644
--- a/graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala
+++ b/graphx/src/main/scala/org/apache/spark/graphx/util/BytecodeUtils.scala
@@ -92,7 +92,7 @@ private[graphx] object BytecodeUtils {
/**
* Given the class name, return whether we should look into the class or not. This is used to
- * skip examing a large quantity of Java or Scala classes that we know for sure wouldn't access
+ * skip examining a large quantity of Java or Scala classes that we know for sure wouldn't access
* the closures. Note that the class name is expected in ASM style (i.e. use "/" instead of ".").
*/
private def skipClass(className: String): Boolean = {
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
index f497e001df..cb981797d3 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
@@ -229,7 +229,7 @@ class GraphSuite extends SparkFunSuite with LocalSparkContext {
test("subgraph") {
withSpark { sc =>
- // Create a star graph of 10 veritces.
+ // Create a star graph of 10 vertices.
val n = 10
val star = starGraph(sc, n)
// Take only vertices whose vids are even