author     Shivaram Venkataraman <shivaram@cs.berkeley.edu>  2015-05-05 14:40:33 -0700
committer  Reynold Xin <rxin@databricks.com>                 2015-05-05 14:40:33 -0700
commit     c688e3c5e46b26cb9fdba7987139c9ea63e2458b (patch)
tree       41fd5a477351567896187dc7791005ef112a74d7 /R/pkg/NAMESPACE
parent     3059291e2027fb3ebbb2d3376610ce450f7c15a3 (diff)
[SPARK-7230] [SPARKR] Make RDD private in SparkR.
This change makes the RDD API private in SparkR; all internal uses of the SparkR API now go through SparkR::: to access private functions.

Author: Shivaram Venkataraman <shivaram@cs.berkeley.edu>

Closes #5895 from shivaram/rrdd-private and squashes the following commits:

bdb2f07 [Shivaram Venkataraman] Make RDD private in SparkR. This change also makes all internal uses of the SparkR API use SparkR::: to access private functions
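With the RDD classes and methods dropped from the NAMESPACE exports, code outside the package can only reach them through R's ':::' operator. A minimal sketch of what this means for callers, assuming a local master and illustrative data (the commit itself prescribes none of these values):

    library(SparkR)
    sc <- sparkR.init(master = "local")          # sparkR.init remains exported
    # Formerly exported RDD functions are now internal-only:
    rdd <- SparkR:::parallelize(sc, 1:10, 2L)    # private RDD constructor
    doubled <- SparkR:::map(rdd, function(x) x * 2)
    SparkR:::collect(doubled)                    # internal access via ':::', as the
                                                 # commit message describes for SparkR itself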
Diffstat (limited to 'R/pkg/NAMESPACE')
-rw-r--r--  R/pkg/NAMESPACE  106
1 file changed, 13 insertions(+), 93 deletions(-)
diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE
index e077eace74..1fb3311b7f 100644
--- a/R/pkg/NAMESPACE
+++ b/R/pkg/NAMESPACE
@@ -1,117 +1,35 @@
-#exportPattern("^[[:alpha:]]+")
-exportClasses("RDD")
-exportClasses("Broadcast")
-exportMethods(
- "aggregateByKey",
- "aggregateRDD",
- "cache",
- "cartesian",
- "checkpoint",
- "coalesce",
- "cogroup",
- "collect",
- "collectAsMap",
- "collectPartition",
- "combineByKey",
- "count",
- "countByKey",
- "countByValue",
- "distinct",
- "Filter",
- "filterRDD",
- "first",
- "flatMap",
- "flatMapValues",
- "fold",
- "foldByKey",
- "foreach",
- "foreachPartition",
- "fullOuterJoin",
- "glom",
- "groupByKey",
- "intersection",
- "join",
- "keyBy",
- "keys",
- "length",
- "lapply",
- "lapplyPartition",
- "lapplyPartitionsWithIndex",
- "leftOuterJoin",
- "lookup",
- "map",
- "mapPartitions",
- "mapPartitionsWithIndex",
- "mapValues",
- "maximum",
- "minimum",
- "numPartitions",
- "partitionBy",
- "persist",
- "pipeRDD",
- "reduce",
- "reduceByKey",
- "reduceByKeyLocally",
- "repartition",
- "rightOuterJoin",
- "sampleByKey",
- "sampleRDD",
- "saveAsTextFile",
- "saveAsObjectFile",
- "sortBy",
- "sortByKey",
- "subtract",
- "subtractByKey",
- "sumRDD",
- "take",
- "takeOrdered",
- "takeSample",
- "top",
- "unionRDD",
- "unpersist",
- "value",
- "values",
- "zipPartitions",
- "zipRDD",
- "zipWithIndex",
- "zipWithUniqueId"
- )
+# Imports from base R
+importFrom(methods, setGeneric, setMethod, setOldClass)
+useDynLib(SparkR, stringHashCode)
# S3 methods exported
-export(
- "textFile",
- "objectFile",
- "parallelize",
- "hashCode",
- "includePackage",
- "broadcast",
- "setBroadcastValue",
- "setCheckpointDir"
- )
export("sparkR.init")
export("sparkR.stop")
export("print.jobj")
-useDynLib(SparkR, stringHashCode)
-importFrom(methods, setGeneric, setMethod, setOldClass)
-
-# SparkRSQL
exportClasses("DataFrame")
-exportMethods("columns",
+exportMethods("cache",
+ "collect",
+ "columns",
+ "count",
"distinct",
"dtypes",
"except",
"explain",
"filter",
+ "first",
"groupBy",
"head",
"insertInto",
"intersect",
"isLocal",
+ "join",
+ "length",
"limit",
"orderBy",
"names",
+ "persist",
"printSchema",
"registerTempTable",
"repartition",
@@ -125,9 +43,11 @@ exportMethods("columns",
"show",
"showDF",
"sortDF",
+ "take",
"toJSON",
"toRDD",
"unionAll",
+ "unpersist",
"where",
"withColumn",
"withColumnRenamed")