# Imports from base R
importFrom(methods, setGeneric, setMethod, setOldClass)
useDynLib(SparkR, stringHashCode)

# S3 methods exported
export("sparkR.init")
export("sparkR.stop")
export("print.jobj")

# DataFrame class and methods
exportClasses("DataFrame")

exportMethods("arrange",
              "cache",
              "collect",
              "columns",
              "count",
              "describe",
              "distinct",
              "dtypes",
              "except",
              "explain",
              "filter",
              "first",
              "group_by",
              "groupBy",
              "head",
              "insertInto",
              "intersect",
              "isLocal",
              "join",
              "limit",
              "orderBy",
              "mutate",
              "names",
              "persist",
              "printSchema",
              "registerTempTable",
              "rename",
              "repartition",
              "sampleDF",
              "sample_frac",
              "saveAsParquetFile",
              "saveAsTable",
              "saveDF",
              "schema",
              "select",
              "selectExpr",
              "show",
              "showDF",
              "summarize",
              "take",
              "unionAll",
              "unpersist",
              "where",
              "withColumn",
              "withColumnRenamed")

# Column class and expression functions
exportClasses("Column")

exportMethods("abs",
              "alias",
              "approxCountDistinct",
              "asc",
              "avg",
              "cast",
              "contains",
              "countDistinct",
              "desc",
              "endsWith",
              "getField",
              "getItem",
              "isNotNull",
              "isNull",
              "last",
              "like",
              "lower",
              "max",
              "mean",
              "min",
              "n",
              "n_distinct",
              "rlike",
              "sqrt",
              "startsWith",
              "substr",
              "sum",
              "sumDistinct",
              "upper")

# GroupedData class and methods
exportClasses("GroupedData")
exportMethods("agg")

# SQLContext and HiveContext entry points
export("sparkRSQL.init",
       "sparkRHive.init")

# SQL and table management functions
export("cacheTable",
       "clearCache",
       "createDataFrame",
       "createExternalTable",
       "dropTempTable",
       "jsonFile",
       "loadDF",
       "parquetFile",
       "sql",
       "table",
       "tableNames",
       "tables",
       "uncacheTable")

# Schema construction helpers (structType / structField)
export("structField",
       "structField.jobj",
       "structField.character",
       "print.structField",
       "structType",
       "structType.jobj",
       "structType.structField",
       "print.structType")
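
# A minimal usage sketch of the API exported above, kept entirely in comments
# because a NAMESPACE file may not contain executable code. This is an
# illustrative assumption about typical use (a local Spark master and the
# built-in `faithful` dataset), not part of the package's declarations; it
# touches only functions exported in this file:
#
#   sc <- sparkR.init(master = "local")           # start the SparkContext
#   sqlContext <- sparkRSQL.init(sc)              # create a SQLContext from it
#   df <- createDataFrame(sqlContext, faithful)   # R data.frame -> DataFrame
#   head(filter(df, df$waiting > 70))             # DataFrame methods above
#   sparkR.stop()                                 # shut down the backend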